idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
12,200
def _tensors(cls, fluents: Sequence[FluentPair]) -> Iterable[tf.Tensor]:
    """Yield one tensor per fluent pair.

    NOTE(review): this calls ``cls._output_size`` on each fluent's tensor;
    given the sibling ``_output``/``_dtype`` helpers it looks like it may
    have been meant to yield ``fluent.tensor`` directly — confirm upstream.
    """
    for _, fluent in fluents:
        yield cls._output_size(fluent.tensor)
Yields the fluents tensors .
12,201
def _dtype(cls, tensor: tf.Tensor) -> tf.Tensor:
    """Return `tensor` cast to tf.float32, or unchanged if already float32."""
    if tensor.dtype == tf.float32:
        return tensor
    return tf.cast(tensor, tf.float32)
Converts tensor to tf . float32 datatype if needed .
12,202
def _output(cls, fluents: Sequence[FluentPair]) -> Sequence[tf.Tensor]:
    """Return the float32-cast output tensors for `fluents`."""
    return tuple(map(cls._dtype, cls._tensors(fluents)))
Returns output tensors for fluents .
12,203
def output_size(self) -> Tuple[Sequence[Shape], Sequence[Shape], Sequence[Shape], int]:
    """Return the simulation output size, delegated to the underlying cell."""
    return self._cell.output_size
Returns the simulation output size .
12,204
def timesteps(self, horizon: int) -> tf.Tensor:
    """Return a (batch_size, horizon, 1) float32 tensor counting down from horizon-1 to 0."""
    countdown = tf.range(horizon - 1, -1, -1, dtype=tf.float32)
    countdown = tf.expand_dims(countdown, -1)
    return tf.stack([countdown] * self.batch_size)
Returns the input tensor for the given horizon .
12,205
def trajectory(self, horizon: int, initial_state: Optional[StateTensor] = None) -> TrajectoryOutput:
    """Build the ops generating a trajectory for `horizon` steps from `initial_state`.

    Falls back to the cell's default initial state when none is given, and
    casts each fluent stream to the dtype declared by the RDDL model.
    """
    if initial_state is None:
        initial_state = self._cell.initial_state()
    with self.graph.as_default():
        self.inputs = self.timesteps(horizon)
        outputs, _ = tf.nn.dynamic_rnn(
            self._cell,
            self.inputs,
            initial_state=initial_state,
            dtype=tf.float32,
            scope="trajectory")
        states, actions, interms, rewards = outputs
        rddl = self._cell._compiler.rddl
        states = self._output(states, map(rddl2tf.utils.range_type_to_dtype, rddl.state_range_type))
        interms = self._output(interms, map(rddl2tf.utils.range_type_to_dtype, rddl.interm_range_type))
        actions = self._output(actions, map(rddl2tf.utils.range_type_to_dtype, rddl.action_range_type))
        return (initial_state, states, actions, interms, rewards)
Returns the ops for the trajectory generation with given horizon and initial_state .
12,206
def run(self, horizon: int, initial_state: Optional[StateTensor] = None) -> SimulationOutput:
    """Build the MDP graph and simulate batched trajectories for `horizon` steps.

    Returns (non_fluents, initial_state, states, actions, interms, rewards),
    with fluents and non-fluents in factored (name, value) form.
    """
    trajectory = self.trajectory(horizon, initial_state)
    with tf.Session(graph=self.graph) as sess:
        sess.run(tf.global_variables_initializer())
        non_fluents = sess.run(self._non_fluents)
        initial_state, states, actions, interms, rewards = sess.run(trajectory)
        domain = self._cell._compiler.rddl.domain
        non_fluents = tuple(zip(domain.non_fluent_ordering, non_fluents))
        states = tuple(zip(domain.state_fluent_ordering, states))
        interms = tuple(zip(domain.interm_fluent_ordering, interms))
        actions = tuple(zip(domain.action_fluent_ordering, actions))
        rewards = np.squeeze(rewards)
        return (non_fluents, initial_state, states, actions, interms, rewards)
Builds the MDP graph and simulates the batch of trajectories for the given horizon. Returns the non-fluents, states, actions, interms, and rewards. Fluents and non-fluents are returned in factored form.
12,207
def _output(cls, tensors: Sequence[tf.Tensor], dtypes: Sequence[tf.DType]) -> Sequence[tf.Tensor]:
    """Take the first element of each tensor and cast it to the paired dtype."""
    def convert(tensor, dtype):
        tensor = tensor[0]
        return tensor if tensor.dtype == dtype else tf.cast(tensor, dtype)
    return tuple(convert(t, d) for t, d in zip(tensors, dtypes))
Converts tensors to the corresponding dtypes .
12,208
def _get_biallelic_variant(self, variant, info, _check_alleles=True):
    """Read a bi-allelic variant's genotypes from the IMPUTE2 file.

    Returns a one-element list of genotypes, or an empty list when
    `_check_alleles` is set and the file's alleles do not match `variant`.
    """
    info = info.iloc[0, :]
    assert not info.multiallelic
    self._impute2_file.seek(info.seek)
    genotypes = self._parse_impute2_line(self._impute2_file.readline())
    variant_alleles = variant._encode_alleles([genotypes.reference, genotypes.coded])
    if _check_alleles and variant_alleles != variant.alleles:
        logging.variant_not_found(variant)
        return []
    return [genotypes]
Creates a bi - allelic variant .
12,209
def _fix_genotypes_object ( self , genotypes , variant_info ) : if self . has_index and variant_info . name != genotypes . variant . name : if not variant_info . name . startswith ( genotypes . variant . name ) : raise ValueError ( "Index file not synced with IMPUTE2 file" ) genotypes . variant . name = variant_info . name if self . has_index and self . _index_has_location : genotypes . multiallelic = variant_info . multiallelic else : logging . warning ( "Multiallelic variants are not detected on " "unindexed files." )
Fixes a genotypes object (variant name and multi-allelic value).
12,210
def _normalize_missing ( g ) : g = g . astype ( float ) g [ g == - 1.0 ] = np . nan return g
Normalize a plink genotype vector .
12,211
def maybe_download_and_extract():
    """Download the CIFAR tarball to /tmp/cifar (when absent) and extract it."""
    dest_directory = "/tmp/cifar"
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # simple console progress meter for urlretrieve
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
Download and extract the tarball from Alex's website.
12,212
def plot(config, image, file):
    """Save a single (squeezed) CIFAR image array to `file`."""
    image = np.squeeze(image)
    print(file, image.shape)
    imsave(file, image)
Plot a single CIFAR image .
12,213
def _get_seqtype_from_ext ( handle ) : if isinstance ( handle , basestring ) : name = handle elif hasattr ( handle , 'filename' ) : name = handle . filename elif hasattr ( handle , 'name' ) : name = handle . name else : raise ValueError ( "Unknown datatype for handle!" ) modifier = '' dummy , ext = path . splitext ( name . lower ( ) ) if ext == ".gz" : modifier = 'gz-' dummy , ext = path . splitext ( dummy ) if not ext : ext = "." + dummy if ext in ( ".gbk" , ".gb" , ".genbank" , ".gbff" ) : return modifier + "genbank" elif ext in ( ".embl" , ".emb" ) : return modifier + "embl" elif ext in ( ".fa" , ".fasta" , ".fna" , ".faa" , ".fas" ) : return modifier + "fasta" else : raise ValueError ( "Unknown file format '%s'." % ext )
Predict the filetype from a handle's name.
12,214
def _guess_seqtype_from_file ( handle ) : "Guess the sequence type from the file's contents" if isinstance ( handle , basestring ) : handle = StringIO ( handle ) for line in handle : if not line . strip ( ) : continue if line . lstrip ( ) . split ( ) [ 0 ] in ( 'LOCUS' , 'FEATURES' , 'source' , 'CDS' , 'gene' ) : return 'genbank' if len ( line ) > 2 and line [ : 3 ] in ( 'ID ' , 'FT ' ) : return 'embl' if line . startswith ( '>' ) : return 'fasta' handle . seek ( 0 ) import string from Bio . Data import IUPACData as iupac all_input_letters = set ( handle . read ( ) . lower ( ) ) all_valid = set ( string . digits ) all_valid . update ( set ( iupac . protein_letters . lower ( ) ) ) all_valid . update ( set ( iupac . unambiguous_dna_letters . lower ( ) ) ) all_valid . update ( set ( '- \n' ) ) if all_valid . issuperset ( all_input_letters ) : return 'fasta' raise ValueError ( "Failed to guess format for input" )
Guess the sequence type from the file's contents.
12,215
def _unzip_handle(handle):
    """Transparently gunzip a file name or an already-open handle."""
    # BUG FIX: `basestring` is Python 2 only; use str on py3.
    if isinstance(handle, str):
        return _gzip_open_filename(handle)
    return _gzip_open_handle(handle)
Transparently unzip the file handle
12,216
def sanity_check_insdcio(handle, id_marker, fake_id_line):
    """Ensure an INSDC-style handle starts with an ID line.

    When the record has a '//' terminator but no ID line, a fake ID line
    is prepended in a fresh in-memory handle; otherwise the (rewound)
    original handle is returned unchanged.
    """
    found_id = False
    found_end_marker = False
    for line in handle:
        line = line.strip()
        if not line:
            continue
        if line.startswith(id_marker):
            found_id = True
            break
        if line.startswith('//'):
            found_end_marker = True
            break
    handle.seek(0)
    if found_id or not found_end_marker:
        return handle
    patched = StringIO()
    patched.write("%s\n" % fake_id_line)
    patched.write(handle.read())
    patched.seek(0)
    return patched
Sanity check for insdcio style files
12,217
def sanity_check_fasta(handle):
    """Prepend a dummy '>' header in a new handle when a FASTA handle lacks one."""
    for line in handle:
        if line.startswith('>'):
            handle.seek(0)
            return handle
    handle.seek(0)
    patched = StringIO()
    patched.write("%s\n" % ">DUMMY")
    patched.write(handle.read())
    patched.seek(0)
    return patched
Sanity check FASTA files .
12,218
def parse(handle, seqtype=None, robust=False):
    """Wrap SeqIO.parse with extension sniffing, gzip unwrapping and sanity checks."""
    if seqtype is None:
        seqtype = _get_seqtype_from_ext(handle)
    if seqtype.startswith('gz-'):
        handle = _unzip_handle(handle)
        seqtype = seqtype[3:]
    if robust:
        if seqtype == "embl":
            handle = sanity_check_embl(handle)
        elif seqtype == "genbank":
            handle = sanity_check_genbank(handle)
        elif seqtype == "fasta":
            handle = sanity_check_fasta(handle)
    return SeqIO.parse(handle, seqtype)
Wrap SeqIO . parse
12,219
def isOrderFixed(self):
    """Return whether the port order is fixed (FIXED_ORDER, FIXED_RATIO or FIXED_POS)."""
    return self in (PortConstraints.FIXED_ORDER,
                    PortConstraints.FIXED_RATIO,
                    PortConstraints.FIXED_POS)
Returns whether the order of ports is fixed .
12,220
def _dicts_to_columns ( dicts ) : keys = dicts [ 0 ] . keys ( ) result = dict ( ( k , [ ] ) for k in keys ) for d in dicts : for k , v in d . items ( ) : result [ k ] += [ v ] return result
Given a List of Dictionaries with uniform keys returns a single Dictionary with keys holding a List of values matching the key in the original List .
12,221
def from_vertices_and_edges(vertices, edges, vertex_name_key='name', vertex_id_key='id',
                            edge_foreign_keys=('source', 'target'), directed=True):
    """Build an IGraph from per-vertex and per-edge dict lists.

    The `vertex_id_key` entry of each vertex dict is the ID that edge dicts
    reference via `edge_foreign_keys`; all remaining entries become vertex
    and edge attributes.
    """
    vertex_data = _dicts_to_columns(vertices)
    edge_data = _dicts_to_columns(edges)
    n = len(vertices)
    vertex_index = dict(zip(vertex_data[vertex_id_key], range(n)))
    edge_list = [
        (vertex_index[src], vertex_index[dst])
        for src, dst in zip(edge_data[edge_foreign_keys[0]],
                            edge_data[edge_foreign_keys[1]])
    ]
    g = IGraph(n=n, edges=edge_list, directed=directed,
               vertex_attrs=vertex_data, edge_attrs=edge_data)
    g.vs['name'] = g.vs[vertex_name_key]
    g.vs['indegree'] = g.degree(mode="in")
    g.vs['outdegree'] = g.degree(mode="out")
    g.vs['label'] = g.vs[vertex_name_key]
    if 'group' not in g.vs.attributes():
        g.vs['group'] = labels_to_groups(g.vs['label'])
    return g
This representation assumes that vertices and edges are encoded in two lists each list containing a Python dict for each vertex and each edge respectively . A distinguished element of the vertex dicts contain a vertex ID which is used in the edge dicts to refer to source and target vertices . All the remaining elements of the dicts are considered vertex and edge attributes .
12,222
def from_edges(edges, source_key='source', target_key='target', weight_key='weight', directed=True):
    """Build a weighted (optionally directed) IGraph from a list of edge dicts."""
    raw = [[e[source_key], e[target_key], int(e[weight_key])] for e in edges]
    g = IGraph.TupleList(raw, weights=True, directed=directed)
    g.vs['indegree'] = g.degree(mode="in")
    g.vs['outdegree'] = g.degree(mode="out")
    g.vs['label'] = g.vs['name']
    if 'group' not in g.vs.attributes():
        g.vs['group'] = labels_to_groups(g.vs['label'])
    return g
Given a List of Dictionaries with source target and weight attributes return a weighted directed graph .
12,223
def flip_alleles(genotypes):
    """Swap the reference/coded alleles and mirror the dosages (deprecated)."""
    warnings.warn("deprecated: use 'Genotypes.flip_coded'", DeprecationWarning)
    genotypes.reference, genotypes.coded = genotypes.coded, genotypes.reference
    genotypes.genotypes = 2 - genotypes.genotypes
    return genotypes
Flip the alleles of a Genotypes instance.
12,224
def code_minor(genotypes):
    """Ensure the coded allele is the minor one, flipping when needed (deprecated)."""
    warnings.warn("deprecated: use 'Genotypes.code_minor'", DeprecationWarning)
    _, minor_is_coded = maf(genotypes)
    return genotypes if minor_is_coded else flip_alleles(genotypes)
Encode the genotypes with respect to the minor allele .
12,225
def maf(genotypes):
    """Return (maf, coded_is_minor) for a Genotypes instance (deprecated)."""
    warnings.warn("deprecated: use 'Genotypes.maf'", DeprecationWarning)
    g = genotypes.genotypes
    frequency = np.nansum(g) / (2 * np.sum(~np.isnan(g)))
    if frequency > 0.5:
        return 1 - frequency, False
    return frequency, True
Computes the MAF and returns a boolean indicating if the minor allele is currently the coded allele .
12,226
def genotype_to_df(g, samples, as_string=False):
    """Convert a genotype object to a one-column pandas DataFrame.

    The column is named after the variant (falling back to 'genotypes').
    With `as_string`, dosages are rounded to hard calls and rendered as
    'ref/coded' allele strings.
    """
    name = g.variant.name if g.variant.name else "genotypes"
    df = pd.DataFrame(g.genotypes, index=samples, columns=[name])
    if as_string:
        df["alleles"] = None
        hard_calls = df[name].round()
        df.loc[hard_calls == 0, "alleles"] = "{0}/{0}".format(g.reference)
        df.loc[hard_calls == 1, "alleles"] = "{0}/{1}".format(g.reference, g.coded)
        df.loc[hard_calls == 2, "alleles"] = "{0}/{0}".format(g.coded)
        df = df[["alleles"]]
        df.columns = [name]
    return df
Convert a genotype object to a pandas dataframe .
12,227
def compute_ld(cur_geno, other_genotypes, r2=False):
    """Compute LD (r, or r² when `r2`) between one marker and a list of markers."""
    norm_cur = normalize_genotypes(cur_geno)
    norm_others = np.stack(
        tuple(normalize_genotypes(g) for g in other_genotypes),
        axis=1,
    )
    assert norm_cur.shape[0] == norm_others.shape[0]
    # per-pair count of samples non-missing in both markers
    n = (~np.isnan(norm_cur.reshape(norm_cur.shape[0], 1)) * ~np.isnan(norm_others)).sum(axis=0)
    r = pd.Series(
        np.dot(np.nan_to_num(norm_cur), np.nan_to_num(norm_others) / n),
        index=[g.variant.name for g in other_genotypes],
        name="r2" if r2 else "r",
    )
    # clamp numerical noise into the valid correlation range
    r.loc[r > 1] = 1
    r.loc[r < -1] = -1
    return r ** 2 if r2 else r
Compute LD between a marker and a list of markers .
12,228
def normalize_genotypes(genotypes):
    """Return the dosage vector centred and scaled (NaN-aware)."""
    dosages = genotypes.genotypes
    return (dosages - np.nanmean(dosages)) / np.nanstd(dosages)
Normalize the genotypes .
12,229
def _get_tdm(self, m):
    """Return a tdMan instance for model `m` (magnitude row, optional phase row)."""
    m = np.atleast_2d(m)
    assert len(m.shape) == 2
    tdm = crtomo.tdMan(grid=self.grid, tempdir=self.tempdir)
    tdm.configs.add_to_configs(self.configs)
    pid_mag = tdm.parman.add_data(m[0, :])
    tdm.register_magnitude_model(pid_mag)
    if m.shape[0] == 2:
        pid_pha = tdm.parman.add_data(m[1, :])
    else:
        # no phase row supplied: register an all-zero phase model
        pid_pha = tdm.parman.add_data(np.zeros(m.shape[1]))
    tdm.register_phase_model(pid_pha)
    return tdm
For a given model return a tdMan instance
12,230
def J(self, log_sigma):
    """Return the sensitivity matrix for the model given as log-conductivities."""
    m = 1.0 / np.exp(log_sigma)
    tdm = self._get_tdm(m)
    tdm.model(sensitivities=True)
    measurements = tdm.measurements()
    sens_list = []
    for config_nr, cids in sorted(tdm.assignments['sensitivities'].items()):
        sens_list.append(tdm.parman.parsets[cids[0]])
    sensitivities_lin = np.array(sens_list)
    measurements_rep = np.repeat(
        measurements[:, 0, np.newaxis], sensitivities_lin.shape[1], axis=1)
    m_rep = np.repeat(m[np.newaxis, :], sensitivities_lin.shape[0], axis=0)
    # rescale linear sensitivities into log-log space
    factor = -1 / (m_rep * measurements_rep)
    return factor * sensitivities_lin
Return the sensitivity matrix
12,231
def set_ironic_uuid(self, uuid_list):
    """Assign each Ironic UUID to the corresponding BM node, pairwise.

    Uses zip() instead of manually advancing an iterator, which also stops
    cleanly (instead of raising StopIteration) if `uuid_list` is longer
    than `self.nodes`.
    """
    for node, uuid in zip(self.nodes, uuid_list):
        node.uuid = uuid
Map a list of Ironic UUID to BM nodes .
12,232
def find_resistance(record):
    """Infer the antibiotics resistance of `record` (exactly one cassette expected)."""
    for feature in record.features:
        labels = set(feature.qualifiers.get("label", []))
        cassettes = labels.intersection(_ANTIBIOTICS)
        if len(cassettes) > 1:
            raise RuntimeError("multiple resistance cassettes detected")
        if len(cassettes) == 1:
            return _ANTIBIOTICS.get(cassettes.pop())
    raise RuntimeError("could not find the resistance of '{}'".format(record.id))
Infer the antibiotics resistance of the given record .
12,233
def shell_cmd(args, cwd=None):
    """Run a shell command and return its stripped stdout (bytes).

    Raises IOError on a non-zero exit status — contrary to the old
    docstring, it never returns None on failure.

    NOTE(review): runs with shell=True, so `args` must never contain
    untrusted input; prefer a list of arguments with shell=False where
    callers allow it.
    """
    if cwd is None:
        cwd = os.path.abspath('.')
    if not isinstance(args, (list, tuple)):
        args = [args]
    ps = Popen(args, shell=True, cwd=cwd, stdout=PIPE, stderr=PIPE, close_fds=True)
    stdout, stderr = ps.communicate()
    if ps.returncode != 0:
        if stderr:
            stderr = stderr.strip()
        raise IOError('Shell command %s failed (exit status %r): %s'
                      % (args, ps.returncode, stderr))
    return stdout.strip()
Returns stdout as string or None on failure
12,234
def reverse_complement(self, id=False, name=False, description=False, features=True,
                       annotations=False, letter_annotations=True, dbxrefs=False):
    """Return a new CircularRecord holding the reverse-complemented sequence."""
    flipped = super(CircularRecord, self).reverse_complement(
        id=id,
        name=name,
        description=description,
        features=features,
        annotations=annotations,
        letter_annotations=letter_annotations,
        dbxrefs=dbxrefs,
    )
    return type(self)(flipped)
Return a new CircularRecord with reverse complement sequence .
12,235
def load_private_key(self, priv_key):
    """Load and register the SSH RSA private key found at path `priv_key`."""
    with open(priv_key) as fd:
        self._private_key = paramiko.RSAKey.from_private_key(fd)
Register the SSH private key .
12,236
def start(self):
    """Connect the ssh client to the host, retrying once per second for 60s."""
    if self.via_ip:
        connect_to = self.via_ip
        self.description = '[%s@%s via %s]' % (self._user, self._hostname, self.via_ip)
    else:
        connect_to = self._hostname
        self.description = '[%s@%s]' % (self._user, self._hostname)
    exception = None
    for _ in range(60):
        try:
            self._client.connect(connect_to, username=self._user,
                                 allow_agent=True, key_filename=self._key_filename)
            self._transport = self._get_transport()
        except (OSError, TypeError, ssh_exception.SSHException,
                ssh_exception.NoValidConnectionsError) as e:
            exception = e
            LOG.info('%s waiting for %s: %s' % (self.description, connect_to, str(exception)))
            time.sleep(1)
        else:
            LOG.debug('%s connected' % self.description)
            self._started = True
            return
    _error = ("unable to connect to ssh service on '%s': %s"
              % (self._hostname, str(exception)))
    LOG.error(_error)
    raise exception
Start the ssh client and connect to the host .
12,237
def _get_channel(self):
    """Open a PTY session channel with stderr merged into stdout."""
    channel = self._transport.open_session()
    channel.set_combine_stderr(True)
    channel.get_pty()
    return channel
Returns a channel according to if there is a redirection to do or not .
12,238
def print_fields(bf, *args, **kwargs):
    """Print a Bitfield's base and its fields rendered as hex (debugging aid)."""
    hex_fields = {name: hex(value) for name, value in bf.items()}
    print(bf.base, hex_fields, *args, **kwargs)
Print all the fields of a Bitfield object to stdout. This is primarily a diagnostic aid during debugging.
12,239
def clone(self):
    """Return a detached copy with the same value.

    The clone is no longer linked to the original bitfield, so subsequent
    work touches plain memory instead of a slow or side-effecting location:
    only the single read performed here hits the original.
    """
    duplicate = self.__class__()
    duplicate.base = self.base
    return duplicate
Return a new bitfield with the same value . The returned value is a copy and so is no longer linked to the original bitfield . This is important when the original is located at anything other than normal memory with accesses to it either slow or having side effects . Creating a clone and working against that clone means that only one read will occur .
12,240
def new(self, base: pathlib.PurePath = pathlib.PurePath(), include_intermediates: bool = True) -> Iterator[str]:
    """Yield the right-side path when this comparison represents a new entity."""
    # include_intermediates is accepted for interface parity but unused here
    if self.is_new:
        yield str(base / self.right.name)
Find the list of new paths in this comparison .
12,241
def modified(self, base: pathlib.PurePath = pathlib.PurePath()) -> Iterator[str]:
    """Yield the right-side path when this comparison represents a modification."""
    if self.is_modified:
        yield str(base / self.right.name)
Find the paths of modified files . There is no option to include intermediate directories as all files and directories exist in both the left and right trees .
12,242
def deleted(self, base: pathlib.PurePath = pathlib.PurePath(), include_children: bool = True,
            include_directories: bool = True) -> Iterator[str]:
    """Yield the left-side path when this comparison represents a deletion."""
    # include_children / include_directories accepted for interface parity; unused here
    if self.is_deleted:
        yield str(base / self.left.name)
Find the paths of entities deleted between the left and right entities in this comparison .
12,243
def compare(left: Optional[L], right: Optional[R]) -> 'Comparison[L, R]':
    """Build the appropriate Comparison subtype for a pair of entities."""
    if isinstance(left, File) and isinstance(right, Directory):
        return FileDirectoryComparison(left, right)
    if isinstance(left, Directory) and isinstance(right, File):
        return DirectoryFileComparison(left, right)
    if isinstance(left, File) or isinstance(right, File):
        return FileComparison(left, right)
    if isinstance(left, Directory) or isinstance(right, Directory):
        return DirectoryComparison(left, right)
    raise TypeError(f'Cannot compare entities: {left}, {right}')
Calculate the comparison of two entities .
12,244
def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) -> None:
    """Print this comparison indented by its nesting level."""
    indent = ' ' * self._INDENT_SIZE * level
    print(indent + str(self), file=file)
Print this comparison and its children with indentation to represent nesting .
12,245
def is_modified(self) -> bool:
    """True when the file exists on both sides and the MD5 digests differ.

    New or deleted files are never "modified": modification requires the
    file to be present on both the left and the right.
    """
    if self.is_new or self.is_deleted:
        return False
    return self.left.md5 != self.right.md5
Find whether the files on the left and right are different . Note modified implies the contents of the file have changed which is predicated on the file existing on both the left and right . Therefore this will be false if the file on the left has been deleted or the file on the right is new .
12,246
def generate_index(fn, cols=None, names=None, sep=" "):
    """Build, persist and return the index DataFrame for file `fn`."""
    assert cols is not None, "'cols' was not set"
    assert names is not None, "'names' was not set"
    assert len(cols) == len(names)
    bgzip, open_func = get_open_func(fn, return_fmt=True)
    data = pd.read_csv(fn, sep=sep, engine="c", usecols=cols, names=names,
                       compression="gzip" if bgzip else None)
    f = open_func(fn, "rb")
    # record the byte offset of every line; drop the trailing EOF offset
    data["seek"] = np.fromiter(_seek_generator(f), dtype=np.uint)[:-1]
    f.close()
    write_index(get_index_fn(fn), data)
    return data
Build a index for the given file .
12,247
def get_open_func(fn, return_fmt=False):
    """Return the opener for `fn` (BgzfReader for bgzip files, builtin open otherwise).

    With `return_fmt`, returns (is_bgzip, open_func). Raises ValueError for
    gzip files that are not bgzip, or when BioPython is unavailable.
    """
    with open(fn, "rb") as i_file:
        bgzip = i_file.read(3) == b"\x1f\x8b\x08"
    if bgzip and not HAS_BIOPYTHON:
        raise ValueError("needs BioPython to index a bgzip file")
    open_func = BgzfReader if bgzip else open
    try:
        with open_func(fn, "r") as i_file:
            if bgzip and not i_file.seekable():
                raise ValueError
    except ValueError:
        raise ValueError("{}: use bgzip for compression...".format(fn))
    if return_fmt:
        return bgzip, open_func
    return open_func
Get the opening function .
12,248
def get_index(fn, cols, names, sep):
    """Load the index for `fn`, generating it first when missing; validate columns."""
    if not has_index(fn):
        return generate_index(fn, cols, names, sep)
    file_index = read_index(get_index_fn(fn))
    if set(names) - (set(file_index.columns) - {'seek'}):
        raise ValueError("{}: missing index columns: reindex".format(fn))
    if "seek" not in file_index.columns:
        raise ValueError("{}: invalid index: reindex".format(fn))
    return file_index
Restores the index for a given file .
12,249
def write_index(fn, index):
    """Serialize `index` (a DataFrame) as zlib-compressed CSV behind a magic header."""
    with open(fn, "wb") as o_file:
        o_file.write(_CHECK_STRING)
        csv_bytes = bytes(index.to_csv(None, index=False, encoding="utf-8"),
                          encoding="utf-8")
        o_file.write(zlib.compress(csv_bytes))
Writes the index to file .
12,250
def read_index(fn):
    """Read a previously written index file back into a DataFrame.

    Raises ValueError when the magic check-string prefix is missing.
    """
    with open(fn, "rb") as i_file:
        if i_file.read(len(_CHECK_STRING)) != _CHECK_STRING:
            raise ValueError("{}: not a valid index file".format(fn))
        payload = zlib.decompress(i_file.read()).decode(encoding="utf-8")
    return pd.read_csv(io.StringIO(payload))
Reads index from file .
12,251
def make_path(phase) -> str:
    """Build the folder path where a phase's metadata and optimizer pickle live."""
    return "{}/{}{}{}".format(conf.instance.output_path,
                              phase.phase_path,
                              phase.phase_name,
                              phase.phase_tag)
Create the path to the folder at which the metadata and optimizer pickle should be saved
12,252
def save_optimizer_for_phase(phase):
    """Pickle the phase's optimizer to its optimizer pickle path."""
    with open(make_optimizer_pickle_path(phase), "w+b") as f:
        pickle.dump(phase.optimizer, f)
Save the optimizer associated with the phase as a pickle
12,253
def assert_optimizer_pickle_matches_for_phase(phase):
    """Raise when a previously pickled optimizer differs from the phase's optimizer."""
    path = make_optimizer_pickle_path(phase)
    if not os.path.exists(path):
        return
    with open(path, "r+b") as f:
        loaded_optimizer = pickle.loads(f.read())
    if phase.optimizer != loaded_optimizer:
        raise exc.PipelineException(
            f"Can't restart phase at path {path} because settings don't match. "
            f"Did you change the optimizer settings or model?"
        )
Assert that the previously saved optimizer is equal to the phase s optimizer if a saved optimizer is found .
12,254
def add(self, phase_name, result):
    """Record the result of a phase, rejecting duplicate phase names."""
    if phase_name in self.__result_dict:
        raise exc.PipelineException(
            "Results from a phase called {} already exist in the pipeline".format(phase_name))
    self.__result_list.append(result)
    self.__result_dict[phase_name] = result
Add the result of a phase .
12,255
def from_phase(self, phase_name):
    """Return the result of a previous phase by name, or raise PipelineException."""
    try:
        return self.__result_dict[phase_name]
    except KeyError:
        raise exc.PipelineException(
            "No previous phase named {} found in results ({})".format(
                phase_name, ", ".join(self.__result_dict.keys())))
Returns the result of a previous phase by its name
12,256
def save_metadata(self, phase, data_name):
    """Write pipeline/phase/data names into the phase folder's .metadata file."""
    with open("{}/.metadata".format(make_path(phase)), "w+") as f:
        f.write("pipeline={}\nphase={}\ndata={}".format(
            self.pipeline_name, phase.phase_name, data_name))
Save metadata associated with the phase such as the name of the pipeline the name of the phase and the name of the data being fit
12,257
def run_function(self, func, data_name=None, assert_optimizer_pickle_matches=True):
    """Run `func(phase, results)` for each phase, collecting results by phase name."""
    results = ResultsCollection()
    for i, phase in enumerate(self.phases):
        logger.info("Running Phase {} (Number {})".format(phase.optimizer.phase_name, i))
        if assert_optimizer_pickle_matches:
            assert_optimizer_pickle_matches_for_phase(phase)
        save_optimizer_for_phase(phase)
        self.save_metadata(phase, data_name)
        results.add(phase.phase_name, func(phase, results))
    return results
Run the function for each phase in the pipeline .
12,258
def strtobytes(input, encoding):
    """Encode `input` to a byte array via the interpreter-appropriate helper."""
    if sys.version_info[0] >= 3:
        return _strtobytes_py3(input, encoding)
    return _strtobytes_py2(input, encoding)
Take a str and transform it into a byte array .
12,259
def index_impute2(fn):
    """Index an IMPUTE2 file on its chrom/name/pos columns."""
    logger.info("Indexing {} (IMPUTE2)".format(fn))
    impute2_index(fn, cols=[0, 1, 2], names=["chrom", "name", "pos"], sep=" ")
    logger.info("Index generated")
Indexes an IMPUTE2 file .
12,260
def index_bgen(fn, legacy=False):
    """Index a BGEN file by invoking the external 'bgenix' tool.

    Exits the process with status 1 when 'bgenix' is not installed.
    """
    logger.info("Indexing {} (BGEN) using 'bgenix'{}".format(
        fn, " (legacy mode)" if legacy else "",
    ))
    command = ["bgenix", "-g", fn, "-index"]
    if legacy:
        command.append("-with-rowid")
    try:
        logger.info("Executing '{}'".format(" ".join(command)))
        subprocess.Popen(command).communicate()
    except FileNotFoundError:
        logger.error("Cannot find 'bgenix', impossible to index {}".format(fn))
        sys.exit(1)
    logger.info("Index generated")
Indexes a BGEN file .
12,261
def create_untl_xml_subelement(parent, element, prefix=''):
    """Create a UNTL XML subelement under `parent` mirroring `element`.

    Children are serialized as nested subelements; otherwise the element's
    own content becomes the subelement text. Content and qualifier are
    copied when present.
    """
    subelement = SubElement(parent, prefix + element.tag)
    if element.content is not None:
        subelement.text = element.content
    if element.qualifier is not None:
        subelement.attrib["qualifier"] = element.qualifier
    # BUG FIX: `element.children > 0` compared a list to an int, which
    # raises TypeError on Python 3; test the list's truthiness instead.
    if element.children:
        for child in element.children:
            SubElement(subelement, prefix + child.tag).text = child.content
    else:
        subelement.text = element.content
    return subelement
Create a UNTL XML subelement .
12,262
def add_missing_children(required_children, element_children):
    """Append blank placeholder elements for required child tags that are absent.

    Tries to construct each placeholder with empty content first; falls back
    to a no-argument constructor for element types that reject `content`.
    Returns the (mutated) `element_children` list.
    """
    existing_tags = {element.tag for element in element_children}
    for tag in required_children:
        if tag in existing_tags:
            continue
        try:
            placeholder = PYUNTL_DISPATCH[tag](content='')
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed
            placeholder = PYUNTL_DISPATCH[tag]()
        element_children.append(placeholder)
    return element_children
Determine if there are elements not in the children that need to be included as blank elements in the form .
12,263
def set_qualifier(self, value):
    """Set the element qualifier (stripped), when the element permits one."""
    if not self.allows_qualifier:
        raise UNTLStructureException(
            'Element "%s" does not allow a qualifier' % (self.tag,))
    self.qualifier = value.strip()
Set the qualifier for the element .
12,264
def add_form(self, **kwargs):
    """Attach the dispatched form object for this element.

    Builds the dispatch keyword set once instead of enumerating every
    qualifier/content/parent_tag combination. Matches the original
    branching exactly: qualifier_value and input_value are only passed
    when given, and parent_tag is only forwarded when no qualifier is.
    """
    qualifier = kwargs.get('qualifier', None)
    content = kwargs.get('content', None)
    parent_tag = kwargs.get('parent_tag', None)
    form_kwargs = {
        'vocabularies': kwargs.get('vocabularies', None),
        'untl_object': self,
        'superuser': kwargs.get('superuser', False),
    }
    if qualifier is not None:
        form_kwargs['qualifier_value'] = qualifier
    if content is not None:
        form_kwargs['input_value'] = content
    if qualifier is None and parent_tag is not None:
        form_kwargs['parent_tag'] = parent_tag
    self.form = UNTL_FORM_DISPATCH[self.tag](**form_kwargs)
Add the form attribute to the UNTL Python object .
12,265
def record_content_length(self):
    """Return the length of the record's dict representation, excluding metadata."""
    untldict = py2dict(self)
    untldict.pop('meta', None)
    return len(str(untldict))
Calculate length of record excluding metadata .
12,266
def create_form_data(self, **kwargs):
    """Build the list of form element groups for the metadata editor.

    Keyword Args:
        children: UNTL elements to build forms for.
        sort_order: ordering applied to the resulting groups.
        solr_response: search data handed through to the group objects.
        superuser: whether superuser-only form fields are enabled.

    Returns:
        The sorted group-object list from create_form_groupings().
    """
    children = kwargs.get('children', [])
    sort_order = kwargs.get('sort_order', None)
    solr_response = kwargs.get('solr_response', None)
    superuser = kwargs.get('superuser', False)
    vocabularies = self.get_vocabularies()
    for element in children:
        # Fill in any allowed-but-absent child elements as blanks.
        element.children = add_missing_children(
            element.contained_children,
            element.children,
        )
        element.add_form(
            vocabularies=vocabularies,
            qualifier=element.qualifier,
            content=element.content,
            superuser=superuser,
        )
        if element.form.has_children:
            # A parent-level qualifier becomes its own pseudo-child entry.
            if getattr(element.form, 'qualifier_name', False):
                add_parent = PARENT_FORM[element.form.qualifier_name](
                    content=element.qualifier,
                )
                element.children.append(add_parent)
            # NOTE(review): the sort and child-form pass are assumed to run
            # whenever the form has children, not only when qualifier_name is
            # set — confirm against the original (pre-flattening) indentation.
            element.children.sort(
                key=lambda obj: element.form.child_sort.index(obj.tag))
            for child in element.children:
                child.add_form(
                    vocabularies=vocabularies,
                    qualifier=child.qualifier,
                    content=child.content,
                    parent_tag=element.tag,
                    superuser=superuser,
                )
    # Group elements by form name; the hidden meta element is its own group.
    element_group_dict = {}
    for element in children:
        if element.form.name == 'meta' and element.qualifier == 'hidden':
            element_group_dict['hidden'] = [element]
        else:
            if element.form.name not in element_group_dict:
                element_group_dict[element.form.name] = []
            element_group_dict[element.form.name].append(element)
    # Guarantee a 'hidden' group exists, defaulting to hidden = 'False'.
    if 'hidden' not in element_group_dict:
        hidden_element = PYUNTL_DISPATCH['meta'](
            qualifier='hidden',
            content='False',
        )
        hidden_element.add_form(
            vocabularies=vocabularies,
            qualifier=hidden_element.qualifier,
            content=hidden_element.content,
            superuser=superuser,
        )
        element_group_dict['hidden'] = [hidden_element]
    element_list = self.create_form_groupings(
        vocabularies,
        solr_response,
        element_group_dict,
        sort_order,
    )
    return element_list
Create groupings of form elements .
12,267
def create_form_groupings(self, vocabularies, solr_response, element_group_dict, sort_order):
    """Build UNTL group objects from element groupings, sorted by sort_order.

    Side effect: records the names of adjustable form items that carry a
    Python value onto self.adjustable_items.
    """
    groups = []
    for name, members in element_group_dict.items():
        group = UNTL_GROUP_DISPATCH[name](
            vocabularies=vocabularies,
            solr_response=solr_response,
            group_name=name,
            group_list=members,
        )
        adjustable = group.adjustable_form
        if adjustable is not None:
            for adj_name, form_dict in adjustable.items():
                if form_dict['value_py'] is not None:
                    self.adjustable_items.append(adj_name)
        groups.append(group)
    groups.sort(key=lambda g: sort_order.index(g.group_name))
    return groups
Create a group object from groupings of element objects .
12,268
def get_vocabularies(self):
    """Fetch the verbose vocabulary dictionary used to populate qualifiers.

    Returns:
        dict: the parsed vocabulary data.

    Raises:
        UNTLStructureException: if the vocabulary service cannot be
            reached or its response cannot be parsed.
    """
    timeout = 15
    socket.setdefaulttimeout(timeout)
    # Request the verbose variant of the vocabulary listing.
    vocab_url = VOCABULARIES_URL.replace('all', 'all-verbose')
    try:
        # SECURITY: eval() on a remote response executes arbitrary code if
        # the vocabulary service is compromised; the endpoint should serve
        # JSON so this can become json.loads(). Kept as-is for compatibility
        # with the service's current Python-literal response format.
        vocab_dict = eval(urllib2.urlopen(vocab_url).read())
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        raise UNTLStructureException('Could not retrieve the vocabularies')
    return vocab_dict
Get the vocabularies to pull the qualifiers from .
12,269
def create_xml_string(self):
    """Serialize this UNTL root object to a pretty-printed XML string."""
    declaration = '<?xml version="1.0" encoding="UTF-8"?>\n'
    body = tostring(self.create_xml(), pretty_print=True)
    return declaration + body
Create a UNTL document in a string from a UNTL metadata root object .
12,270
def create_xml(self, useNamespace=False):
    """Build an lxml Element tree for this object and its children.

    Args:
        useNamespace: when True, qualify tags with the UNTL namespace.
    """
    UNTL_NAMESPACE = 'http://digital2.library.unt.edu/untl/'
    UNTL = '{%s}' % UNTL_NAMESPACE
    NSMAP = {'untl': UNTL_NAMESPACE}
    if useNamespace:
        root = Element(UNTL + self.tag, nsmap=NSMAP)
    else:
        root = Element(self.tag)
    # Children must appear in the canonical UNTL element order.
    self.sort_untl(UNTL_XML_ORDER)
    prefix_args = (UNTL,) if useNamespace else ()
    for child in self.children:
        create_untl_xml_subelement(root, child, *prefix_args)
    return root
Create an ElementTree representation of the object .
12,271
def create_element_dict(self):
    """Convert this UNTL object tree into a plain Python dictionary."""
    untl_dict = {}
    for element in self.children:
        entries = untl_dict.setdefault(element.tag, [])
        entry = {}
        if element.qualifier is not None:
            entry['qualifier'] = element.qualifier
        if len(element.contained_children) > 0:
            # Nested elements become a tag -> content mapping.
            entry['content'] = {
                child.tag: child.content
                for child in element.children
                if child.content is not None
            }
        elif element.content is not None:
            entry['content'] = element.content
        entries.append(entry)
    return untl_dict
Convert a UNTL Python object into a UNTL Python dictionary .
12,272
def create_xml_file(self, untl_filename):
    """Write this record's UNTL XML to *untl_filename*.

    Raises:
        UNTLStructureException: if the file cannot be written.
    """
    try:
        # Binary mode: we write UTF-8 encoded bytes, and the original's
        # text-mode handle was also never closed on failure — the context
        # manager fixes the leak.
        with open(untl_filename, 'wb') as xml_file:
            xml_file.write(self.create_xml_string().encode('utf-8'))
    except Exception:
        raise UNTLStructureException(
            'Failed to create UNTL XML file. File: %s' % (untl_filename)
        )
Create a UNTL file .
12,273
def sort_untl(self, sort_structure):
    """Order self.children in place by each tag's index in sort_structure."""
    position = sort_structure.index
    self.children.sort(key=lambda child: position(child.tag))
Sort the UNTL Python object by the index of a sort structure pre - ordered list .
12,274
def generate_form_data(self, **kwargs):
    """Fill in missing child elements, then build a FormGenerator for them."""
    self.children = add_missing_children(self.contained_children, self.children)
    # The generator always receives the refreshed children list.
    form_kwargs = dict(kwargs, children=self.children)
    return FormGenerator(**form_kwargs)
Create a form dictionary with the key being the element name and the value being a list of form element objects .
12,275
def contributor_director(**kwargs):
    """Create an ETD_MSContributor with an expanded role, or None.

    Returns None when the qualifier has no known role expansion.
    """
    qualifier = kwargs.get('qualifier')
    if qualifier not in ETD_MS_CONTRIBUTOR_EXPANSION:
        return None
    return ETD_MSContributor(
        role=ETD_MS_CONTRIBUTOR_EXPANSION[qualifier],
        **kwargs
    )
Return an ETD_MSContributor whose role is the expanded form of the qualifier, or None when the qualifier has no known expansion.
12,276
def date_director(**kwargs):
    """Create an ETD_MSDate for any qualifier except 'digitized'.

    The original special-cased 'creation', but that branch was identical to
    the general non-'digitized' branch, so the two collapse into one test.

    Returns:
        ETD_MSDate with stripped content, or None for digitized dates.
    """
    if kwargs.get('qualifier') == 'digitized':
        return None
    return ETD_MSDate(content=kwargs.get('content').strip())
Direct which class should be used based on the date qualifier or if the date should be converted at all .
12,277
def subject_director(**kwargs):
    """Build an ETD_MSSubject, attaching a scheme for qualified subjects.

    Keyword (KWD) and unqualified subjects carry only content.
    """
    qualifier = kwargs.get('qualifier')
    if qualifier in ('KWD', ''):
        return ETD_MSSubject(content=kwargs.get('content'))
    return ETD_MSSubject(scheme=qualifier, **kwargs)
Direct how to handle a subject element .
12,278
def get_child_content(self, children, element_name):
    """Return the content of the first child whose tag matches, else ''."""
    matches = (child.content for child in children if child.tag == element_name)
    return next(matches, '')
Get the requested element content from a list of children .
12,279
def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
    """Return (and register) a copy of *cmap* with its center at *midpoint*.

    Useful when data has an asymmetric range (negative min, positive max)
    and the colormap's middle should sit at zero.
    """
    # Sample the source colormap at 257 evenly spaced points in [start, stop]...
    sample_points = np.linspace(start, stop, 257)
    # ...and map them onto an output index compressed on one side of the
    # midpoint and stretched on the other (128 below, 129 above, inclusive).
    shifted_points = np.hstack([
        np.linspace(0.0, midpoint, 128, endpoint=False),
        np.linspace(midpoint, 1.0, 129, endpoint=True),
    ])
    cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []}
    for src, dst in zip(sample_points, shifted_points):
        r, g, b, a = cmap(src)
        cdict['red'].append((dst, r, r))
        cdict['green'].append((dst, g, g))
        cdict['blue'].append((dst, b, b))
        cdict['alpha'].append((dst, a, a))
    newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)
    plt.register_cmap(cmap=newcmap)
    return newcmap
Offset the center of a colormap. Useful for data with a negative minimum and a positive maximum where you want the middle of the colormap's dynamic range to be at zero.
12,280
def read_lastmodfile(directory):
    """Return the number of the final inversion result, or None.

    Reads {directory}/exe/inv.lastmod, which contains a path such as
    '../inv/rho12.mag', and strips it down to the bare iteration number.
    """
    filename = '{0}/exe/inv.lastmod'.format(directory)
    if not os.path.isfile(filename):
        return None
    # Context manager closes the handle (the original leaked an open file).
    with open(filename, 'r') as fid:
        linestring = fid.readline().strip()
    linestring = linestring.replace("\n", '')
    linestring = linestring.replace(".mag", '')
    linestring = linestring.replace("../inv/rho", '')
    return linestring
Return the number of the final inversion result .
12,281
def setHandler(self, event_name, callback):
    """Register *callback* as the handler for *event_name*.

    Raises:
        ValueError: if event_name is not a known event.
        TypeError: if callback is not callable.

    Bug fix: the original tested ``callable(event_name)`` — the wrong
    argument, with the condition inverted — so a non-callable callback
    was silently accepted and valid callbacks could never be rejected.
    """
    if event_name not in self.handlers:
        raise ValueError('{} is not a valid event'.format(event_name))
    if not callable(callback):
        raise TypeError('{} is not callable'.format(callback))
    self.handlers[event_name] = callback
Set an handler for given event .
12,282
def isHandlerPresent(self, event_name):
    """Tell whether *event_name* currently has a handler attached.

    Raises:
        ValueError: if event_name is not a known event.
    """
    known = event_name in self.handlers
    if not known:
        raise ValueError('{} is not a valid event'.format(event_name))
    return self.handlers[event_name] is not None
Check if an event has an handler .
12,283
def removeHandler(self, event_name):
    """Detach the handler for *event_name* (the slot stays, reset to None).

    Raises:
        ValueError: if event_name is not a known event.
    """
    registry = self.handlers
    if event_name in registry:
        registry[event_name] = None
    else:
        raise ValueError('{} is not a valid event'.format(event_name))
Remove handler for given event .
12,284
def _get_fct_number_of_arg ( self , fct ) : py_version = sys . version_info [ 0 ] if py_version >= 3 : return len ( inspect . signature ( fct ) . parameters ) return len ( inspect . getargspec ( fct ) [ 0 ] )
Get the number of argument of a fuction .
12,285
def event_tracker(func):
    """Wrap an async service handler with started/completed event tracking."""
    @wraps(func)
    async def wrapper(*args, **kwargs):
        event = Event(args[0])
        service_name = kwargs['session'].name
        # Mark the event started, run the handler, then mark it completed.
        await track_event(event, EventState.started, service_name)
        await func(*args, **kwargs)
        await track_event(event, EventState.completed, service_name)
    return wrapper
Event tracking handler
12,286
def ensure_ajax(valid_request_methods, error_response_context=None):
    """Decorator factory: reject non-AJAX or wrong-method requests with a JSON error.

    Args:
        valid_request_methods: iterable of allowed HTTP methods, e.g. ('GET',).
        error_response_context: extra context merged into the error payload.
    """
    def real_decorator(view_func):
        # Local import: this module's top-level imports are not guaranteed
        # to include functools.
        import functools

        # functools.wraps preserves __name__/__doc__ like the original's
        # manual copy did, plus __module__, __qualname__ and __wrapped__.
        @functools.wraps(view_func)
        def wrap_func(request, *args, **kwargs):
            if not isinstance(request, HttpRequest):
                return generate_error_json_response(
                    "Invalid request!", error_response_context)
            elif not request.is_ajax():
                return generate_error_json_response(
                    "Invalid request type!", error_response_context)
            elif request.method not in valid_request_methods:
                return generate_error_json_response(
                    "Invalid request method!", error_response_context)
            else:
                return view_func(request, *args, **kwargs)
        return wrap_func
    return real_decorator
Ensure the received request is an AJAX request and that its HTTP method is one of the valid request methods.
12,287
def generate_error_json_response(error_dict, error_response_context=None):
    """Build a JsonResponse describing an error.

    Args:
        error_dict: either a message string or a pre-built error dict.
        error_response_context: extra keys merged in; defaults to the
            DataTables-style empty-result payload.

    Returns:
        JsonResponse containing the merged error payload.
    """
    if isinstance(error_dict, str):
        response = {"error": error_dict}
    else:
        # Copy so the caller's dict is not mutated by update() below
        # (the original wrote the context keys into the caller's object).
        response = dict(error_dict)
    if error_response_context is None:
        error_response_context = {
            'draw': 0,
            'recordsTotal': 0,
            'recordsFiltered': 0,
            'data': [],
        }
    response.update(error_response_context)
    return JsonResponse(response)
Build an error JSON response. If error_response_context is None, the response is generated using the DataTables format.
12,288
def _mergeGoSymbols ( self , jsons = [ ] ) : symbols = { } symbols [ "types" ] = [ ] symbols [ "funcs" ] = [ ] symbols [ "vars" ] = [ ] for file_json in jsons : symbols [ "types" ] += file_json [ "types" ] symbols [ "funcs" ] += file_json [ "funcs" ] symbols [ "vars" ] += file_json [ "vars" ] return symbols
Exported symbols for a given package does not have any prefix . So I can drop all import paths that are file specific and merge all symbols . Assuming all files in the given package has mutual exclusive symbols .
12,289
def read(self, n):
    """Read *n* bytes from the mapped view at the current position.

    Copies *n* bytes out of a Windows memory-mapped view starting at
    ``self.view + self.pos`` and advances ``self.pos`` past them.
    Windows-only: relies on kernel32's RtlMoveMemory.

    Args:
        n (int): number of bytes to read.

    Returns:
        bytes: the raw bytes copied from the view.
    """
    out = ctypes.create_string_buffer(n)
    # NOTE(review): assumes self.view is a base address into a mapped region
    # with at least n bytes remaining past self.pos — no bounds check here.
    ctypes.windll.kernel32.RtlMoveMemory(out, self.view + self.pos, n)
    self.pos += n
    return out.raw
Read n bytes from mapped view .
12,290
def _output(cls, fluents: Sequence[FluentPair]) -> Sequence[tf.Tensor]:
    '''Converts fluents to tensors with datatype tf.float32.'''
    def as_float32(pair):
        # Cast only when the fluent's tensor is not already float32.
        tensor = pair[1].tensor
        if tensor.dtype == tf.float32:
            return tensor
        return tf.cast(tensor, tf.float32)
    return tuple(as_float32(pair) for pair in fluents)
Converts fluents to tensors with datatype tf . float32 .
12,291
def set(self, key, value):
    """Store a hyperparameter under *key* and return the whole store."""
    self.store.update({key: value})
    return self.store
Sets a hyperparameter . Can be used to set an array of hyperparameters .
12,292
def config_at(self, i):
    """Return the i-th Config from the cartesian product of list-valued options.

    Treats *i* as a mixed-radix number: each list-valued entry consumes one
    "digit" whose base is the list's length; scalar entries pass through.
    """
    selections = {}
    index = i
    for key, value in self.store.items():
        if isinstance(value, list):
            selections[key] = value[index % len(value)]
            index //= len(value)
        else:
            selections[key] = value
    return Config(selections)
Gets the ith config
12,293
def top(self, sort_by):
    """Return self.results ordered by the supplied key function."""
    return sorted(self.results, key=sort_by)
Get the best results according to your custom sort method .
12,294
def load_or_create_config(self, filename, config=None):
    """Load a config from disk, creating (and saving) one if absent.

    Args:
        filename: path to the config file. '~' is expanded once and used
            consistently — the original expanded it only for the makedirs
            call, so a '~/...' path was created in one place but read and
            written in another.
        config: config to save when the file does not exist; defaults to
            a freshly generated random config.

    Returns:
        The loaded or newly saved config.
    """
    filename = os.path.expanduser(filename)
    directory = os.path.dirname(filename)
    if directory:  # os.makedirs('') raises for bare filenames
        os.makedirs(directory, exist_ok=True)
    if os.path.exists(filename):
        return self.load(filename)
    if config is None:  # identity check, not '== None'
        config = self.random_config()
    self.save(filename, config)
    return config
Loads a config from disk . Defaults to a random config if none is specified
12,295
def configure(self, repositories):
    """Prepare the system to be ready for an undercloud installation.

    Runs the host-preparation steps in order: enable the given package
    repositories, create the 'stack' user, install base packages, clean
    the system, yum-update (allowing a reboot if the update needs one),
    install OSP, switch SELinux to permissive, and fix the hostname.

    Args:
        repositories: repository definitions for enable_repositories().
    """
    self.enable_repositories(repositories)
    self.create_stack_user()
    self.install_base_packages()
    self.clean_system()
    self.yum_update(allow_reboot=True)
    self.install_osp()
    self.set_selinux('permissive')
    self.fix_hostname()
Prepare the system to be ready for an undercloud installation .
12,296
def openstack_undercloud_install(self):
    """Deploy an undercloud on the host.

    Applies two build-specific workarounds around the actual
    'openstack undercloud install' run, then sanity-checks the result by
    sourcing stackrc and listing heat stacks.
    """
    # Workaround for BZ1298189: patch the puppet manifest ordering, but only
    # when the exact affected instack-undercloud build is installed.
    instack_undercloud_ver, _ = self.run('repoquery --whatprovides /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp')
    if instack_undercloud_ver.rstrip('\n') == 'instack-undercloud-0:2.2.0-1.el7ost.noarch':
        LOG.warn('Workaround for BZ1298189')
        self.run("sed -i \"s/.*Keystone_domain\['heat_domain'\].*/Service\['keystone'\] -> Class\['::keystone::roles::admin'\] -> Class\['::heat::keystone::domain'\]/\" /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp")
    self.run('OS_PASSWORD=bob openstack undercloud install', user='stack')
    # Workaround for BZ1297796: this ironic-api build may fail to start its
    # service on its own.
    if self.run('rpm -qa openstack-ironic-api')[0].rstrip('\n') == 'openstack-ironic-api-4.2.2-3.el7ost.noarch':
        LOG.warn('Workaround for BZ1297796')
        self.run('systemctl start openstack-ironic-api.service')
    self.add_environment_file(user='stack', filename='stackrc')
    # Smoke test: listing stacks confirms the undercloud services respond.
    self.run('heat stack-list', user='stack')
Deploy an undercloud on the host .
12,297
def create_flavor(self, name):
    """Create a baremetal flavor called *name* and tag it for profile matching.

    Bug fix: the original accepted *name* but hard-coded 'baremetal' in
    every command, silently ignoring the parameter. The flavor name is now
    used for the create/set commands and as the capabilities profile
    (identical commands for the previous name='baremetal' call pattern).

    Args:
        name: flavor (and profile) name, e.g. 'baremetal'.
    """
    self.add_environment_file(user='stack', filename='stackrc')
    self.run(
        'openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 %s' % name,
        user='stack',
        success_status=(0, 1))  # status 1: flavor already exists
    self.run(
        'openstack flavor set --property "cpu_arch"="x86_64" '
        '--property "capabilities:boot_option"="local" %s' % name,
        user='stack')
    self.run(
        'openstack flavor set --property "capabilities:profile"="%s" %s' % (name, name),
        user='stack')
Create a new baremetal flavor .
12,298
def list_nodes(self):
    """Return the UUIDs of all Ironic nodes as a list of strings."""
    self.add_environment_file(user='stack', filename='stackrc')
    stdout, _ = self.run(
        "ironic node-list --fields uuid|awk '/-.*-/ {print $2}'",
        user='stack')
    return stdout.split()
List the Ironic nodes UUID .
12,299
def set_flavor(self, node, flavor):
    """Tag an Ironic node's capabilities with the given profile flavor.

    Also records the flavor on the node object itself.
    """
    node.flavor = flavor
    self.add_environment_file(user='stack', filename='stackrc')
    self.run(
        ('ironic node-update {uuid} add '
         'properties/capabilities=profile:{flavor},boot_option:local').format(
            uuid=node.uuid, flavor=flavor),
        user='stack')
Set a flavor to a given ironic node .