idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
18,700
def get_extended_source_fluxes(self, id, j2000_ra, j2000_dec, energies):
    """
    Get the flux of the id-th extended source at the given position at the given energies.

    :param id: index of the extended source
    :param j2000_ra: J2000 right ascension
    :param j2000_dec: J2000 declination
    :param energies: energies at which to evaluate the flux
    :return: the fluxes returned by the source callable

    Fixed for Python 3 compatibility: dict.values() returns a non-indexable
    view there, so it is materialized with list() before indexing (a no-op
    behavior change on Python 2).
    """
    return list(self._extended_sources.values())[id](j2000_ra, j2000_dec, energies)
Get the flux of the id - th extended sources at the given position at the given energies
18,701
def long_path_formatter(line, max_width=pd.get_option('max_colwidth')):
    """
    If a path is longer than max_width, substitute it with its first and last
    elements joined by '...'. For example this.is.a.long.path.which.we.want.to.shorten
    becomes this...shorten. If even that is too long, keep only the trailing
    characters of the last element, prefixed with '...'.

    :param line: dotted path string
    :param max_width: maximum allowed width (default: pandas 'max_colwidth' option)
    :return: the (possibly shortened) string
    """
    if len(line) > max_width:
        tokens = line.split(".")
        trial1 = "%s...%s" % (tokens[0], tokens[-1])

        if len(trial1) > max_width:
            # BUG FIX: the original slice tokens[-1][-1:-(max_width - 3)]
            # always produced an empty string; take the trailing
            # (max_width - 3) characters of the last token instead.
            return "...%s" % (tokens[-1][-(max_width - 3):])
        else:
            return trial1
    else:
        return line
If a path is longer than max_width, substitute it with its first and last elements joined by '...'. For example, this.is.a.long.path.which.we.want.to.shorten becomes this...shorten.
18,702
def has_free_parameters(self):
    """
    Returns True if any parameter in this source is free (in the spectral
    shape of any component, or in the position), False otherwise.
    """
    # Check the spectral parameters of every component first
    for component in self._components.values():
        if any(par.free for par in component.shape.parameters.values()):
            return True

    # Then the positional parameters
    return any(par.free for par in self.position.parameters.values())
Returns True or False whether there is any parameter in this source
18,703
def _repr__base(self, rich_output=False):
    """
    Representation of the point source: its position first, then the spectrum
    of each component.

    NOTE(review): uses dict.iteritems(), i.e. this code is Python 2 only.
    """
    repr_dict = collections.OrderedDict()

    key = '%s (point source)' % self.name

    repr_dict[key] = collections.OrderedDict()
    repr_dict[key]['position'] = self._sky_position.to_dict(minimal=True)
    repr_dict[key]['spectrum'] = collections.OrderedDict()

    for component_name, component in self.components.iteritems():
        repr_dict[key]['spectrum'][component_name] = component.to_dict(minimal=True)

    return dict_to_list(repr_dict, rich_output)
Representation of the object
18,704
def get_function(function_name, composite_function_expression=None):
    """
    Returns an instance of the function with the given name (which must be
    among the known functions), or of a composite function when a composite
    function expression is provided. Unknown names are tried as template
    models before giving up.

    :raises UnknownFunction: if the name cannot be resolved
    """
    if composite_function_expression is not None:
        # A composite expression takes precedence over the plain name
        return _parse_function_expression(composite_function_expression)

    if function_name in _known_functions:
        return _known_functions[function_name]()

    # Not a registered function: perhaps it is a template model on disk
    from astromodels.functions.template_model import TemplateModel, MissingDataFile

    try:
        instance = TemplateModel(function_name)
    except MissingDataFile:
        raise UnknownFunction("Function %s is not known. Known functions are: %s"
                              % (function_name, ",".join(_known_functions.keys())))
    else:
        return instance
Returns an instance of the function with the given name, which must be among the known functions, or a composite function built from the given expression.
18,705
def get_function_class(function_name):
    """
    Return the type (class) of the requested function.

    :raises UnknownFunction: if the name is not among the known functions
    """
    if function_name not in _known_functions:
        raise UnknownFunction("Function %s is not known. Known functions are: %s"
                              % (function_name, ",".join(_known_functions.keys())))

    return _known_functions[function_name]
Return the type for the requested function
18,706
def check_calling_sequence(name, function_name, function, possible_variables):
    """
    Check the calling sequence for the function, looking for the variables
    specified. One or more of possible_variables can appear; their order is
    enforced, and the first argument must be 'self'.

    :param name: name of the class (for error messages)
    :param function_name: name of the member being checked (for error messages)
    :param function: the callable (or an object with .input_object) to inspect
    :param possible_variables: ordered list of allowed variable names
    :return: (variables, other_parameters)

    Fixes: inspect.getargspec was removed in Python 3.11, so getfullargspec
    is preferred when available; filter() results are materialized as lists
    so len() and list comparison work on Python 3 as they did on Python 2.
    """
    try:
        _getargs = inspect.getfullargspec
    except AttributeError:
        # Python 2 has no getfullargspec
        _getargs = inspect.getargspec

    try:
        calling_sequence = _getargs(function.input_object).args
    except AttributeError:
        calling_sequence = _getargs(function).args

    assert calling_sequence[0] == 'self', "Wrong syntax for 'evaluate' in %s. The first argument " \
                                          "should be called 'self'." % name

    # Names of the variables used, in the order they appear in the signature
    variables = [var for var in calling_sequence if var in possible_variables]

    assert len(variables) > 0, "The name of the variables for 'evaluate' in %s must be one or more " \
                               "among %s, instead of %s" % (name,
                                                            ','.join(possible_variables),
                                                            ",".join(variables))

    if variables != list(possible_variables[:len(variables)]):
        raise AssertionError("The variables %s are out of order in '%s' of %s. Should be %s."
                             % (",".join(variables), function_name, name,
                                possible_variables[:len(variables)]))

    other_parameters = [var for var in calling_sequence
                        if var not in variables and var != 'self']

    return variables, other_parameters
Check the calling sequence for the function looking for the variables specified . One or more of the variables can be in the calling sequence . Note that the order of the variables will be enforced . It will also enforce that the first parameter in the calling sequence is called self .
18,707
def free_parameters(self):
    """
    Returns an ordered dictionary of the free parameters of this function.

    Fixed for Python 3 compatibility: dict.iteritems() (Python 2 only) is
    replaced by dict.items(); behavior is unchanged.
    """
    free_parameters = collections.OrderedDict([(k, v)
                                               for k, v in self.parameters.items()
                                               if v.free])

    return free_parameters
Returns a dictionary of free parameters for this function
18,708
def _get_data_file_path(data_file):
    """
    Returns the absolute path to the required data file shipped with the
    astromodels package.

    :raises IOError: if the data file cannot be located
    """
    try:
        file_path = pkg_resources.resource_filename("astromodels", 'data/%s' % data_file)
    except KeyError:
        raise IOError("Could not read or find data file %s. Try reinstalling astromodels. If this does not fix your "
                      "problem, open an issue on github." % (data_file))
    else:
        return os.path.abspath(file_path)
Returns the absolute path to the required data files .
18,709
def _setup(self):
    """
    Load the gammamc dN/dE table and build an interpolator for the currently
    selected annihilation channel. The mapping below translates the
    user-facing channel codes into rows of the gammamc file.
    """
    tablepath = _get_data_file_path("dark_matter/gammamc_dif.dat")
    self._data = np.loadtxt(tablepath)

    # channel code -> row index in the gammamc file
    channel_index_mapping = {
        1: 8,
        2: 6,
        3: 3,
        4: 1,
        5: 2,
        6: 7,
        7: 4,
        8: 5,
        9: 0,
        10: 10,
        11: 11,
        12: 9,
    }

    # 250 bin centers spanning `ndec` decades (in log10(E/mass)) below zero
    ndec = 10.0
    xedge = np.linspace(0, 1.0, 251)
    self._x = 0.5 * (xedge[1:] + xedge[:-1]) * ndec - ndec

    ichan = channel_index_mapping[int(self.channel.value)]

    # tabulated masses of the gammamc file (presumably GeV -- confirm)
    self._mass = np.array([2.0, 4.0, 6.0, 8.0, 10.0,
                           25.0, 50.0, 80.3, 91.2, 100.0,
                           150.0, 176.0, 200.0, 250.0, 350.0,
                           500.0, 750.0, 1000.0, 1500.0, 2000.0,
                           3000.0, 5000.0, 7000.0, 1E4])

    self._dn = self._data.reshape((12, 24, 250))

    self._dn_interp = RegularGridInterpolator([self._mass, self._x],
                                              self._dn[ichan, :, :],
                                              bounds_error=False,
                                              fill_value=None)

    # print(...) with a single argument behaves identically on Python 2 and 3
    if self.mass.value > 10000:
        print("Warning: DMFitFunction only appropriate for masses <= 10 TeV")
        print("To model DM from 2 GeV < mass < 1 PeV use DMSpectra")
Mapping between the channel codes and the rows in the gammamc file
18,710
def _setup(self):
    """
    Load both the low-mass (gammamc_dif.dat) and high-mass (dmSpecTab.npy)
    dN/dE tables, stitch them together into one mass grid, and build an
    interpolator for the currently selected channel. dmSpecTab.npy was
    created to match the same channel mapping.
    """
    tablepath_h = _get_data_file_path("dark_matter/dmSpecTab.npy")
    self._data_h = np.load(tablepath_h)

    tablepath_f = _get_data_file_path("dark_matter/gammamc_dif.dat")
    self._data_f = np.loadtxt(tablepath_f)

    # channel code -> table row (shared by both tables)
    channel_index_mapping = {
        1: 8,
        2: 6,
        3: 3,
        4: 1,
        5: 2,
        6: 7,
        7: 4,
        8: 5,
        9: 0,
        10: 10,
        11: 11,
        12: 9,
    }

    # 250 bin centers spanning `ndec` decades (in log10(E/mass)) below zero
    ndec = 10.0
    xedge = np.linspace(0, 1.0, 251)
    self._x = 0.5 * (xedge[1:] + xedge[:-1]) * ndec - ndec

    ichan = channel_index_mapping[int(self.channel.value)]

    # mass grids of the two tables; the combined grid uses the low-mass table
    # entirely and the high-mass table from index 27 onward
    self._mass_h = np.array([50., 61.2, 74.91, 91.69, 112.22, 137.36, 168.12, 205.78, 251.87,
                             308.29, 377.34, 461.86, 565.31, 691.93, 846.91, 1036.6, 1268.78,
                             1552.97, 1900.82, 2326.57, 2847.69, 3485.53, 4266.23, 5221.81,
                             6391.41, 7823.0, 9575.23, 11719.94, 14345.03, 17558.1, 21490.85,
                             26304.48, 32196.3, 39407.79, 48234.54, 59038.36, 72262.07, 88447.7,
                             108258.66, 132506.99, 162186.57, 198513.95, 242978.11, 297401.58,
                             364015.09, 445549.04, 545345.37, 667494.6, 817003.43, 1000000.])

    self._mass_f = np.array([2.0, 4.0, 6.0, 8.0, 10.0,
                             25.0, 50.0, 80.3, 91.2, 100.0,
                             150.0, 176.0, 200.0, 250.0, 350.0,
                             500.0, 750.0, 1000.0, 1500.0, 2000.0,
                             3000.0, 5000.0, 7000.0, 1E4])

    self._mass = np.append(self._mass_f, self._mass_h[27:])

    self._dn_f = self._data_f.reshape((12, 24, 250))
    self._dn_h = self._data_h

    # Combined table: low-mass entries first, then the high-mass tail
    self._dn = np.zeros((12, len(self._mass), 250))
    self._dn[:, 0:24, :] = self._dn_f
    self._dn[:, 24:, :] = self._dn_h[:, 27:, :]

    self._dn_interp = RegularGridInterpolator([self._mass, self._x],
                                              self._dn[ichan, :, :],
                                              bounds_error=False,
                                              fill_value=None)

    # print(...) with a single argument behaves identically on Python 2 and 3
    if self.channel.value in [1, 6, 7] and self.mass.value > 10000.:
        print("ERROR: currently spectra for selected channel and mass not implemented.")
        print("Spectra for channels ['ee','gg','WW'] currently not available for mass > 10 TeV")
Mapping between the channel codes and the rows in the gammamc file; dmSpecTab.npy was created to match this mapping as well.
18,711
def is_valid_variable_name(string_to_check):
    """
    Returns whether the provided name is a valid variable name in Python.

    :param string_to_check: candidate name
    :return: True if assigning to the name parses, False otherwise

    Fixed: uses the stdlib ast module; the bare `parse` the original relied
    on came from the `compiler` package, which was removed in Python 3.
    """
    import ast

    try:
        ast.parse('{} = None'.format(string_to_check))
        return True
    except (SyntaxError, ValueError, TypeError):
        return False
Returns whether the provided name is a valid variable name in Python
18,712
def _check_unit ( new_unit , old_unit ) : try : new_unit . physical_type except AttributeError : raise UnitMismatch ( "The provided unit (%s) has no physical type. Was expecting a unit for %s" % ( new_unit , old_unit . physical_type ) ) if new_unit . physical_type != old_unit . physical_type : raise UnitMismatch ( "Physical type mismatch: you provided a unit for %s instead of a unit for %s" % ( new_unit . physical_type , old_unit . physical_type ) )
Check that the new unit is compatible with the old unit for the quantity described by variable_name
18,713
def peak_energy(self):
    """
    Returns the peak energy in the nuFnu spectrum.

    :return: peak energy, in the same units as the pivot energy
    """
    # E_peak = E_piv * 10 ** ((2 + alpha) * ln(10) / (2 * beta))
    exponent = ((2 + self.alpha.value) * np.log(10)) / (2 * self.beta.value)

    return self.piv.value * pow(10, exponent)
Returns the peak energy in the nuFnu spectrum
18,714
def in_unit_of(self, unit, as_quantity=False):
    """
    Return the current value transformed to the new units.

    :param unit: an astropy unit, or a string parseable as one
    :param as_quantity: if True return an astropy Quantity, otherwise a bare value
    """
    target_unit = u.Unit(unit)

    converted = self.as_quantity.to(target_unit)

    if as_quantity:
        return converted

    return converted.value
Return the current value transformed to the new units
18,715
def _get_value ( self ) : if self . _aux_variable : return self . _aux_variable [ 'law' ] ( self . _aux_variable [ 'variable' ] . value ) if self . _transformation is None : return self . _internal_value else : return self . _transformation . backward ( self . _internal_value )
Return current parameter value
18,716
def _set_value ( self , new_value ) : if self . min_value is not None and new_value < self . min_value : raise SettingOutOfBounds ( "Trying to set parameter {0} = {1}, which is less than the minimum allowed {2}" . format ( self . name , new_value , self . min_value ) ) if self . max_value is not None and new_value > self . max_value : raise SettingOutOfBounds ( "Trying to set parameter {0} = {1}, which is more than the maximum allowed {2}" . format ( self . name , new_value , self . max_value ) ) if self . has_auxiliary_variable ( ) : with warnings . catch_warnings ( ) : warnings . simplefilter ( "always" , RuntimeWarning ) warnings . warn ( "You are trying to assign to a parameter which is either linked or " "has auxiliary variables. The assignment has no effect." , RuntimeWarning ) if self . _transformation is None : new_internal_value = new_value else : new_internal_value = self . _transformation . forward ( new_value ) if new_internal_value != self . _internal_value : self . _internal_value = new_internal_value for callback in self . _callbacks : try : callback ( self ) except : raise NotCallableOrErrorInCall ( "Could not call callback for parameter %s" % self . name )
Sets the current value of the parameter ensuring that it is within the allowed range .
18,717
def _set_internal_value ( self , new_internal_value ) : if new_internal_value != self . _internal_value : self . _internal_value = new_internal_value for callback in self . _callbacks : callback ( self )
This is supposed to be only used by fitting engines
18,718
def _set_min_value ( self , min_value ) : if self . _transformation is not None : if min_value is not None : try : _ = self . _transformation . forward ( min_value ) except FloatingPointError : raise ValueError ( "The provided minimum %s cannot be transformed with the transformation %s which " "is defined for the parameter %s" % ( min_value , type ( self . _transformation ) , self . path ) ) self . _external_min_value = min_value if self . _external_min_value is not None and self . value < self . _external_min_value : warnings . warn ( "The current value of the parameter %s (%s) " "was below the new minimum %s." % ( self . name , self . value , self . _external_min_value ) , exceptions . RuntimeWarning ) self . value = self . _external_min_value
Sets current minimum allowed value
18,719
def _set_max_value ( self , max_value ) : self . _external_max_value = max_value if self . _external_max_value is not None and self . value > self . _external_max_value : warnings . warn ( "The current value of the parameter %s (%s) " "was above the new maximum %s." % ( self . name , self . value , self . _external_max_value ) , exceptions . RuntimeWarning ) self . value = self . _external_max_value
Sets current maximum allowed value
18,720
def _set_bounds ( self , bounds ) : min_value , max_value = bounds self . min_value = None self . max_value = None self . min_value = min_value self . max_value = max_value
Sets the boundaries for this parameter to min_value and max_value
18,721
def _set_prior(self, prior):
    """
    Set prior for this parameter. The prior must be a function accepting the
    current value of the parameter as input and returning the probability
    density as output. Passing None removes the prior.

    :raises NotCallableOrErrorInCall: if the prior is not callable or is not
        an astromodels function (no set_units method)
    """
    if prior is None:
        self._prior = None
        return

    # Must be callable on the current value
    try:
        _ = prior(self.value)
    except:
        raise NotCallableOrErrorInCall("Could not call the provided prior. " +
                                       "Is it a function accepting the current value of the parameter?")

    # Must be an astromodels function, i.e. support set_units
    try:
        prior.set_units(self.unit, u.dimensionless_unscaled)
    except AttributeError:
        raise NotCallableOrErrorInCall("It looks like the provided prior is not a astromodels function.")

    self._prior = prior
Set prior for this parameter . The prior must be a function accepting the current value of the parameter as input and giving the probability density as output .
18,722
def set_uninformative_prior(self, prior_class):
    """
    Sets the prior for the parameter to an instance of prior_class spanning
    the current minimum and maximum (e.g. a uniform or log-uniform prior).
    Both bounds must be defined and finite.

    :param prior_class: a prior class to instantiate
    :raises ParameterMustHaveBounds: if either bound is not defined
    :raises SettingOutOfBounds: if a bound is not usable by the prior
    """
    prior_instance = prior_class()

    if self.min_value is None:
        raise ParameterMustHaveBounds("Parameter %s does not have a defined minimum. Set one first, then re-run "
                                      "set_uninformative_prior" % self.path)

    try:
        prior_instance.lower_bound = self.min_value
    except SettingOutOfBounds:
        raise SettingOutOfBounds("Cannot use minimum of %s for prior %s"
                                 % (self.min_value, prior_instance.name))

    if self.max_value is None:
        raise ParameterMustHaveBounds("Parameter %s does not have a defined maximum. Set one first, then re-run "
                                      "set_uninformative_prior" % self.path)

    try:
        prior_instance.upper_bound = self.max_value
    except SettingOutOfBounds:
        raise SettingOutOfBounds("Cannot use maximum of %s for prior %s"
                                 % (self.max_value, prior_instance.name))

    assert np.isfinite(prior_instance.upper_bound.value), \
        "The parameter %s must have a finite maximum" % self.name
    assert np.isfinite(prior_instance.lower_bound.value), \
        "The parameter %s must have a finite minimum" % self.name

    self._set_prior(prior_instance)
Sets the prior for the parameter to a uniform prior between the current minimum and maximum or a log - uniform prior between the current minimum and maximum .
18,723
def remove_auxiliary_variable(self):
    """
    Remove an existing auxiliary variable, restoring the parameter's previous
    free/fixed status. Warns (without failing) if none is linked.
    """
    if not self.has_auxiliary_variable():
        # Nothing to remove
        warnings.warn("Cannot remove a non-existing auxiliary variable", RuntimeWarning)
        return

    # Unlink the law from the tree of children
    self._remove_child(self._aux_variable['law'].name)

    self._aux_variable = {}

    # Restore the status the parameter had before the variable was linked
    self.free = self._old_free
Remove an existing auxiliary variable
18,724
def _get_child_from_path ( self , path ) : keys = path . split ( "." ) this_child = self for key in keys : try : this_child = this_child . _get_child ( key ) except KeyError : raise KeyError ( "Child %s not found" % path ) return this_child
Return a children below this level starting from a path of the kind this_level . something . something . name
18,725
def _find_instances ( self , cls ) : instances = collections . OrderedDict ( ) for child_name , child in self . _children . iteritems ( ) : if isinstance ( child , cls ) : key_name = "." . join ( child . _get_path ( ) ) instances [ key_name ] = child if child . _children : instances . update ( child . _find_instances ( cls ) ) else : instances . update ( child . _find_instances ( cls ) ) return instances
Find all the instances of cls below this node .
18,726
def find_library(library_root, additional_places=None):
    """
    Returns the name of the library without extension and, when known, the
    directory containing it.

    Looks first in the standard system locations (via ctypes.util), then in
    LD_LIBRARY_PATH / DYLD_LIBRARY_PATH and any additional_places.

    :return: (sanitized_name, directory) or (None, None) if not found
    :raises NotImplementedError: on platforms other than Linux and macOS
    """
    first_guess = ctypes.util.find_library(library_root)

    if first_guess is not None:
        # Library found in the system paths
        if sys.platform.lower().find("linux") >= 0:
            return sanitize_lib_name(first_guess), None
        elif sys.platform.lower().find("darwin") >= 0:
            return sanitize_lib_name(first_guess), os.path.dirname(first_guess)
        else:
            raise NotImplementedError("Platform %s is not supported" % sys.platform)

    # Not found by ctypes: search the dynamic-loader path ourselves
    if sys.platform.lower().find("linux") >= 0:
        possible_locations = os.environ.get("LD_LIBRARY_PATH", "").split(":")
    elif sys.platform.lower().find("darwin") >= 0:
        possible_locations = os.environ.get("DYLD_LIBRARY_PATH", "").split(":")
    else:
        raise NotImplementedError("Platform %s is not supported" % sys.platform)

    if additional_places is not None:
        possible_locations.extend(additional_places)

    library_name = None
    library_dir = None

    for search_path in possible_locations:
        if search_path == "":
            continue

        results = glob.glob(os.path.join(search_path, "lib%s*" % library_root))

        if len(results) >= 1:
            # Accept only names of the form lib<root>-, lib<root>_ or lib<root>.
            for result in results:
                if re.match("lib%s[\-_\.]" % library_root, os.path.basename(result)) is None:
                    continue
                else:
                    library_name = result
                    library_dir = search_path
                    break
            else:
                continue

        if library_name is not None:
            break

    if library_name is None:
        return None, None
    else:
        return sanitize_lib_name(library_name), library_dir
Returns the name of the library without extension
18,727
def dict_to_table(dictionary, list_of_keys=None):
    """
    Return a Table representing the dictionary of dictionaries (one row per
    entry, one column per key of the inner dictionaries).

    Fixed for Python 3 compatibility: dict views are materialized with
    list() before indexing, and filter()/map() results are realized as
    lists; behavior on Python 2 is unchanged.

    :param dictionary: mapping name -> (mapping column -> value)
    :param list_of_keys: optional subset of columns to include
    """
    table = Table()

    if len(dictionary) > 0:
        table['name'] = list(dictionary.keys())

        # All rows are assumed to share the keys of the first entry
        prototype = list(dictionary.values())[0]
        column_names = list(prototype.keys())

        # If necessary, keep only the requested columns
        if list_of_keys is not None:
            column_names = [key for key in column_names if key in list_of_keys]

        for column_name in column_names:
            table[column_name] = [x[column_name] for x in dictionary.values()]

    return table
Return a table representing the dictionary .
18,728
def _base_repr_ ( self , html = False , show_name = True , ** kwargs ) : table_id = 'table{id}' . format ( id = id ( self ) ) data_lines , outs = self . formatter . _pformat_table ( self , tableid = table_id , html = html , max_width = ( - 1 if html else None ) , show_name = show_name , show_unit = None , show_dtype = False ) out = '\n' . join ( data_lines ) return out
Override the method in the astropy . Table class to avoid displaying the description and the format of the columns
18,729
def fetch_cache_key(request):
    """Returns a hashed cache key derived from the request body."""
    digest = hashlib.md5()
    digest.update(request.body)

    return digest.hexdigest()
Returns a hashed cache key .
18,730
def dispatch(self, request, *args, **kwargs):
    """
    Serve GraphQL responses from the cache when possible.

    Mutations invalidate the whole cache; other operations are looked up by
    a key hashed from the request body and stored with the configured
    timeout. When caching is disabled, falls straight through.
    """
    if not graphql_api_settings.CACHE_ACTIVE:
        return self.super_call(request, *args, **kwargs)

    cache = caches["default"]
    operation_ast = self.get_operation_ast(request)

    if operation_ast and operation_ast.operation == "mutation":
        # A mutation may change any cached query result: drop them all
        cache.clear()
        return self.super_call(request, *args, **kwargs)

    # NOTE(review): "_graplql_" looks like a typo, but it is a live cache-key
    # prefix, so it is preserved byte-for-byte.
    cache_key = "_graplql_{}".format(self.fetch_cache_key(request))
    response = cache.get(cache_key)

    if not response:
        response = self.super_call(request, *args, **kwargs)
        cache.set(cache_key, response, timeout=graphql_api_settings.CACHE_TIMEOUT)

    return response
Fetches queried data from graphql and returns cached & hashed key .
18,731
def _parse(partial_dt):
    """
    Parse a partial datetime-like object (datetime, date, time, timestamp
    number, or string) into a complete timezone-aware datetime, or None if
    parsing fails.
    """
    dt = None

    try:
        # NOTE(review): these checks are sequential (not elif) as in the
        # original; a datetime also matches the date branch.
        if isinstance(partial_dt, datetime):
            dt = partial_dt
        if isinstance(partial_dt, date):
            dt = _combine_date_time(partial_dt, time(0, 0, 0))
        if isinstance(partial_dt, time):
            dt = _combine_date_time(date.today(), partial_dt)
        if isinstance(partial_dt, (int, float)):
            dt = datetime.fromtimestamp(partial_dt)
        if isinstance(partial_dt, (str, bytes)):
            dt = parser.parse(partial_dt, default=timezone.now())

        if dt is not None and timezone.is_naive(dt):
            dt = timezone.make_aware(dt)
        return dt
    except ValueError:
        return None
parse a partial datetime object to a complete datetime object
18,732
def clean_dict(d):
    """
    Remove all empty (falsy) fields in a nested dict or list, recursively.

    Scalars are returned unchanged; lists keep only truthy cleaned items;
    dicts come back as OrderedDicts with falsy values dropped.
    """
    if not isinstance(d, (dict, list)):
        return d

    if isinstance(d, list):
        return [item for item in map(clean_dict, d) if item]

    cleaned_pairs = ((key, clean_dict(value)) for key, value in list(d.items()))
    return OrderedDict((key, value) for key, value in cleaned_pairs if value)
Remove all empty fields in a nested dict
18,733
def _get_queryset(klass):
    """
    Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
    get_object_or_404 and get_list_or_404 more DRY.

    :raises ValueError: if klass is none of the accepted types
    """
    if isinstance(klass, QuerySet):
        return klass

    if isinstance(klass, Manager):
        manager = klass
    elif isinstance(klass, ModelBase):
        manager = klass._default_manager
    else:
        # Build a readable type name for the error message
        if isinstance(klass, type):
            klass__name = klass.__name__
        else:
            klass__name = klass.__class__.__name__
        raise ValueError("Object is of type '{}', but must be a Django Model, "
                         "Manager, or QuerySet".format(klass__name))

    return manager.all()
Returns a QuerySet from a Model Manager or QuerySet . Created to make get_object_or_404 and get_list_or_404 more DRY .
18,734
def find_schema_paths(schema_files_path=DEFAULT_SCHEMA_FILES_PATH):
    """
    Searches the locations in the SCHEMA_FILES_PATH to try to find where the
    schema SQL files are located.

    :return: the subset of schema_files_path that are existing directories
    :raises SchemaFilesNotFound: if no existing directory is found
    """
    existing = [path for path in schema_files_path if os.path.isdir(path)]

    if existing:
        return existing

    raise SchemaFilesNotFound("Searched " + os.pathsep.join(schema_files_path))
Searches the locations in the SCHEMA_FILES_PATH to try to find where the schema SQL files are located .
18,735
def run():
    """
    Exposes a CLI to configure the SharQ Server and runs the server.

    NOTE(review): the original contained `print % (__version__, bind)`, a
    syntax error where the format string was evidently lost. A plausible
    startup message has been reconstructed below -- confirm the intended
    wording against the project history.
    """
    parser = argparse.ArgumentParser(description='SharQ Server.')
    parser.add_argument('-c', '--config', action='store', required=True,
                        help='Absolute path of the SharQ configuration file.',
                        dest='sharq_config')
    parser.add_argument('-gc', '--gunicorn-config', action='store', required=False,
                        help='Gunicorn configuration file.',
                        dest='gunicorn_config')
    parser.add_argument('--version', action='version',
                        version='SharQ Server %s' % __version__)
    args = parser.parse_args()

    # Read the SharQ configuration to build the gunicorn options
    config_parser = ConfigParser.SafeConfigParser()
    sharq_config = os.path.abspath(args.sharq_config)
    config_parser.read(sharq_config)

    host = config_parser.get('sharq-server', 'host')
    port = config_parser.get('sharq-server', 'port')
    bind = '%s:%s' % (host, port)

    try:
        workers = config_parser.get('sharq-server', 'workers')
    except ConfigParser.NoOptionError:
        workers = number_of_workers()

    try:
        accesslog = config_parser.get('sharq-server', 'accesslog')
    except ConfigParser.NoOptionError:
        accesslog = None

    options = {
        'bind': bind,
        'workers': workers,
        'worker_class': 'gevent'
    }
    if accesslog:
        options.update({'accesslog': accesslog})

    if args.gunicorn_config:
        gunicorn_config = os.path.abspath(args.gunicorn_config)
        options.update({'config': gunicorn_config})

    # Reconstructed startup banner (see docstring note)
    print('SharQ Server %s starting on %s' % (__version__, bind))

    server = setup_server(sharq_config)
    SharQServerApplicationRunner(server.app, options).run()
Exposes a CLI to configure the SharQ Server and runs the server .
18,736
def setup_server(config_path):
    """
    Configure a SharQ server, start the requeue loop in the background, and
    return the server instance.
    """
    server = SharQServer(config_path)

    # Requeue expired jobs concurrently with request handling
    gevent.spawn(server.requeue)

    return server
Configure SharQ server start the requeue loop and return the server .
18,737
def requeue(self):
    """Loop endlessly, requeueing expired jobs at the configured interval."""
    interval_ms = float(self.config.get('sharq', 'job_requeue_interval'))

    while True:
        self.sq.requeue()
        # the configured interval is in milliseconds; gevent.sleep wants seconds
        gevent.sleep(interval_ms / 1000.00)
Loop endlessly and requeue expired jobs .
18,738
def _view_enqueue(self, queue_type, queue_id):
    """
    Enqueues a job into SharQ; 400 on bad input or enqueue failure, 201 on
    success.

    Fixed for Python 3 compatibility: `except Exception, e` -> `except
    Exception as e`, and the long-removed `e.message` -> `str(e)`.
    """
    response = {
        'status': 'failure'
    }

    try:
        request_data = json.loads(request.data)
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400

    request_data.update({
        'queue_type': queue_type,
        'queue_id': queue_id
    })

    try:
        response = self.sq.enqueue(**request_data)
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400

    return jsonify(**response), 201
Enqueues a job into SharQ .
18,739
def _view_dequeue(self, queue_type):
    """
    Dequeues a job from SharQ; 404 when no job is available, 400 on error.

    Fixed for Python 3 compatibility: `except Exception, e` -> `except
    Exception as e`, and the long-removed `e.message` -> `str(e)`.
    """
    response = {
        'status': 'failure'
    }

    request_data = {
        'queue_type': queue_type
    }

    try:
        response = self.sq.dequeue(**request_data)
        if response['status'] == 'failure':
            return jsonify(**response), 404
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400

    return jsonify(**response)
Dequeues a job from SharQ .
18,740
def _view_finish(self, queue_type, queue_id, job_id):
    """
    Marks a job as finished in SharQ; 404 when the job is unknown, 400 on
    error.

    Fixed for Python 3 compatibility: `except Exception, e` -> `except
    Exception as e`, and the long-removed `e.message` -> `str(e)`.
    """
    response = {
        'status': 'failure'
    }

    request_data = {
        'queue_type': queue_type,
        'queue_id': queue_id,
        'job_id': job_id
    }

    try:
        response = self.sq.finish(**request_data)
        if response['status'] == 'failure':
            return jsonify(**response), 404
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400

    return jsonify(**response)
Marks a job as finished in SharQ .
18,741
def _view_interval(self, queue_type, queue_id):
    """
    Updates the queue interval in SharQ; 404 when the queue is unknown, 400
    on bad input or error.

    Fixed for Python 3 compatibility: `except Exception, e` -> `except
    Exception as e`, and the long-removed `e.message` -> `str(e)`.
    """
    response = {
        'status': 'failure'
    }

    try:
        request_data = json.loads(request.data)
        interval = request_data['interval']
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400

    request_data = {
        'queue_type': queue_type,
        'queue_id': queue_id,
        'interval': interval
    }

    try:
        response = self.sq.interval(**request_data)
        if response['status'] == 'failure':
            return jsonify(**response), 404
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400

    return jsonify(**response)
Updates the queue interval in SharQ .
18,742
def _view_metrics(self, queue_type, queue_id):
    """
    Gets SharQ metrics based on the params; both queue_type and queue_id are
    optional filters.

    Fixed for Python 3 compatibility: `except Exception, e` -> `except
    Exception as e`, and the long-removed `e.message` -> `str(e)`.
    """
    response = {
        'status': 'failure'
    }

    request_data = {}
    if queue_type:
        request_data['queue_type'] = queue_type
    if queue_id:
        request_data['queue_id'] = queue_id

    try:
        response = self.sq.metrics(**request_data)
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400

    return jsonify(**response)
Gets SharQ metrics based on the params .
18,743
def _view_clear_queue(self, queue_type, queue_id):
    """
    Removes a queue from SharQ based on the queue_type and queue_id; 400 on
    bad input or error.

    Fixed for Python 3 compatibility: `except Exception, e` -> `except
    Exception as e`, and the long-removed `e.message` -> `str(e)`.
    """
    response = {
        'status': 'failure'
    }

    try:
        request_data = json.loads(request.data)
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400

    request_data.update({
        'queue_type': queue_type,
        'queue_id': queue_id
    })

    try:
        response = self.sq.clear_queue(**request_data)
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400

    return jsonify(**response)
Removes a queue from SharQ based on the queue_type and queue_id.
18,744
def start_patching(name=None):
    """
    Initiate mocking of the functions listed in _factory_map.

    With no name, every registered function is patched; with a name, only
    that one. Warns if patching is already active.
    """
    global _factory_map, _patchers, _mocks

    if _patchers and name is None:
        warnings.warn('start_patching() called again, already patched')

    _pre_import()

    if name is None:
        items = _factory_map.items()
    else:
        items = [(name, _factory_map[name])]

    for target, factory in items:
        patcher = mock.patch(target, new=factory())
        mocked = patcher.start()
        _patchers[target] = patcher
        _mocks[target] = mocked
Initiate mocking of the functions listed in _factory_map .
18,745
def stop_patching(name=None):
    """
    Finish the mocking initiated by start_patching, for one name or (by
    default) all of them. Warns if nothing is patched.
    """
    global _patchers, _mocks

    if not _patchers:
        warnings.warn('stop_patching() called again, already stopped')

    if name is None:
        targets = list(_patchers.items())
    else:
        targets = [(name, _patchers[name])]

    for target, patcher in targets:
        patcher.stop()
        del _patchers[target]
        del _mocks[target]
Finish the mocking initiated by start_patching
18,746
def standardize_back(xs, offset, scale):
    """
    De-standardize an input series: the inverse of standardize().

    result = xs * scale + offset

    :raises ValueError: if any argument cannot be converted

    Fixed: the original bare `except:` clauses were narrowed so unrelated
    errors (e.g. KeyboardInterrupt) are not swallowed and re-labelled.
    """
    try:
        offset = float(offset)
    except (TypeError, ValueError):
        raise ValueError('The argument offset is not None or float.')

    try:
        scale = float(scale)
    except (TypeError, ValueError):
        raise ValueError('The argument scale is not None or float.')

    try:
        xs = np.array(xs, dtype="float64")
    except (TypeError, ValueError):
        raise ValueError('The argument xs is not numpy array or similar.')

    return xs * scale + offset
This is a function for de-standardization of an input series.
18,747
def standardize(x, offset=None, scale=None):
    """
    Standardize an input series: (x - offset) / scale, defaulting offset to
    the mean and scale to the standard deviation of x.

    :raises ValueError: if an argument cannot be converted

    Fixed: `== None` comparisons replaced with `is None` (the idiomatic and
    safe form), and the bare `except:` clauses narrowed to the conversion
    errors actually expected.
    """
    if offset is None:
        offset = np.array(x).mean()
    else:
        try:
            offset = float(offset)
        except (TypeError, ValueError):
            raise ValueError('The argument offset is not None or float')

    if scale is None:
        scale = np.array(x).std()
    else:
        try:
            scale = float(scale)
        except (TypeError, ValueError):
            raise ValueError('The argument scale is not None or float')

    try:
        x = np.array(x, dtype="float64")
    except (TypeError, ValueError):
        raise ValueError('The argument x is not numpy array or similar.')

    return (x - offset) / scale
This is a function for standardization of an input series.
18,748
def input_from_history(a, n, bias=False):
    """
    Create an input matrix of sliding windows of length n from series a.

    :param a: input series (array-like)
    :param n: window length (int > 0)
    :param bias: if True, append a column of ones
    :return: matrix of shape (len(a) - n + 1, n) -- plus one column with bias
    :raises ValueError: on invalid arguments
    """
    if not type(n) == int:
        raise ValueError('The argument n must be int.')
    if not n > 0:
        raise ValueError('The argument n must be greater than 0')
    try:
        a = np.array(a, dtype="float64")
    except:
        raise ValueError('The argument a is not numpy array or similar.')

    windows = np.array([a[start:start + n] for start in range(len(a) - n + 1)])

    if bias:
        windows = np.vstack((windows.T, np.ones(len(windows)))).T

    return windows
This is function for creation of input matrix .
18,749
def init_weights(self, w, n=-1):
    """
    Initialise the adaptive weights of the filter.

    :param w: "random", "zeros", or an array-like of length n
    :param n: number of weights (defaults to self.n when -1)
    :raises ValueError: if w cannot be interpreted
    """
    if n == -1:
        n = self.n

    if type(w) == str:
        # symbolic initialisations
        if w == "random":
            w = np.random.normal(0, 0.5, n)
        elif w == "zeros":
            w = np.zeros(n)
        else:
            raise ValueError('Impossible to understand the w')
    elif len(w) == n:
        # explicit weight vector of the right length
        try:
            w = np.array(w, dtype="float64")
        except:
            raise ValueError('Impossible to understand the w')
    else:
        raise ValueError('Impossible to understand the w')

    self.w = w
This function initialises the adaptive weights of the filter .
18,750
def predict(self, x):
    """
    Calculate the new output value y from input array x.

    :return: dot product of the current weights with x
    """
    return np.dot(self.w, x)
This function calculates the new output value y from input array x .
18,751
def explore_learning(self, d, x, mu_start=0, mu_end=1., steps=100,
                     ntrain=0.5, epochs=1, criteria="MSE", target_w=False):
    """
    Test which learning rate is the best.

    For every candidate mu the filter is re-initialised with zero weights,
    pre-trained, and scored with the chosen error criterion -- against
    target_w when one is given, otherwise on the prediction error.

    :return: (errors, mu_range)
    """
    mu_range = np.linspace(mu_start, mu_end, steps)
    errors = np.zeros(len(mu_range))

    for i, mu in enumerate(mu_range):
        # reset the filter so every mu starts from the same state
        self.init_weights("zeros")
        self.mu = mu

        y, e, w = self.pretrained_run(d, x, ntrain=ntrain, epochs=epochs)

        if type(target_w) != bool:
            errors[i] = get_mean_error(w[-1] - target_w, function=criteria)
        else:
            errors[i] = get_mean_error(e, function=criteria)

    return errors, mu_range
Test what learning rate is the best .
18,752
def check_float_param(self, param, low, high, name):
    """
    Check that param converts to a float and lies within [low, high].

    A bound of None means unbounded on that side.

    :return: the value converted to float
    :raises ValueError: if conversion fails or the value is out of range

    Fixed: the original compared against a possibly-None bound directly
    (`low <= param <= high`), which raises TypeError on Python 3 whenever
    only one bound is given; bounds are now checked individually. The bare
    `except` was also narrowed to the conversion errors expected.
    """
    try:
        param = float(param)
    except (TypeError, ValueError):
        raise ValueError('Parameter {} is not float or similar'.format(name))

    if (low is not None and param < low) or (high is not None and param > high):
        raise ValueError('Parameter {} is not in range <{}, {}>'.format(name, low, high))

    return param
Check if the value of the given parameter is in the given range and a float . Designed for testing parameters like mu and eps . To pass this function the variable param must be able to be converted into a float with a value between low and high .
18,753
def check_int_param(self, param, low, high, name):
    """
    Check that param converts to an int and lies within [low, high].

    A bound of None means unbounded on that side.

    :return: the value converted to int
    :raises ValueError: if conversion fails or the value is out of range

    Fixed: the original compared against a possibly-None bound directly
    (`low <= param <= high`), which raises TypeError on Python 3 whenever
    only one bound is given; bounds are now checked individually. The bare
    `except` was also narrowed to the conversion errors expected.
    """
    try:
        param = int(param)
    except (TypeError, ValueError):
        raise ValueError('Parameter {} is not int or similar'.format(name))

    if (low is not None and param < low) or (high is not None and param > high):
        raise ValueError('Parameter {} is not in range <{}, {}>'.format(name, low, high))

    return param
Check if the value of the given parameter is in the given range and an int . Designed for testing parameters like mu and eps . To pass this function the variable param must be able to be converted into a float with a value between low and high .
18,754
def MAE(x1, x2=-1):
    """
    Mean absolute error. Accepts either two series of data, or directly one
    series of errors.
    """
    error = get_valid_error(x1, x2)
    return np.sum(np.abs(error)) / float(len(error))
Mean absolute error - this function accepts two series of data or directly one series with error .
18,755
def MSE(x1, x2=-1):
    """Mean squared error.

    Accepts either two data series (target and output) or a single
    series that already contains the error.
    """
    err = get_valid_error(x1, x2)
    n = float(len(err))
    return np.dot(err, err) / n
Mean squared error - this function accepts two series of data or directly one series with error .
18,756
def RMSE(x1, x2=-1):
    """Root-mean-square error.

    Accepts either two data series (target and output) or a single
    series that already contains the error.
    """
    err = get_valid_error(x1, x2)
    mean_sq = np.dot(err, err) / float(len(err))
    return np.sqrt(mean_sq)
Root - mean - square error - this function accepts two series of data or directly one series with error .
18,757
def ELBND(w, e, function="max"):
    """Error and Learning Based Novelty Detection measure.

    Args:
        w: 2-D array of weight history (samples x weights).
        e: 1-D array of errors, one per sample.
        function: "max" or "sum" -- how per-weight values are aggregated.

    Returns a 1-D array with one ELBND value per sample.

    Fix: removed leftover debug/dead code (unused N, n and two lines
    computing throwaway random arrays a, b).
    """
    if function not in ["max", "sum"]:
        raise ValueError('Unknown output function')
    # per-sample absolute weight increments; last row stays zero
    dw = np.zeros(w.shape)
    dw[:-1] = np.abs(np.diff(w, axis=0))
    # weight increments scaled by the corresponding error
    elbnd = np.abs((dw.T * e).T)
    if function == "max":
        elbnd = np.max(elbnd, axis=1)
    elif function == "sum":
        elbnd = np.sum(elbnd, axis=1)
    return elbnd
This function estimates Error and Learning Based Novelty Detection measure from given data .
18,758
def LDA_base(x, labels):
    """Base routine for Linear Discriminant Analysis.

    Builds the within-class and between-class scatter matrices and
    returns the eigenvalues and eigenvectors of Sw^-1 . Sb (unsorted).
    """
    classes = np.array(tuple(set(labels)))
    cols = x.shape[1]
    # per-class feature means
    means = np.zeros((len(classes), cols))
    for i, cl in enumerate(classes):
        means[i] = np.mean(x[labels == cl], axis=0)
    # within-class scatter matrix Sw
    scatter_within = np.zeros((cols, cols))
    for cl, mean in zip(classes, means):
        scatter_class = np.zeros((cols, cols))
        for row in x[labels == cl]:
            dif = row - mean
            scatter_class += np.dot(dif.reshape(cols, 1), dif.reshape(1, cols))
        scatter_within += scatter_class
    # between-class scatter matrix Sb, weighted by class size
    total_mean = np.mean(x, axis=0)
    scatter_between = np.zeros((cols, cols))
    for cl, mean in zip(classes, means):
        dif = mean - total_mean
        dif_product = np.dot(dif.reshape(cols, 1), dif.reshape(1, cols))
        scatter_between += x[labels == cl, :].shape[0] * dif_product
    # eigendecomposition of Sw^-1 . Sb yields the discriminant directions
    scatter_product = np.dot(np.linalg.inv(scatter_within), scatter_between)
    eigen_values, eigen_vectors = np.linalg.eig(scatter_product)
    return eigen_values, eigen_vectors
Base function used for Linear Discriminant Analysis .
18,759
def LDA(x, labels, n=False):
    """Linear Discriminant Analysis: project x onto its n strongest
    discriminant directions.

    Args:
        x: 2-D array-like of samples (rows) x features (columns).
        labels: class label per sample.
        n: number of output dimensions (default: n_features - 1).

    Fix: convert x to an ndarray *before* reading x.shape, so plain
    Python lists work with the default n (previously raised
    AttributeError).
    """
    try:
        x = np.array(x)
    except Exception:
        raise ValueError('Impossible to convert x to a numpy array.')
    if not n:
        n = x.shape[1] - 1
    assert type(n) == int, "Provided n is not an integer."
    assert x.shape[1] > n, "The requested n is bigger than \
number of features in x."
    eigen_values, eigen_vectors = LDA_base(x, labels)
    # order discriminant directions by descending eigenvalue
    eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
    return eigen_order[:n].dot(x.T).T
Linear Discriminant Analysis function .
18,760
def LDA_discriminants(x, labels):
    """Return LDA eigenvalues in descending order, to help decide how
    many dimensions the data should be reduced to.
    """
    try:
        data = np.array(x)
    except:
        raise ValueError('Impossible to convert x to a numpy array.')
    eigen_values, _unused_vectors = LDA_base(data, labels)
    descending = (-eigen_values).argsort()
    return eigen_values[descending]
Linear Discriminant Analysis helper for determination how many columns of data should be reduced .
18,761
def read_memory(self):
    """Return the mean target value and mean input vector from the
    stored history, then advance the circular memory index.

    Returns a tuple ``(m_d, m_x)``.
    """
    if self.mem_empty == True:
        if self.mem_idx == 0:
            # nothing stored yet: neutral defaults
            m_x = np.zeros(self.n)
            m_d = 0
        else:
            # average only over the filled part of the memory
            m_x = np.mean(self.mem_x[:self.mem_idx + 1], axis=0)
            m_d = np.mean(self.mem_d[:self.mem_idx])
    else:
        # memory is full: average everything, excluding the target slot
        # that is about to be overwritten at mem_idx
        m_x = np.mean(self.mem_x, axis=0)
        m_d = np.mean(np.delete(self.mem_d, self.mem_idx))
    # advance circular index, wrapping at the end of the buffer
    self.mem_idx += 1
    if self.mem_idx > len(self.mem_x) - 1:
        self.mem_idx = 0
        # NOTE(review): mem_empty is cleared below on *every* call, not
        # only on wrap-around -- confirm this matches the writer's logic
    self.mem_empty = False
    return m_d, m_x
This function reads the mean value of the target d and the mean input vector x from the stored history.
18,762
def learning_entropy(w, m=10, order=1, alpha=False):
    """Estimate Learning Entropy from a weight history.

    Args:
        w: 2-D array-like of weight history (samples x weights).
        m: window size for the running statistics.
        order: order of the weight difference used.
        alpha: False for the approximate (normalized-deviation) variant,
            or an iterable of sensitivity thresholds for the direct variant.

    Returns an array of LE values; the first m entries are zeroed.
    """
    w = np.array(w)
    N = w.shape[0]
    n = w.shape[1]
    # absolute weight increments of the requested order
    dw = np.copy(w)
    dw[order:] = np.abs(np.diff(dw, n=order, axis=0))
    awd = np.zeros(w.shape)
    if not alpha:
        # approximate LE: deviation of |dw| from its windowed mean,
        # normalized by the windowed standard deviation
        swd = np.zeros(w.shape)
        for k in range(m, N):
            awd[k] = np.mean(dw[k - m:k], axis=0)
            swd[k] = np.std(dw[k - m:k], axis=0)
        # eps guards against division by a zero standard deviation
        eps = 1e-10
        le = (dw - awd) / (swd + eps)
    else:
        # direct LE: count increments exceeding each alpha-scaled mean
        for k in range(m, N):
            awd[k] = np.mean(dw[k - m:k], axis=0)
        alphas = np.array(alpha)
        fh = np.zeros(N)
        for alpha in alphas:
            fh += np.sum(awd * alpha < dw, axis=1)
        le = fh / float(n * len(alphas))
    # the first m samples have no full window
    le[:m] = 0
    return le
This function estimates Learning Entropy .
18,763
def activation(self, x, f="sigmoid", der=False):
    """Apply the activation function (or its derivative) to x.

    Args:
        x: layer output value(s); for der=True, x is the already
           activated output.
        f: "sigmoid" or "tanh".
        der: when True, return the derivative instead.

    Fix: unknown f previously fell through and silently returned None;
    it now raises ValueError.
    """
    if f == "sigmoid":
        if der:
            return x * (1 - x)
        return 1. / (1 + np.exp(-x))
    if f == "tanh":
        if der:
            return 1 - x ** 2
        return (2. / (1 + np.exp(-2 * x))) - 1
    raise ValueError('Unknown activation function')
This function process values of layer outputs with activation function .
18,764
def train(self, x, d, epochs=10, shuffle=False):
    """Batch-train the MLP.

    Args:
        x: 2-D input data (samples x features).
        d: targets (1-D for single-output nets, 2-D otherwise).
        epochs: number of passes over the data.
        shuffle: shuffle samples once before training.

    Returns (e, MSE): per-sample errors and per-epoch mean squared error.

    Fix: the per-epoch MSE slice previously ended at (epoch+1)*N - 1,
    silently dropping the last sample of every epoch.
    """
    N = len(x)
    if not len(d) == N:
        raise ValueError('The length of vector d and matrix x must agree.')
    if not len(x[0]) == self.n_input:
        raise ValueError('The number of network inputs is not correct.')
    if self.outputs == 1:
        if not len(d.shape) == 1:
            raise ValueError('For one output MLP the d must have one dimension')
    else:
        if not d.shape[1] == self.outputs:
            raise ValueError('The number of outputs must agree with number of columns in d')
    try:
        x = np.array(x)
        d = np.array(d)
    except Exception:
        raise ValueError('Impossible to convert x or d to a numpy array')
    if self.outputs == 1:
        e = np.zeros(epochs * N)
    else:
        e = np.zeros((epochs * N, self.outputs))
    MSE = np.zeros(epochs)
    if shuffle:
        # one shuffle of the sample order, shared by x and d
        randomize = np.arange(len(x))
        np.random.shuffle(randomize)
        x = x[randomize]
        d = d[randomize]
    for epoch in range(epochs):
        for k in range(N):
            self.predict(x[k])
            e[(epoch * N) + k] = self.update(d[k])
        # include every sample of the epoch (bug fix, see docstring)
        MSE[epoch] = np.sum(e[epoch * N:(epoch + 1) * N] ** 2) / N
    return e, MSE
Function for batch training of MLP .
18,765
def run(self, x):
    """Run the already trained network on every sample in x and return
    the outputs.
    """
    try:
        samples = np.array(x)
    except:
        raise ValueError('Impossible to convert x to a numpy array')
    count = len(samples)
    if self.outputs == 1:
        out = np.zeros(count)
    else:
        out = np.zeros((count, self.outputs))
    for idx in range(count):
        out[idx] = self.predict(samples[idx])
    return out
Function for batch usage of already trained and tested MLP .
18,766
def PCA_components(x):
    """Return the eigenvalues of the covariance matrix of x, sorted in
    descending order -- a helper for deciding how many principal
    components to keep.

    Fix: the original also computed eigenvectors and an (unused)
    eigen_order array; np.linalg.eigvals computes only what is returned.
    """
    try:
        x = np.array(x)
    except Exception:
        raise ValueError('Impossible to convert x to a numpy array.')
    eigen_values = np.linalg.eigvals(np.cov(x.T))
    return eigen_values[(-eigen_values).argsort()]
Principal Component Analysis helper to check out eigenvalues of components .
18,767
def PCA(x, n=False):
    """Principal Component Analysis: project x onto its n strongest
    principal components.

    Args:
        x: 2-D array-like of samples (rows) x features (columns).
        n: number of output dimensions (default: n_features - 1).

    Fix: convert x to an ndarray *before* reading x.shape, so plain
    Python lists work with the default n (previously raised
    AttributeError).
    """
    try:
        x = np.array(x)
    except Exception:
        raise ValueError('Impossible to convert x to a numpy array.')
    if not n:
        n = x.shape[1] - 1
    assert type(n) == int, "Provided n is not an integer."
    assert x.shape[1] > n, "The requested n is bigger than \
number of features in x."
    eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
    # order components by descending eigenvalue
    eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
    return eigen_order[:n].dot(x.T).T
Principal component analysis function .
18,768
def clean_axis(axis):
    """Strip ticks, tick labels and the surrounding frame from an axis."""
    for axis_getter in (axis.get_xaxis, axis.get_yaxis):
        axis_getter().set_ticks([])
    for spine in list(axis.spines.values()):
        spine.set_visible(False)
Remove ticks tick labels and frame from axis
18,769
def get_seaborn_colorbar(dfr, classes):
    """Return a Series of colours (indexed like dfr) representing the
    class of each genome, for use as a Seaborn clustermap colorbar.
    """
    levels = sorted(set(classes.values()))
    palette = sns.cubehelix_palette(
        len(levels), light=0.9, dark=0.1, reverse=True, start=1, rot=-2,
    )
    # colour per class level, then colour per genome
    paldict = dict(zip(levels, palette))
    lvl_pal = {cls: paldict[lvl] for (cls, lvl) in classes.items()}
    col_cb = pd.Series(dfr.index).map(lvl_pal)
    col_cb.index = dfr.index
    return col_cb
Return a colorbar representing classes for a Seaborn plot .
18,770
def get_safe_seaborn_labels(dfr, labels):
    """Return one label per dataframe row, substituting entries from the
    labels mapping where available and falling back to the index value.
    """
    if labels is None:
        return list(dfr.index)
    return [labels.get(idx, idx) for idx in dfr.index]
Returns labels guaranteed to correspond to the dataframe .
18,771
def get_seaborn_clustermap(dfr, params, title=None, annot=True):
    """Return a Seaborn clustermap built from dfr and the plot params."""
    opts = {
        "cmap": params.cmap,
        "vmin": params.vmin,
        "vmax": params.vmax,
        "col_colors": params.colorbar,
        "row_colors": params.colorbar,
        "figsize": (params.figsize, params.figsize),
        "linewidths": params.linewidths,
        "xticklabels": params.labels,
        "yticklabels": params.labels,
        "annot": annot,
    }
    fig = sns.clustermap(dfr, **opts)
    # put the colour-scale label on the left of the scale
    fig.cax.yaxis.set_label_position("left")
    if title:
        fig.cax.set_ylabel(title)
    # rotate tick labels for readability
    fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xticklabels(), rotation=90)
    fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_yticklabels(), rotation=0)
    return fig
Returns a Seaborn clustermap .
18,772
def heatmap_seaborn(dfr, outfilename=None, title=None, params=None):
    """Return a Seaborn heatmap with cluster dendrograms, optionally
    saving it to outfilename.
    """
    maxfigsize = 120
    calcfigsize = dfr.shape[0] * 1.1
    figsize = min(max(8, calcfigsize), maxfigsize)
    if figsize == maxfigsize:
        # figure hit the size cap: shrink fonts to compensate
        sns.set_context("notebook", font_scale=maxfigsize / calcfigsize)
    if params.classes is None:
        params.colorbar = None
    else:
        params.colorbar = get_seaborn_colorbar(dfr, params.classes)
    params.labels = get_safe_seaborn_labels(dfr, params.labels)
    params.figsize = figsize
    params.linewidths = 0.25
    fig = get_seaborn_clustermap(dfr, params, title=title)
    if outfilename:
        fig.savefig(outfilename)
    return fig
Returns seaborn heatmap with cluster dendrograms .
18,773
def add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col"):
    """Attach a row or column dendrogram to fig and return it together
    with the gridspec it lives in, as {"dendrogram": ..., "gridspec": ...}.
    """
    if orientation == "row":
        # row dendrogram: distances between rows, drawn on the left
        dists = distance.squareform(distance.pdist(dfr))
        spec = heatmap_gs[1, 0]
        orient = "left"
        nrows, ncols = 1, 2
        height_ratios = [1]
    else:
        # column dendrogram: distances between columns, drawn on top
        dists = distance.squareform(distance.pdist(dfr.T))
        spec = heatmap_gs[0, 1]
        orient = "top"
        nrows, ncols = 2, 1
        height_ratios = [1, 0.15]
    gspec = gridspec.GridSpecFromSubplotSpec(
        nrows, ncols, subplot_spec=spec, wspace=0.0, hspace=0.1,
        height_ratios=height_ratios,
    )
    dend_axes = fig.add_subplot(gspec[0, 0])
    # complete-linkage clustering on the condensed distance matrix
    dend = sch.dendrogram(
        sch.linkage(distance.squareform(dists), method="complete"),
        color_threshold=np.inf, orientation=orient,
    )
    # the dendrogram axes need no ticks or frame
    clean_axis(dend_axes)
    return {"dendrogram": dend, "gridspec": gspec}
Return a dendrogram and corresponding gridspec attached to the fig
18,774
def get_mpl_heatmap_axes(dfr, fig, heatmap_gs):
    """Return the axis that will hold the Matplotlib heatmap, with one
    tick per row/column and ticks placed bottom/right.
    """
    hm_axes = fig.add_subplot(heatmap_gs[1, 1])
    tick_positions = np.linspace(0, dfr.shape[0] - 1, dfr.shape[0])
    hm_axes.set_xticks(tick_positions)
    hm_axes.set_yticks(tick_positions)
    hm_axes.grid(False)
    hm_axes.xaxis.tick_bottom()
    hm_axes.yaxis.tick_right()
    return hm_axes
Return axis for Matplotlib heatmap .
18,775
def add_mpl_colorbar(dfr, fig, dend, params, orientation="row"):
    """Add a class colorbar strip (row or column) to the Matplotlib
    heatmap figure; returns the Series of class indices used.

    NOTE(review): mutates params.classes in place, adding singleton
    classes for genomes that have none.
    """
    # every leaf needs a class; unclassified genomes become their own class
    for name in dfr.index[dend["dendrogram"]["leaves"]]:
        if name not in params.classes:
            params.classes[name] = name
    # map each class value to a stable integer for colouring
    classdict = {cls: idx for (idx, cls) in enumerate(params.classes.values())}
    cblist = []
    for name in dfr.index[dend["dendrogram"]["leaves"]]:
        try:
            cblist.append(classdict[params.classes[name]])
        except KeyError:
            # fall back to the genome's own name as its class
            cblist.append(classdict[name])
    colbar = pd.Series(cblist)
    if orientation == "row":
        # vertical strip next to the row dendrogram
        cbaxes = fig.add_subplot(dend["gridspec"][0, 1])
        cbaxes.imshow(
            [[cbar] for cbar in colbar.values],
            cmap=plt.get_cmap(pyani_config.MPL_CBAR),
            interpolation="nearest", aspect="auto", origin="lower",
        )
    else:
        # horizontal strip under the column dendrogram
        cbaxes = fig.add_subplot(dend["gridspec"][1, 0])
        cbaxes.imshow(
            [colbar],
            cmap=plt.get_cmap(pyani_config.MPL_CBAR),
            interpolation="nearest", aspect="auto", origin="lower",
        )
    clean_axis(cbaxes)
    return colbar
Add class colorbars to Matplotlib heatmap .
18,776
def add_mpl_labels(heatmap_axes, rowlabels, collabels, params):
    """Add row/column labels to Matplotlib heatmap axes, in place."""
    if params.labels:
        # substitute user-supplied labels where available
        rowlabels = [params.labels.get(lab, lab) for lab in rowlabels]
        collabels = [params.labels.get(lab, lab) for lab in collabels]
    xlabs = heatmap_axes.set_xticklabels(collabels)
    ylabs = heatmap_axes.set_yticklabels(rowlabels)
    for lab in xlabs:
        lab.set_rotation(90)
        lab.set_fontsize(8)
    for lab in ylabs:
        lab.set_fontsize(8)
Add labels to Matplotlib heatmap axes in - place .
18,777
def add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title=None):
    """Add a colour scale (colorbar) to the Matplotlib heatmap and
    return it.
    """
    # ticks at 0/25/50/75/100% of the value range
    cbticks = [params.vmin + frac * params.vdiff
               for frac in (0, 0.25, 0.5, 0.75, 1)]
    if params.vmax > 10:
        # round tick values for large scales
        exponent = int(floor(log10(params.vmax))) - 1
        cbticks = [int(round(tick, -exponent)) for tick in cbticks]
    scale_subplot = gridspec.GridSpecFromSubplotSpec(
        1, 3, subplot_spec=heatmap_gs[0, 0], wspace=0.0, hspace=0.0,
    )
    scale_ax = fig.add_subplot(scale_subplot[0, 1])
    cbar = fig.colorbar(ax_map, scale_ax, ticks=cbticks)
    if title:
        cbar.set_label(title, fontsize=6)
    cbar.ax.yaxis.set_ticks_position("left")
    cbar.ax.yaxis.set_label_position("left")
    cbar.ax.tick_params(labelsize=6)
    cbar.outline.set_linewidth(0)
    return cbar
Add colour scale to heatmap .
18,778
def heatmap_mpl(dfr, outfilename=None, title=None, params=None):
    """Return a Matplotlib heatmap with cluster dendrograms, optionally
    saving it to outfilename.
    """
    # scale the square figure with the number of genomes
    figsize = max(8, dfr.shape[0] * 0.175)
    fig = plt.figure(figsize=(figsize, figsize))
    # 2x2 grid: dendrograms in the top/left strips, heatmap bottom-right
    heatmap_gs = gridspec.GridSpec(
        2, 2, wspace=0.0, hspace=0.0,
        width_ratios=[0.3, 1], height_ratios=[0.3, 1],
    )
    coldend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col")
    rowdend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="row")
    heatmap_axes = get_mpl_heatmap_axes(dfr, fig, heatmap_gs)
    # reorder the matrix to match the dendrogram leaf order
    ax_map = heatmap_axes.imshow(
        dfr.iloc[rowdend["dendrogram"]["leaves"],
                 coldend["dendrogram"]["leaves"]],
        interpolation="nearest", cmap=params.cmap, origin="lower",
        vmin=params.vmin, vmax=params.vmax, aspect="auto",
    )
    if params.classes is not None:
        add_mpl_colorbar(dfr, fig, coldend, params, orientation="col")
        add_mpl_colorbar(dfr, fig, rowdend, params, orientation="row")
    add_mpl_labels(
        heatmap_axes,
        dfr.index[rowdend["dendrogram"]["leaves"]],
        dfr.index[coldend["dendrogram"]["leaves"]],
        params,
    )
    add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title)
    plt.subplots_adjust(top=0.85)
    # tight_layout on a GridSpec may warn; suppress deliberately
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        heatmap_gs.tight_layout(fig, h_pad=0.1, w_pad=0.5)
    if outfilename:
        fig.savefig(outfilename)
    return fig
Returns matplotlib heatmap with cluster dendrograms .
18,779
def run_dependency_graph(jobgraph, workers=None, logger=None):
    """Create and run pools of jobs derived from the passed dependency
    graph; returns the cumulative return value of all pools.
    """
    cmdsets = []
    for job in jobgraph:
        cmdsets = populate_cmdsets(job, cmdsets, depth=1)
    # deepest dependencies must run first
    cmdsets.reverse()
    cumretval = 0
    for cmdset in cmdsets:
        if logger:
            logger.info("Command pool now running:")
            for cmd in cmdset:
                logger.info(cmd)
        cumretval += multiprocessing_run(cmdset, workers)
        if logger:
            logger.info("Command pool done.")
    return cumretval
Creates and runs pools of jobs based on the passed jobgraph .
18,780
def populate_cmdsets(job, cmdsets, depth):
    """Recursively collect job commands into a list of sets, one set per
    depth of the dependency tree, and return the list.
    """
    # grow the list until there is a set for this depth
    if len(cmdsets) < depth:
        cmdsets.append(set())
    cmdsets[depth - 1].add(job.command)
    if not job.dependencies:
        return cmdsets
    for dep in job.dependencies:
        cmdsets = populate_cmdsets(dep, cmdsets, depth + 1)
    return cmdsets
Creates a list of sets containing jobs at different depths of the dependency tree .
18,781
def multiprocessing_run(cmdlines, workers=None):
    """Run the passed command lines in a multiprocessing pool; return
    the sum of their return codes (0 means all succeeded).
    """
    pool = multiprocessing.Pool(processes=workers)
    run_kwargs = {
        'shell': sys.platform != "win32",
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
    }
    results = [
        pool.apply_async(subprocess.run, (str(cline),), run_kwargs)
        for cline in cmdlines
    ]
    pool.close()
    pool.join()
    return sum(r.get().returncode for r in results)
Distributes passed command - line jobs using multiprocessing .
18,782
def get_input_files(dirname, *ext):
    """Return paths of the files in dirname whose extension is one of
    the passed extensions.
    """
    matching = (fname for fname in os.listdir(dirname)
                if os.path.splitext(fname)[-1] in ext)
    return [os.path.join(dirname, fname) for fname in matching]
Returns files in passed directory filtered by extension .
18,783
def get_sequence_lengths(fastafilenames):
    """Return the total sequence length of each FASTA file, keyed by the
    organism name (the file's stem).
    """
    tot_lengths = {}
    for fname in fastafilenames:
        org = os.path.splitext(os.path.split(fname)[-1])[0]
        tot_lengths[org] = sum(len(seq) for seq in SeqIO.parse(fname, 'fasta'))
    return tot_lengths
Returns dictionary of sequence lengths keyed by organism .
18,784
def last_exception():
    """Return the most recent exception, formatted as a string suitable
    for logging.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    formatted = traceback.format_exception(exc_type, exc_value, exc_tb)
    return "".join(formatted)
Returns last exception as a string or use in logging .
18,785
def make_outdir():
    """Make the output directory, if required.

    Behaviour (driven by module-level args/logger):
      * exists and not --force: error and exit;
      * exists, --force, --noclobber: keep the directory;
      * exists, --force: delete and recreate it.
    Also creates the per-method alignment subdirectory for non-TETRA runs.

    Fix: logger.error was passed the last_exception *function object*
    instead of calling it.
    """
    if os.path.exists(args.outdirname):
        if not args.force:
            logger.error(
                "Output directory %s would overwrite existing " + "files (exiting)",
                args.outdirname,
            )
            sys.exit(1)
        elif args.noclobber:
            logger.warning(
                "NOCLOBBER: not actually deleting directory %s", args.outdirname
            )
        else:
            logger.info(
                "Removing directory %s and everything below it", args.outdirname
            )
            shutil.rmtree(args.outdirname)
    logger.info("Creating directory %s", args.outdirname)
    try:
        os.makedirs(args.outdirname)
        if args.method != "TETRA":
            os.makedirs(os.path.join(args.outdirname, ALIGNDIR[args.method]))
    except OSError:
        if args.noclobber and args.force:
            logger.info("NOCLOBBER+FORCE: not creating directory")
        else:
            # bug fix: call last_exception() to log the traceback text
            logger.error(last_exception())
            sys.exit(1)
Make the output directory if required .
18,786
def compress_delete_outdir(outdir):
    """Compress the contents of the passed directory to .tar.gz, then
    delete the directory.
    """
    archive_name = outdir + ".tar.gz"
    logger.info("\tCompressing output from %s to %s", outdir, archive_name)
    with tarfile.open(archive_name, "w:gz") as tar:
        tar.add(outdir)
    logger.info("\tRemoving output directory %s", outdir)
    shutil.rmtree(outdir)
Compress the contents of the passed directory to . tar . gz and delete .
18,787
def calculate_anim(infiles, org_lengths):
    """Run the ANIm analysis for the input files and return the results
    dataframes (an ANIResults-like object from anim.process_deltadir).

    Driven by module-level args/logger: schedules NUCmer + delta-filter
    jobs (multiprocessing or SGE), then parses the .delta output.
    """
    logger.info("Running ANIm")
    logger.info("Generating NUCmer command-lines")
    deltadir = os.path.join(args.outdirname, ALIGNDIR["ANIm"])
    logger.info("Writing nucmer output to %s", deltadir)
    if not args.skip_nucmer:
        joblist = anim.generate_nucmer_jobs(
            infiles, args.outdirname, nucmer_exe=args.nucmer_exe,
            filter_exe=args.filter_exe, maxmatch=args.maxmatch,
            jobprefix=args.jobprefix,
        )
        if args.scheduler == "multiprocessing":
            logger.info("Running jobs with multiprocessing")
            if args.workers is None:
                logger.info("(using maximum number of available " + "worker threads)")
            else:
                logger.info("(using %d worker threads, if available)", args.workers)
            # cumval sums the return codes: non-zero means failures
            cumval = run_mp.run_dependency_graph(
                joblist, workers=args.workers, logger=logger
            )
            logger.info("Cumulative return value: %d", cumval)
            if 0 < cumval:
                logger.warning("At least one NUCmer comparison failed. " + "ANIm may fail.")
            else:
                logger.info("All multiprocessing jobs complete.")
        else:
            logger.info("Running jobs with SGE")
            logger.info("Jobarray group size set to %d", args.sgegroupsize)
            run_sge.run_dependency_graph(
                joblist, logger=logger, jgprefix=args.jobprefix,
                sgegroupsize=args.sgegroupsize, sgeargs=args.sgeargs,
            )
    else:
        logger.warning("Skipping NUCmer run (as instructed)!")
    logger.info("Processing NUCmer .delta files.")
    results = anim.process_deltadir(deltadir, org_lengths, logger=logger)
    # zero_error: at least one comparison had zero aligned length
    if results.zero_error:
        if not args.skip_nucmer and args.scheduler == "multiprocessing":
            if 0 < cumval:
                logger.error(
                    "This has possibly been a NUCmer run failure, " + "please investigate"
                )
                logger.error(last_exception())
                sys.exit(1)
            else:
                logger.error(
                    "This is possibly due to a NUCmer comparison " + "being too distant for use. Please consider " + "using the --maxmatch option."
                )
                logger.error(
                    "This is alternatively due to NUCmer run " + "failure, analysis will continue, but please " + "investigate."
                )
    if not args.nocompress:
        logger.info("Compressing/deleting %s", deltadir)
        compress_delete_outdir(deltadir)
    return results
Returns ANIm result dataframes for files in input directory .
18,788
def calculate_tetra(infiles):
    """Calculate TETRA correlation scores for the input FASTA files,
    returning the pairwise correlations.
    """
    logger.info("Running TETRA.")
    logger.info("Calculating TETRA Z-scores for each sequence.")
    tetra_zscores = {}
    for fname in infiles:
        logger.info("Calculating TETRA Z-scores for %s", fname)
        # organism name is the file stem
        org = os.path.splitext(os.path.split(fname)[-1])[0]
        tetra_zscores[org] = tetra.calculate_tetra_zscore(fname)
    logger.info("Calculating TETRA correlation scores.")
    return tetra.calculate_correlations(tetra_zscores)
Calculate TETRA for files in input directory .
18,789
def unified_anib(infiles, org_lengths):
    """Run the ANIb/ANIblastall analysis for the input files and return
    the results from anib.process_blast.

    Driven by module-level args/logger: fragments the inputs, runs the
    BLAST job graph (multiprocessing or SGE), then parses the output.
    """
    logger.info("Running %s", args.method)
    blastdir = os.path.join(args.outdirname, ALIGNDIR[args.method])
    logger.info("Writing BLAST output to %s", blastdir)
    if not args.skip_blastn:
        logger.info("Fragmenting input files, and writing to %s", args.outdirname)
        fragfiles, fraglengths = anib.fragment_fasta_files(
            infiles, blastdir, args.fragsize
        )
        # persist fragment lengths so skipped reruns can reload them
        with open(os.path.join(blastdir, "fraglengths.json"), "w") as outfile:
            json.dump(fraglengths, outfile)
        logger.info("Creating job dependency graph")
        jobgraph = anib.make_job_graph(
            infiles, fragfiles, anib.make_blastcmd_builder(args.method, blastdir)
        )
        if args.scheduler == "multiprocessing":
            logger.info("Running jobs with multiprocessing")
            logger.info("Running job dependency graph")
            if args.workers is None:
                logger.info("(using maximum number of available " + "worker threads)")
            else:
                logger.info("(using %d worker threads, if available)", args.workers)
            # cumval sums the return codes: non-zero means failures
            cumval = run_mp.run_dependency_graph(
                jobgraph, workers=args.workers, logger=logger
            )
            if 0 < cumval:
                logger.warning(
                    "At least one BLAST run failed. " + "%s may fail.", args.method
                )
            else:
                logger.info("All multiprocessing jobs complete.")
        else:
            run_sge.run_dependency_graph(jobgraph, logger=logger)
            logger.info("Running jobs with SGE")
    else:
        if args.method == "ANIblastall":
            # reload fragment lengths saved by an earlier run
            with open(os.path.join(blastdir, "fraglengths.json"), "rU") as infile:
                fraglengths = json.load(infile)
        else:
            fraglengths = None
        logger.warning("Skipping BLASTN runs (as instructed)!")
    logger.info("Processing pairwise %s BLAST output.", args.method)
    try:
        data = anib.process_blast(
            blastdir, org_lengths, fraglengths=fraglengths, mode=args.method
        )
    except ZeroDivisionError:
        logger.error("One or more BLAST output files has a problem.")
        if not args.skip_blastn:
            # NOTE(review): cumval is only bound on the multiprocessing
            # path; with the SGE scheduler this raises NameError -- verify
            if 0 < cumval:
                logger.error(
                    "This is possibly due to BLASTN run failure, " + "please investigate"
                )
            else:
                logger.error(
                    "This is possibly due to a BLASTN comparison " + "being too distant for use."
                )
        logger.error(last_exception())
    if not args.nocompress:
        logger.info("Compressing/deleting %s", blastdir)
        compress_delete_outdir(blastdir)
    return data
Calculate ANIb for files in input directory .
18,790
def subsample_input(infiles):
    """Return a random subsample of the input files.

    args.subsample is interpreted as an absolute count when > 1, or as a
    proportion of the inputs when in (0, 1]. args.seed optionally makes
    the sampling reproducible.

    Fix: float("not a number") raises ValueError, not TypeError, so the
    original crashed instead of logging the intended error; both are now
    caught.
    """
    logger.info("--subsample: %s", args.subsample)
    try:
        samplesize = float(args.subsample)
    except (TypeError, ValueError):
        logger.error(
            "--subsample must be int or float, got %s (exiting)",
            type(args.subsample),
        )
        sys.exit(1)
    if samplesize <= 0:
        logger.error(
            "--subsample must be positive value, got %s", str(args.subsample)
        )
        sys.exit(1)
    if int(samplesize) > 1:
        # absolute sample size, capped at the number of inputs
        logger.info("Sample size integer > 1: %d", samplesize)
        k = min(int(samplesize), len(infiles))
    else:
        # proportional sample size
        logger.info("Sample size proportion in (0, 1]: %.3f", samplesize)
        k = int(min(samplesize, 1.0) * len(infiles))
    logger.info("Randomly subsampling %d sequences for analysis", k)
    if args.seed:
        logger.info("Setting random seed with: %s", args.seed)
        random.seed(args.seed)
    else:
        logger.warning("Subsampling without specified random seed!")
        logger.warning("Subsampling may NOT be easily reproducible!")
    return random.sample(infiles, k)
Returns a random subsample of the input files .
18,791
def wait(self, interval=SGE_WAIT):
    """Poll SGE with qstat until this job finishes, backing off
    exponentially up to one minute between polls.
    """
    while True:
        time.sleep(interval)
        interval = min(2 * interval, 60)
        # qstat exits non-zero once the job has left the queue
        if os.system("qstat -j %s > /dev/null" % (self.name)):
            break
Wait until the job finishes and poll SGE on its status .
18,792
def generate_nucmer_jobs(filenames, outdir=".", nucmer_exe=pyani_config.NUCMER_DEFAULT, filter_exe=pyani_config.FILTER_DEFAULT, maxmatch=False, jobprefix="ANINUCmer",):
    """Return a list of Jobs describing NUCmer command lines for ANIm;
    each delta-filter job depends on its NUCmer job.
    """
    ncmds, fcmds = generate_nucmer_commands(
        filenames, outdir, nucmer_exe, filter_exe, maxmatch
    )
    joblist = []
    for idx, (ncmd, fcmd) in enumerate(zip(ncmds, fcmds)):
        njob = pyani_jobs.Job("%s_%06d-n" % (jobprefix, idx), ncmd)
        fjob = pyani_jobs.Job("%s_%06d-f" % (jobprefix, idx), fcmd)
        # filtering must wait for the alignment to finish
        fjob.add_dependency(njob)
        joblist.append(fjob)
    return joblist
Return a list of Jobs describing NUCmer command - lines for ANIm
18,793
def generate_nucmer_commands(filenames, outdir=".", nucmer_exe=pyani_config.NUCMER_DEFAULT, filter_exe=pyani_config.FILTER_DEFAULT, maxmatch=False,):
    """Return (nucmer_cmds, delta_filter_cmds): parallel lists of
    command lines, one pair per pairwise comparison.
    """
    nucmer_cmdlines = []
    delta_filter_cmdlines = []
    # upper-triangle pairwise comparisons only (no self, no duplicates)
    for idx, fname_a in enumerate(filenames[:-1]):
        for fname_b in filenames[idx + 1:]:
            ncmd, dcmd = construct_nucmer_cmdline(
                fname_a, fname_b, outdir, nucmer_exe, filter_exe, maxmatch
            )
            nucmer_cmdlines.append(ncmd)
            delta_filter_cmdlines.append(dcmd)
    return (nucmer_cmdlines, delta_filter_cmdlines)
Return a tuple of lists of NUCmer command - lines for ANIm
18,794
def construct_nucmer_cmdline(fname1, fname2, outdir=".", nucmer_exe=pyani_config.NUCMER_DEFAULT, filter_exe=pyani_config.FILTER_DEFAULT, maxmatch=False,):
    """Return a (nucmer_cmd, delta_filter_cmd) pair for one pairwise
    comparison; output files go to the ANIm alignment subdirectory.
    """
    stem1 = os.path.splitext(os.path.split(fname1)[-1])[0]
    stem2 = os.path.splitext(os.path.split(fname2)[-1])[0]
    outsubdir = os.path.join(outdir, pyani_config.ALIGNDIR["ANIm"])
    outprefix = os.path.join(outsubdir, "%s_vs_%s" % (stem1, stem2))
    mode = "--maxmatch" if maxmatch else "--mum"
    nucmercmd = "{0} {1} -p {2} {3} {4}".format(
        nucmer_exe, mode, outprefix, fname1, fname2
    )
    filtercmd = "delta_filter_wrapper.py " + "{0} -1 {1} {2}".format(
        filter_exe, outprefix + ".delta", outprefix + ".filter"
    )
    return (nucmercmd, filtercmd)
Returns a tuple of NUCmer and delta - filter commands
18,795
def process_deltadir(delta_dir, org_lengths, logger=None):
    """Return ANIm results for the .filter files in the passed directory.

    org_lengths maps organism name -> total genome length; comparisons
    whose query or subject is not in org_lengths are skipped with a
    warning.
    """
    deltafiles = pyani_files.get_input_files(delta_dir, ".filter")
    results = ANIResults(list(org_lengths.keys()), "ANIm")
    # self-comparisons: alignment length equals the full genome length
    for org, length in list(org_lengths.items()):
        results.alignment_lengths[org][org] = length
    for deltafile in deltafiles:
        # filenames follow the "<query>_vs_<subject>.filter" convention
        qname, sname = os.path.splitext(os.path.split(deltafile)[-1])[0].split("_vs_")
        if qname not in list(org_lengths.keys()):
            if logger:
                logger.warning(
                    "Query name %s not in input " % qname + "sequence list, skipping %s" % deltafile
                )
            continue
        if sname not in list(org_lengths.keys()):
            if logger:
                logger.warning(
                    "Subject name %s not in input " % sname + "sequence list, skipping %s" % deltafile
                )
            continue
        tot_length, tot_sim_error = parse_delta(deltafile)
        if tot_length == 0 and logger is not None:
            if logger:
                logger.warning(
                    "Total alignment length reported in " + "%s is zero!" % deltafile
                )
        # fraction of each genome covered by the alignment
        query_cover = float(tot_length) / org_lengths[qname]
        sbjct_cover = float(tot_length) / org_lengths[sname]
        try:
            perc_id = 1 - float(tot_sim_error) / tot_length
        except ZeroDivisionError:
            # zero aligned length: record it and flag the run
            perc_id = 0
            results.zero_error = True
        results.add_tot_length(qname, sname, tot_length)
        results.add_sim_errors(qname, sname, tot_sim_error)
        results.add_pid(qname, sname, perc_id)
        results.add_coverage(qname, sname, query_cover, sbjct_cover)
    return results
Returns a tuple of ANIm results for . deltas in passed directory .
18,796
def set_ncbi_email():
    """Set the contact email and tool name for NCBI Entrez queries."""
    Entrez.tool = "genbank_get_genomes_by_taxon.py"
    Entrez.email = args.email
    logger.info("Set NCBI contact email to %s", args.email)
Set contact email for NCBI .
18,797
def entrez_retry(func, *fnargs, **fnkwargs):
    """Call the passed Entrez function, retrying on HTTP/URL errors up
    to args.retries times; exits the program if every attempt fails.

    Fix: the failure log previously reported ``tries + 1`` *after* the
    counter had already been incremented, so the first failure was
    logged as attempt 2/N.
    """
    tries, success = 0, False
    while not success and tries < args.retries:
        try:
            output = func(*fnargs, **fnkwargs)
            success = True
        except (HTTPError, URLError):
            tries += 1
            # report the actual attempt number (bug fix, see docstring)
            logger.warning(
                "Entrez query %s(%s, %s) failed (%d/%d)",
                func, fnargs, fnkwargs, tries, args.retries,
            )
            logger.warning(last_exception())
    if not success:
        logger.error("Too many Entrez failures (exiting)")
        sys.exit(1)
    return output
Retries the passed function up to the number of times specified by args . retries
18,798
def entrez_batch_webhistory(record, expected, batchsize, *fnargs, **fnkwargs):
    """Recover Entrez data from a prior NCBI webhistory search, fetching
    in batches of batchsize via Efetch; returns all results as a list.
    """
    results = []
    for start in range(0, expected, batchsize):
        batch_handle = entrez_retry(
            Entrez.efetch,
            retstart=start, retmax=batchsize,
            webenv=record["WebEnv"], query_key=record["QueryKey"],
            *fnargs, **fnkwargs
        )
        results.extend(Entrez.read(batch_handle, validate=False))
    return results
Recovers the Entrez data from a prior NCBI webhistory search in batches of defined size using Efetch . Returns all results as a list .
18,799
def get_asm_uids(taxon_uid):
    """Return the NCBI assembly UIDs associated with the passed taxon
    UID, recovered via an Entrez webhistory search.
    """
    query = "txid%s[Organism:exp]" % taxon_uid
    logger.info("Entrez ESearch with query: %s", query)
    search_handle = entrez_retry(
        Entrez.esearch, db="assembly", term=query, format="xml", usehistory="y"
    )
    record = Entrez.read(search_handle, validate=False)
    result_count = int(record['Count'])
    logger.info("Entrez ESearch returns %d assembly IDs", result_count)
    # fetch all IDs in batches of 250 from the webhistory
    asm_ids = entrez_batch_webhistory(
        record, result_count, 250, db="assembly", retmode="xml"
    )
    logger.info("Identified %d unique assemblies", len(asm_ids))
    return asm_ids
Returns a set of NCBI UIDs associated with the passed taxon .