idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
5,200 | def SetPosition ( self , track_id , position ) : self . iface . SetPosition ( convert2dbus ( track_id , 'o' ) , convert2dbus ( position , 'x' ) ) | Sets the current track position in microseconds . |
5,201 | def process_lists ( self ) : for l1_idx , obj1 in enumerate ( self . l1 ) : for l2_idx , obj2 in enumerate ( self . l2 ) : if self . equal ( obj1 , obj2 ) : self . matches . add ( ( l1_idx , l2_idx ) ) | Do any preprocessing of the lists . |
5,202 | def get_matches ( self , src , src_idx ) : if src not in ( 'l1' , 'l2' ) : raise ValueError ( 'Must have one of "l1" or "l2" as src' ) if src == 'l1' : target_list = self . l2 else : target_list = self . l1 comparator = { 'l1' : lambda s_idx , t_idx : ( s_idx , t_idx ) in self . matches , 'l2' : lambda s_idx , t_idx : ( t_idx , s_idx ) in self . matches , } [ src ] return [ ( trg_idx , obj ) for trg_idx , obj in enumerate ( target_list ) if comparator ( src_idx , trg_idx ) ] | Get elements equal to the idx th in src from the other list . |
5,203 | def find_the_closest_atoms ( self , topology ) : self . universe . load_new ( topology ) self . universe . ligand_noH = self . universe . ligand . select_atoms ( "not name H*" ) ligand_positions = self . universe . ligand_noH . positions for residue in self . dict_of_plotted_res . keys ( ) : residue_selection = self . universe . select_atoms ( "resname " + residue [ 0 ] + " and resid " + residue [ 1 ] + " and segid " + residue [ 2 ] ) residue_positions = residue_selection . positions dist_array = MDAnalysis . analysis . distances . distance_array ( ligand_positions , residue_positions ) min_values_per_atom = { } i = 0 for atom in self . universe . ligand_noH : min_values_per_atom [ atom . name ] = dist_array [ i ] . min ( ) i += 1 sorted_min_values = sorted ( min_values_per_atom . items ( ) , key = operator . itemgetter ( 1 ) ) self . closest_atoms [ residue ] = [ ( sorted_min_values [ 0 ] [ 0 ] , sorted_min_values [ 0 ] [ 1 ] ) ] | This function defines the ligand atoms that are closest to the residues that will be plotted in the final graph . |
5,204 | def load_data ( self , topology , mol_file , ligand_name , offset = 0 ) : self . load_topology ( topology ) self . renumber_system ( offset ) self . rename_ligand ( ligand_name , mol_file ) self . load_mol ( mol_file ) | This function loads all relevant data - except trajectories since those are dealt with one at a time . Therefore this process only needs to be done once and every time a trajectory needs to be loaded it can be loaded separately and the Data object can be shared across LINTools processes . |
5,205 | def analyse_topology ( self , topology , cutoff = 3.5 ) : self . define_residues_for_plotting_topology ( cutoff ) self . find_the_closest_atoms ( topology ) | In case user wants to analyse only a single topology file this process will determine the residues that should be plotted and find the ligand atoms closest to these residues . |
5,206 | def get_header ( vcf_file_path ) : logger . info ( "Parsing header of file {0}" . format ( vcf_file_path ) ) head = HeaderParser ( ) handle = get_vcf_handle ( infile = vcf_file_path ) for line in handle : line = line . rstrip ( ) if line . startswith ( '#' ) : if line . startswith ( '##' ) : head . parse_meta_data ( line ) else : head . parse_header_line ( line ) else : break handle . close ( ) return head | Parse the header and return a header object |
5,207 | def sample_lonlat ( self , n ) : radius = self . sample_radius ( n ) a = radius b = self . jacobian * radius t = 2. * np . pi * np . random . rand ( n ) cost , sint = np . cos ( t ) , np . sin ( t ) phi = np . pi / 2. - np . deg2rad ( self . theta ) cosphi , sinphi = np . cos ( phi ) , np . sin ( phi ) x = a * cost * cosphi - b * sint * sinphi y = a * cost * sinphi + b * sint * cosphi if self . projector is None : logger . debug ( "Creating AITOFF projector for sampling" ) projector = Projector ( self . lon , self . lat , 'ait' ) else : projector = self . projector lon , lat = projector . imageToSphere ( x , y ) return lon , lat | Sample 2D distribution of points in lon lat |
5,208 | def group ( iterable , key ) : for _ , grouped in groupby ( sorted ( iterable , key = key ) , key = key ) : yield list ( grouped ) | groupby which sorts the input discards the key and returns the output as a sequence of lists . |
5,209 | def aggregate_count ( keyname ) : def inner ( docs ) : return sum ( doc [ keyname ] for doc in docs ) return keyname , inner | Straightforward sum of the given keyname . |
5,210 | def aggregate_rate ( rate_key , count_key ) : def inner ( docs ) : total = sum ( doc [ count_key ] for doc in docs ) weighted_total = sum ( doc [ rate_key ] * doc [ count_key ] for doc in docs ) total_rate = weighted_total / total return total_rate return rate_key , inner | Compute an aggregate rate for rate_key weighted according to count_key . |
5,211 | def make_aggregate ( docs , aggregations ) : new_doc = dict ( docs [ 0 ] ) for keyname , aggregation_function in aggregations : new_doc [ keyname ] = aggregation_function ( docs ) return new_doc | Given docs and aggregations return a single document with the aggregations applied . |
5,212 | def json ( value ) : uncleaned = jsonlib . dumps ( value ) clean = bleach . clean ( uncleaned ) return mark_safe ( clean ) | Sanitize the JSON string using the Bleach HTML tag remover |
5,213 | def find_pareto_front ( population ) : pareto_front = set ( range ( len ( population ) ) ) for i in range ( len ( population ) ) : if i not in pareto_front : continue ind1 = population [ i ] for j in range ( i + 1 , len ( population ) ) : ind2 = population [ j ] if ind2 . fitness . dominates ( ind1 . fitness ) or ind1 . fitness == ind2 . fitness : pareto_front . discard ( i ) if ind1 . fitness . dominates ( ind2 . fitness ) : pareto_front . discard ( j ) return pareto_front | Finds a subset of nondominated individuals in a given list |
5,214 | def _to_ndarray ( self , a ) : if isinstance ( a , ( list , tuple ) ) : a = numpy . array ( a ) if not is_ndarray ( a ) : raise TypeError ( "Expected an ndarray but got object of type '{}' instead" . format ( type ( a ) ) ) return a | Casts Python lists and tuples to a numpy array or raises a TypeError . |
5,215 | def fn_abs ( self , value ) : if is_ndarray ( value ) : return numpy . absolute ( value ) else : return abs ( value ) | Return the absolute value of a number . |
5,216 | def fn_get_mask ( self , value ) : value = self . _to_ndarray ( value ) if numpy . ma . is_masked ( value ) : return value . mask else : return numpy . zeros ( value . shape ) . astype ( bool ) | Return an array mask . |
5,217 | def fn_min ( self , a , axis = None ) : return numpy . nanmin ( self . _to_ndarray ( a ) , axis = axis ) | Return the minimum of an array ignoring any NaNs . |
5,218 | def fn_max ( self , a , axis = None ) : return numpy . nanmax ( self . _to_ndarray ( a ) , axis = axis ) | Return the maximum of an array ignoring any NaNs . |
5,219 | def fn_median ( self , a , axis = None ) : return numpy . nanmedian ( self . _to_ndarray ( a ) , axis = axis ) | Compute the median of an array ignoring NaNs . |
5,220 | def fn_mean ( self , a , axis = None ) : return numpy . nanmean ( self . _to_ndarray ( a ) , axis = axis ) | Compute the arithmetic mean of an array ignoring NaNs . |
5,221 | def fn_std ( self , a , axis = None ) : return numpy . nanstd ( self . _to_ndarray ( a ) , axis = axis ) | Compute the standard deviation of an array ignoring NaNs . |
5,222 | def fn_var ( self , a , axis = None ) : return numpy . nanvar ( self . _to_ndarray ( a ) , axis = axis ) | Compute the variance of an array ignoring NaNs . |
5,223 | def fn_ceil ( self , value ) : if is_ndarray ( value ) or isinstance ( value , ( list , tuple ) ) : return numpy . ceil ( self . _to_ndarray ( value ) ) else : return math . ceil ( value ) | Return the ceiling of a number . |
5,224 | def fn_int ( self , value ) : if is_ndarray ( value ) or isinstance ( value , ( list , tuple ) ) : return self . _to_ndarray ( value ) . astype ( 'int' ) else : return int ( value ) | Return the value cast to an int . |
5,225 | def fn_float ( self , value ) : if is_ndarray ( value ) or isinstance ( value , ( list , tuple ) ) : return self . _to_ndarray ( value ) . astype ( 'float' ) else : return float ( value ) | Return the value cast to a float . |
5,226 | def make_datetime ( dt , date_parser = parse_date ) : if ( isinstance ( dt , ( datetime . datetime , datetime . date , datetime . time , pd . Timestamp , np . datetime64 ) ) or dt in ( float ( 'nan' ) , float ( 'inf' ) , float ( '-inf' ) , None , '' ) ) : return dt if isinstance ( dt , ( float , int ) ) : return datetime_from_ordinal_float ( dt ) if isinstance ( dt , datetime . date ) : return datetime . datetime ( dt . year , dt . month , dt . day ) if isinstance ( dt , datetime . time ) : return datetime . datetime ( 1 , 1 , 1 , dt . hour , dt . minute , dt . second , dt . microsecond ) if not dt : return datetime . datetime ( 1970 , 1 , 1 ) if isinstance ( dt , basestring ) : try : return date_parser ( dt ) except ValueError : print ( 'Unable to make_datetime({})' . format ( dt ) ) raise try : return datetime . datetime ( * dt . timetuple ( ) [ : 7 ] ) except AttributeError : try : dt = list ( dt ) if 0 < len ( dt ) < 7 : try : return datetime . datetime ( * dt [ : 7 ] ) except ( TypeError , IndexError , ValueError ) : pass except ( TypeError , IndexError , ValueError , AttributeError ) : return dt return [ make_datetime ( val , date_parser = date_parser ) for val in dt ] | Coerce a datetime or string into datetime . datetime object |
5,227 | def quantize_datetime ( dt , resolution = None ) : resolution = int ( resolution or 6 ) if hasattr ( dt , 'timetuple' ) : dt = dt . timetuple ( ) if isinstance ( dt , time . struct_time ) : dt = list ( dt ) [ : 6 ] dt += [ int ( ( dt [ 5 ] - int ( dt [ 5 ] ) ) * 1000000 ) ] dt [ 5 ] = int ( dt [ 5 ] ) return datetime . datetime ( * ( dt [ : resolution ] + [ 1 ] * max ( 3 - resolution , 0 ) ) ) if isinstance ( dt , tuple ) and len ( dt ) <= 9 and all ( isinstance ( val , ( float , int ) ) for val in dt ) : dt = list ( dt ) + [ 0 ] * ( max ( 6 - len ( dt ) , 0 ) ) if len ( dt ) == 6 and isinstance ( dt [ 5 ] , float ) : dt = list ( dt ) + [ 1000000 * ( dt [ 5 ] - int ( dt [ 5 ] ) ) ] dt [ 5 ] = int ( dt [ 5 ] ) dt = tuple ( int ( val ) for val in dt ) return datetime . datetime ( * ( dt [ : resolution ] + [ 1 ] * max ( resolution - 3 , 0 ) ) ) return [ quantize_datetime ( value ) for value in dt ] | Quantize a datetime to integer years months days hours minutes seconds or microseconds |
5,228 | def timetag_str ( dt = None , sep = '-' , filler = '0' , resolution = 6 ) : resolution = int ( resolution or 6 ) if sep in ( None , False ) : sep = '' sep = str ( sep ) dt = datetime . datetime . now ( ) if dt is None else dt return sep . join ( ( '{0:' + filler + ( '2' if filler else '' ) + 'd}' ) . format ( i ) for i in tuple ( dt . timetuple ( ) [ : resolution ] ) ) | Generate a date - time tag suitable for appending to a file name . |
5,229 | def make_tz_aware ( dt , tz = 'UTC' , is_dst = None ) : dt = make_datetime ( dt ) if not isinstance ( dt , ( list , datetime . datetime , datetime . date , datetime . time , pd . Timestamp ) ) : return dt try : tz = dt . tzinfo or tz except ( ValueError , AttributeError , TypeError ) : pass try : tzstr = str ( tz ) . strip ( ) . upper ( ) if tzstr in TZ_ABBREV_NAME : is_dst = is_dst or tzstr . endswith ( 'DT' ) tz = TZ_ABBREV_NAME . get ( tzstr , tz ) except ( ValueError , AttributeError , TypeError ) : pass try : tz = pytz . timezone ( tz ) except ( ValueError , AttributeError , TypeError ) : pass try : return tz . localize ( dt , is_dst = is_dst ) except ( ValueError , AttributeError , TypeError ) : pass if not isinstance ( dt , list ) : return dt . replace ( tzinfo = tz ) return [ make_tz_aware ( dt0 , tz = tz , is_dst = is_dst ) for dt0 in dt ] | Add timezone information to a datetime object only if it is naive . |
5,230 | def translate_addresstype ( f ) : @ wraps ( f ) def wr ( r , pc ) : at = r [ "addressType" ] try : r . update ( { "addressType" : POSTCODE_API_TYPEDEFS_ADDRESS_TYPES [ at ] } ) except : logger . warning ( "Warning: {}: " "unknown 'addressType': {}" . format ( pc , at ) ) return f ( r , pc ) return wr | decorator to translate the addressType field . |
5,231 | def translate_purposes ( f ) : @ wraps ( f ) def wr ( r , pc ) : tmp = [ ] for P in r [ "purposes" ] : try : tmp . append ( POSTCODE_API_TYPEDEFS_PURPOSES [ P ] ) except : logger . warning ( "Warning: {}: " "cannot translate 'purpose': {}" . format ( pc , P ) ) tmp . append ( P ) r . update ( { "purposes" : tmp } ) return f ( r , pc ) return wr | decorator to translate the purposes field . |
5,232 | def quantile ( data , num_breaks ) : def scipy_mquantiles ( a , prob = list ( [ .25 , .5 , .75 ] ) , alphap = .4 , betap = .4 , axis = None , limit = ( ) ) : def _quantiles1D ( data , m , p ) : x = numpy . sort ( data . compressed ( ) ) n = len ( x ) if n == 0 : return numpy . ma . array ( numpy . empty ( len ( p ) , dtype = float ) , mask = True ) elif n == 1 : return numpy . ma . array ( numpy . resize ( x , p . shape ) , mask = numpy . ma . nomask ) aleph = ( n * p + m ) k = numpy . floor ( aleph . clip ( 1 , n - 1 ) ) . astype ( int ) gamma = ( aleph - k ) . clip ( 0 , 1 ) return ( 1. - gamma ) * x [ ( k - 1 ) . tolist ( ) ] + gamma * x [ k . tolist ( ) ] data = numpy . ma . array ( a , copy = False ) if data . ndim > 2 : raise TypeError ( "Array should be 2D at most !" ) if limit : condition = ( limit [ 0 ] < data ) & ( data < limit [ 1 ] ) data [ ~ condition . filled ( True ) ] = numpy . ma . masked p = numpy . array ( prob , copy = False , ndmin = 1 ) m = alphap + p * ( 1. - alphap - betap ) if ( axis is None ) : return _quantiles1D ( data , m , p ) return numpy . ma . apply_along_axis ( _quantiles1D , axis , data , m , p ) return scipy_mquantiles ( data , numpy . linspace ( 1.0 / num_breaks , 1 , num_breaks ) ) | Calculate quantile breaks . |
5,233 | def equal ( data , num_breaks ) : step = ( numpy . amax ( data ) - numpy . amin ( data ) ) / num_breaks return numpy . linspace ( numpy . amin ( data ) + step , numpy . amax ( data ) , num_breaks ) | Calculate equal interval breaks . |
5,234 | def add_column ( filename , column , formula , force = False ) : columns = parse_formula ( formula ) logger . info ( "Running file: %s" % filename ) logger . debug ( " Reading columns: %s" % columns ) data = fitsio . read ( filename , columns = columns ) logger . debug ( ' Evaluating formula: %s' % formula ) col = eval ( formula ) col = np . asarray ( col , dtype = [ ( column , col . dtype ) ] ) insert_columns ( filename , col , force = force ) return True | Add a column to a FITS file . |
5,235 | def load_files ( filenames , multiproc = False , ** kwargs ) : filenames = np . atleast_1d ( filenames ) logger . debug ( "Loading %s files..." % len ( filenames ) ) kwargs = [ dict ( filename = f , ** kwargs ) for f in filenames ] if multiproc : from multiprocessing import Pool processes = multiproc if multiproc > 0 else None p = Pool ( processes , maxtasksperchild = 1 ) out = p . map ( load_file , kwargs ) else : out = [ load_file ( kw ) for kw in kwargs ] dtype = out [ 0 ] . dtype for i , d in enumerate ( out ) : if d . dtype != dtype : logger . warn ( "Casting input data to same type." ) out [ i ] = d . astype ( dtype , copy = False ) logger . debug ( 'Concatenating arrays...' ) return np . concatenate ( out ) | Load a set of FITS files with kwargs . |
5,236 | def applyFracdet ( self , lon , lat ) : self . loadFracdet ( ) fracdet_core = meanFracdet ( self . m_fracdet , lon , lat , np . tile ( 0.1 , len ( lon ) ) ) fracdet_wide = meanFracdet ( self . m_fracdet , lon , lat , np . tile ( 0.5 , len ( lon ) ) ) return ( fracdet_core >= self . config [ self . algorithm ] [ 'fracdet_core_threshold' ] ) & ( fracdet_core >= self . config [ self . algorithm ] [ 'fracdet_core_threshold' ] ) | We want to enforce minimum fracdet for a satellite to be considered detectable |
5,237 | def applyHotspot ( self , lon , lat ) : self . loadRealResults ( ) cut_detect_real = ( self . data_real [ 'SIG' ] >= self . config [ self . algorithm ] [ 'sig_threshold' ] ) lon_real = self . data_real [ 'RA' ] [ cut_detect_real ] lat_real = self . data_real [ 'DEC' ] [ cut_detect_real ] cut_hotspot = np . tile ( True , len ( lon ) ) for ii in range ( 0 , len ( lon ) ) : cut_hotspot [ ii ] = ~ np . any ( angsep ( lon [ ii ] , lat [ ii ] , lon_real , lat_real ) < self . config [ self . algorithm ] [ 'hotspot_angsep_threshold' ] ) return cut_hotspot | Exclude objects that are too close to hotspot |
5,238 | def predict ( self , lon , lat , ** kwargs ) : assert self . classifier is not None , 'ERROR' pred = np . zeros ( len ( lon ) ) cut_geometry , flags_geometry = self . applyGeometry ( lon , lat ) x_test = [ ] for key , operation in self . config [ 'operation' ] [ 'params_intrinsic' ] : assert operation . lower ( ) in [ 'linear' , 'log' ] , 'ERROR' if operation . lower ( ) == 'linear' : x_test . append ( kwargs [ key ] ) else : x_test . append ( np . log10 ( kwargs [ key ] ) ) x_test = np . vstack ( x_test ) . T pred [ cut_geometry ] = self . classifier . predict_proba ( x_test [ cut_geometry ] ) [ : , 1 ] self . validatePredict ( pred , flags_geometry , lon , lat , kwargs [ 'r_physical' ] , kwargs [ 'abs_mag' ] , kwargs [ 'distance' ] ) return pred , flags_geometry | distance abs_mag r_physical |
5,239 | def catalogFactory ( name , ** kwargs ) : fn = lambda member : inspect . isclass ( member ) and member . __module__ == __name__ catalogs = odict ( inspect . getmembers ( sys . modules [ __name__ ] , fn ) ) if name not in list ( catalogs . keys ( ) ) : msg = "%s not found in catalogs:\n %s" % ( name , list ( kernels . keys ( ) ) ) logger . error ( msg ) msg = "Unrecognized catalog: %s" % name raise Exception ( msg ) return catalogs [ name ] ( ** kwargs ) | Factory for various catalogs . |
5,240 | def write_results ( filename , config , srcfile , samples ) : results = createResults ( config , srcfile , samples = samples ) results . write ( filename ) | Package everything nicely |
5,241 | def estimate ( self , param , burn = None , clip = 10.0 , alpha = 0.32 ) : if param not in list ( self . samples . names ) + list ( self . source . params ) + [ 'age' , 'metallicity' ] : msg = 'Unrecognized parameter: %s' % param raise KeyError ( msg ) if param in self . samples . names : if param . startswith ( 'position_angle' ) : return self . estimate_position_angle ( param , burn = burn , clip = clip , alpha = alpha ) return self . samples . peak_interval ( param , burn = burn , clip = clip , alpha = alpha ) mle = self . get_mle ( ) errors = [ np . nan , np . nan ] if param in self . source . params : err = self . source . params [ param ] . errors if err is not None : errors = err return [ float ( mle [ param ] ) , errors ] | Estimate parameter value and uncertainties |
5,242 | def estimate_params ( self , burn = None , clip = 10.0 , alpha = 0.32 ) : mle = self . get_mle ( ) out = odict ( ) for param in mle . keys ( ) : out [ param ] = self . estimate ( param , burn = burn , clip = clip , alpha = alpha ) return out | Estimate all source parameters |
5,243 | def estimate_position_angle ( self , param = 'position_angle' , burn = None , clip = 10.0 , alpha = 0.32 ) : pa = self . samples . get ( param , burn = burn , clip = clip ) peak = ugali . utils . stats . kde_peak ( pa ) shift = 180. * ( ( pa + 90 - peak ) > 180 ) pa -= shift ret = ugali . utils . stats . peak_interval ( pa , alpha ) if ret [ 0 ] < 0 : ret [ 0 ] += 180. ret [ 1 ] [ 0 ] += 180. ret [ 1 ] [ 1 ] += 180. return ret | Estimate the position angle from the posterior dealing with periodicity . |
5,244 | def date_range_for_webtrends ( cls , start_at = None , end_at = None ) : if start_at and end_at : start_date = cls . parse_standard_date_string_to_date ( start_at ) end_date = cls . parse_standard_date_string_to_date ( end_at ) return [ ( cls . parse_date_for_query ( start_date ) , cls . parse_date_for_query ( end_date ) ) ] else : return [ ( "current_hour-1" , "current_hour-1" ) ] | Get the start and end formatted for query or the last hour if none specified . Unlike reports this does not aggregate periods and so it is possible to just query a range and parse out the individual hours . |
5,245 | def get_ugali_dir ( ) : dirname = os . getenv ( 'UGALIDIR' ) if not dirname : dirname = os . path . join ( os . getenv ( 'HOME' ) , '.ugali' ) if not os . path . exists ( dirname ) : from ugali . utils . logger import logger msg = "Creating UGALIDIR:\n%s" % dirname logger . warning ( msg ) return mkdir ( dirname ) | Get the path to the ugali data directory from the environment |
5,246 | def get_iso_dir ( ) : dirname = os . path . join ( get_ugali_dir ( ) , 'isochrones' ) if not os . path . exists ( dirname ) : from ugali . utils . logger import logger msg = "Isochrone directory not found:\n%s" % dirname logger . warning ( msg ) return dirname | Get the ugali isochrone directory . |
5,247 | def registerParser ( self , parser ) : if not isinstance ( parser , Subparser ) : raise TypeError ( "%s is not an instance of a subparser." % parser ) self . parsers . append ( parser ) | Registers a parser to parse configuration inputs . |
5,248 | def addConfig ( self , name , default = None , cast = None , required = False , description = None ) : if not self . configNameRE . match ( name ) : raise InvalidConfigurationException ( "Invalid configuration name: %s" % name ) self . configs [ self . _sanitizeName ( name ) ] = { 'default' : default , 'cast' : cast , 'required' : required , 'description' : description } | Adds the given configuration option to the ConfigManager . |
5,249 | def parse ( self ) : self . _config = _Config ( ) self . _setDefaults ( ) for parser in self . parsers : for key , value in parser . parse ( self , self . _config ) . items ( ) : key = self . _sanitizeName ( key ) if key not in self . configs : raise UnknownConfigurationException ( key ) if value is not None : self . _setConfig ( key , value ) self . _ensureRequired ( ) self . _cast ( ) return self . _config | Executes the registered parsers to parse input configurations . |
5,250 | def _setDefaults ( self ) : for configName , configDict in self . configs . items ( ) : self . _setConfig ( configName , configDict [ 'default' ] ) | Sets all the expected configuration options on the config object as either the requested default value or None . |
5,251 | def _cast ( self ) : for configName , configDict in self . configs . items ( ) : if configDict [ 'cast' ] is not None : configValue = getattr ( self . _config , configName ) if configValue is not None : try : self . _setConfig ( configName , configDict [ 'cast' ] ( configValue ) ) except : raise InvalidConfigurationException ( "%s: %r" % ( configName , configValue ) ) | Iterates through our parsed configuration options and cast any options with marked cast types . |
5,252 | def list_models ( self , macaroons ) : return make_request ( "{}model" . format ( self . url ) , timeout = self . timeout , client = self . _client , cookies = self . cookies ) | Get the logged in user s models from the JIMM controller . |
5,253 | def write ( self , novel_title = 'novel' , filetype = 'txt' ) : self . _compose_chapters ( ) self . _write_to_file ( novel_title , filetype ) | Composes chapters and writes the novel to a text file |
5,254 | def _compose_chapters ( self ) : for count in range ( self . chapter_count ) : chapter_num = count + 1 c = Chapter ( self . markov , chapter_num ) self . chapters . append ( c ) | Creates chapters and appends them to a list |
5,255 | def valid_address ( address ) : if not address : return False components = str ( address ) . split ( ':' ) if len ( components ) > 2 or not valid_hostname ( components [ 0 ] ) : return False if len ( components ) == 2 and not valid_port ( components [ 1 ] ) : return False return True | Determines whether the specified address string is valid . |
5,256 | def valid_hostname ( host ) : if len ( host ) > 255 : return False if host [ - 1 : ] == '.' : host = host [ : - 1 ] return all ( _hostname_re . match ( c ) for c in host . split ( '.' ) ) | Returns whether the specified string is a valid hostname . |
5,257 | def sample ( self , n , mass_min = 0.1 , mass_max = 10. , steps = 10000 , seed = None ) : if seed is not None : np . random . seed ( seed ) d_mass = ( mass_max - mass_min ) / float ( steps ) mass = np . linspace ( mass_min , mass_max , steps ) cdf = np . insert ( np . cumsum ( d_mass * self . pdf ( mass [ 1 : ] , log_mode = False ) ) , 0 , 0. ) cdf = cdf / cdf [ - 1 ] f = scipy . interpolate . interp1d ( cdf , mass ) return f ( np . random . uniform ( size = n ) ) | Sample initial mass values between mass_min and mass_max following the IMF distribution . |
5,258 | def pdf ( cls , mass , log_mode = True ) : log_mass = np . log10 ( mass ) mb = mbreak = [ 0.08 , 0.5 ] a = alpha = [ 0.3 , 1.3 , 2.3 ] norm = 0.27947743949440446 b = 1. / norm c = b * mbreak [ 0 ] ** ( alpha [ 1 ] - alpha [ 0 ] ) d = c * mbreak [ 1 ] ** ( alpha [ 2 ] - alpha [ 1 ] ) dn_dm = b * ( mass < 0.08 ) * mass ** ( - alpha [ 0 ] ) dn_dm += c * ( 0.08 <= mass ) * ( mass < 0.5 ) * mass ** ( - alpha [ 1 ] ) dn_dm += d * ( 0.5 <= mass ) * mass ** ( - alpha [ 2 ] ) if log_mode : return dn_dm * ( mass * np . log ( 10 ) ) else : return dn_dm | PDF for the Kroupa IMF . |
5,259 | def pdf ( cls , mass , log_mode = True ) : alpha = 2.35 a = 0.060285569480482866 dn_dm = a * mass ** ( - alpha ) if log_mode : return dn_dm * ( mass * np . log ( 10 ) ) else : return dn_dm | PDF for the Salpeter IMF . |
5,260 | def _getConfigFile ( self , config ) : joinPath = lambda p : ( os . path . join ( p ) if isinstance ( p , ( tuple , list ) ) else p ) if self . filepathConfig is not None and self . filenameConfig is not None : if hasattr ( config , self . filepathConfig ) and hasattr ( config , self . filenameConfig ) : path = joinPath ( getattr ( config , self . filepathConfig ) ) name = getattr ( config , self . filenameConfig ) if os . path . isfile ( os . path . join ( path , name ) ) : return open ( os . path . join ( path , name ) , 'r' ) if self . filepath is not None and self . filename is not None : path = joinPath ( self . filepath ) name = self . filename if os . path . isfile ( os . path . join ( path , name ) ) : return open ( os . path . join ( path , name ) , 'r' ) | Retrieves a file descriptor to a configuration file to process . |
5,261 | def _count_citations ( aux_file ) : counter = defaultdict ( int ) with open ( aux_file ) as fobj : content = fobj . read ( ) for match in CITE_PATTERN . finditer ( content ) : name = match . groups ( ) [ 0 ] counter [ name ] += 1 return counter | Counts the citations in an aux - file . |
5,262 | def _setup_logger ( self ) : log = logging . getLogger ( 'latexmk.py' ) handler = logging . StreamHandler ( ) log . addHandler ( handler ) if self . opt . verbose : log . setLevel ( logging . INFO ) return log | Set up a logger . |
5,263 | def _parse_texlipse_config ( self ) : if not os . path . isfile ( '.texlipse' ) : time . sleep ( 0.1 ) if not os . path . isfile ( '.texlipse' ) : self . log . error ( '! Fatal error: File .texlipse is missing.' ) self . log . error ( '! Exiting...' ) sys . exit ( 1 ) with open ( '.texlipse' ) as fobj : content = fobj . read ( ) match = TEXLIPSE_MAIN_PATTERN . search ( content ) if match : project_name = match . groups ( ) [ 0 ] self . log . info ( 'Found inputfile in ".texlipse": %s.tex' % project_name ) return project_name else : self . log . error ( '! Fatal error: Parsing .texlipse failed.' ) self . log . error ( '! Exiting...' ) sys . exit ( 1 ) | Read the project name from the texlipse config file . texlipse . |
5,264 | def _read_latex_files ( self ) : if os . path . isfile ( '%s.aux' % self . project_name ) : cite_counter = self . generate_citation_counter ( ) self . read_glossaries ( ) else : cite_counter = { '%s.aux' % self . project_name : defaultdict ( int ) } fname = '%s.toc' % self . project_name if os . path . isfile ( fname ) : with open ( fname ) as fobj : toc_file = fobj . read ( ) else : toc_file = '' gloss_files = dict ( ) for gloss in self . glossaries : ext = self . glossaries [ gloss ] [ 1 ] filename = '%s.%s' % ( self . project_name , ext ) if os . path . isfile ( filename ) : with open ( filename ) as fobj : gloss_files [ gloss ] = fobj . read ( ) return cite_counter , toc_file , gloss_files | Check if some latex output files exist before first latex run process them and return the generated data . |
5,265 | def read_glossaries ( self ) : filename = '%s.aux' % self . project_name with open ( filename ) as fobj : main_aux = fobj . read ( ) pattern = r'\\@newglossary\{(.*)\}\{.*\}\{(.*)\}\{(.*)\}' for match in re . finditer ( pattern , main_aux ) : name , ext_i , ext_o = match . groups ( ) self . glossaries [ name ] = ( ext_i , ext_o ) | Read all existing glossaries in the main aux - file . |
5,266 | def check_errors ( self ) : errors = ERROR_PATTTERN . findall ( self . out ) if errors : self . log . error ( '! Errors occurred:' ) self . log . error ( '\n' . join ( [ error . replace ( '\r' , '' ) . strip ( ) for error in chain ( * errors ) if error . strip ( ) ] ) ) self . log . error ( '! See "%s.log" for details.' % self . project_name ) if self . opt . exit_on_error : self . log . error ( '! Exiting...' ) sys . exit ( 1 ) | Check if errors occured during a latex run by scanning the output . |
5,267 | def generate_citation_counter ( self ) : cite_counter = dict ( ) filename = '%s.aux' % self . project_name with open ( filename ) as fobj : main_aux = fobj . read ( ) cite_counter [ filename ] = _count_citations ( filename ) for match in re . finditer ( r'\\@input\{(.*.aux)\}' , main_aux ) : filename = match . groups ( ) [ 0 ] try : counter = _count_citations ( filename ) except IOError : pass else : cite_counter [ filename ] = counter return cite_counter | Generate dictionary with the number of citations in all included files . If this changes after the first latex run you have to run bibtex . |
5,268 | def latex_run ( self ) : self . log . info ( 'Running %s...' % self . latex_cmd ) cmd = [ self . latex_cmd ] cmd . extend ( LATEX_FLAGS ) cmd . append ( '%s.tex' % self . project_name ) try : with open ( os . devnull , 'w' ) as null : Popen ( cmd , stdout = null , stderr = null ) . wait ( ) except OSError : self . log . error ( NO_LATEX_ERROR % self . latex_cmd ) self . latex_run_counter += 1 fname = '%s.log' % self . project_name with codecs . open ( fname , 'r' , 'utf-8' , 'replace' ) as fobj : self . out = fobj . read ( ) self . check_errors ( ) | Start latex run . |
5,269 | def bibtex_run ( self ) : self . log . info ( 'Running bibtex...' ) try : with open ( os . devnull , 'w' ) as null : Popen ( [ 'bibtex' , self . project_name ] , stdout = null ) . wait ( ) except OSError : self . log . error ( NO_LATEX_ERROR % 'bibtex' ) sys . exit ( 1 ) shutil . copy ( '%s.bib' % self . bib_file , '%s.bib.old' % self . bib_file ) | Start bibtex run . |
5,270 | def makeindex_runs ( self , gloss_files ) : gloss_changed = False for gloss in self . glossaries : make_gloss = False ext_i , ext_o = self . glossaries [ gloss ] fname_in = '%s.%s' % ( self . project_name , ext_i ) fname_out = '%s.%s' % ( self . project_name , ext_o ) if re . search ( 'No file %s.' % fname_in , self . out ) : make_gloss = True if not os . path . isfile ( fname_out ) : make_gloss = True else : with open ( fname_out ) as fobj : try : if gloss_files [ gloss ] != fobj . read ( ) : make_gloss = True except KeyError : make_gloss = True if make_gloss : self . log . info ( 'Running makeindex (%s)...' % gloss ) try : cmd = [ 'makeindex' , '-q' , '-s' , '%s.ist' % self . project_name , '-o' , fname_in , fname_out ] with open ( os . devnull , 'w' ) as null : Popen ( cmd , stdout = null ) . wait ( ) except OSError : self . log . error ( NO_LATEX_ERROR % 'makeindex' ) sys . exit ( 1 ) gloss_changed = True return gloss_changed | Check for each glossary if it has to be regenerated with makeindex . |
5,271 | def open_preview ( self ) : self . log . info ( 'Opening preview...' ) if self . opt . pdf : ext = 'pdf' else : ext = 'dvi' filename = '%s.%s' % ( self . project_name , ext ) if sys . platform == 'win32' : try : os . startfile ( filename ) except OSError : self . log . error ( 'Preview-Error: Extension .%s is not linked to a ' 'specific application!' % ext ) elif sys . platform == 'darwin' : call ( [ 'open' , filename ] ) else : self . log . error ( 'Preview-Error: Preview function is currently not ' 'supported on Linux.' ) | Try to open a preview of the generated document . Currently only supported on Windows . |
5,272 | def need_latex_rerun ( self ) : for pattern in LATEX_RERUN_PATTERNS : if pattern . search ( self . out ) : return True return False | Test for all rerun patterns if they match the output . |
5,273 | def run ( self ) : self . old_dir = [ ] if self . opt . clean : self . old_dir = os . listdir ( '.' ) cite_counter , toc_file , gloss_files = self . _read_latex_files ( ) self . latex_run ( ) self . read_glossaries ( ) gloss_changed = self . makeindex_runs ( gloss_files ) if gloss_changed or self . _is_toc_changed ( toc_file ) : self . latex_run ( ) if self . _need_bib_run ( cite_counter ) : self . bibtex_run ( ) self . latex_run ( ) while ( self . latex_run_counter < MAX_RUNS ) : if not self . need_latex_rerun ( ) : break self . latex_run ( ) if self . opt . check_cite : cites = set ( ) with open ( '%s.aux' % self . project_name ) as fobj : aux_content = fobj . read ( ) for match in BIBCITE_PATTERN . finditer ( aux_content ) : name = match . groups ( ) [ 0 ] cites . add ( name ) with open ( '%s.bib' % self . bib_file ) as fobj : bib_content = fobj . read ( ) for match in BIBENTRY_PATTERN . finditer ( bib_content ) : name = match . groups ( ) [ 0 ] if name not in cites : self . log . info ( 'Bib entry not cited: "%s"' % name ) if self . opt . clean : ending = '.dvi' if self . opt . pdf : ending = '.pdf' for fname in os . listdir ( '.' ) : if not ( fname in self . old_dir or fname . endswith ( ending ) ) : try : os . remove ( fname ) except IOError : pass if self . opt . preview : self . open_preview ( ) | Run the LaTeX compilation . |
5,274 | def command ( self , outfile , configfile , pix ) : params = dict ( script = self . config [ 'scan' ] [ 'script' ] , config = configfile , outfile = outfile , nside = self . nside_likelihood , pix = pix , verbose = '-v' if self . verbose else '' ) cmd = '%(script)s %(config)s %(outfile)s --hpx %(nside)i %(pix)i %(verbose)s' % params return cmd | Generate the command for running the likelihood scan . |
5,275 | def submit_all ( self , coords = None , queue = None , debug = False ) : if coords is None : pixels = np . arange ( hp . nside2npix ( self . nside_likelihood ) ) else : coords = np . asarray ( coords ) if coords . ndim == 1 : coords = np . array ( [ coords ] ) if coords . shape [ 1 ] == 2 : lon , lat = coords . T radius = np . zeros ( len ( lon ) ) elif coords . shape [ 1 ] == 3 : lon , lat , radius = coords . T else : raise Exception ( "Unrecognized coords shape:" + str ( coords . shape ) ) if self . config [ 'coords' ] [ 'coordsys' ] . lower ( ) == 'cel' : lon , lat = gal2cel ( lon , lat ) vec = ang2vec ( lon , lat ) pixels = np . zeros ( 0 , dtype = int ) for v , r in zip ( vec , radius ) : pix = query_disc ( self . nside_likelihood , v , r , inclusive = True , fact = 32 ) pixels = np . hstack ( [ pixels , pix ] ) inside = ugali . utils . skymap . inFootprint ( self . config , pixels ) if inside . sum ( ) != len ( pixels ) : logger . warning ( "Ignoring pixels outside survey footprint:\n" + str ( pixels [ ~ inside ] ) ) if inside . sum ( ) == 0 : logger . warning ( "No pixels inside footprint." ) return outdir = mkdir ( self . config [ 'output' ] [ 'likedir' ] ) shutil . copy ( self . config . filename , outdir ) configfile = join ( outdir , os . path . basename ( self . config . filename ) ) pixels = pixels [ inside ] self . submit ( pixels , queue = queue , debug = debug , configfile = configfile ) | Submit likelihood analyses on a set of coordinates . If coords is None submit all coordinates in the footprint . |
5,276 | def check ( cls ) : if cls == AppSettings : return None exceptions = [ ] for setting in cls . settings . values ( ) : try : setting . check ( ) except Exception as e : exceptions . append ( str ( e ) ) if exceptions : raise ImproperlyConfigured ( "\n" . join ( exceptions ) ) | Class method to check every settings . |
5,277 | def parse_args ( name = "" , args = None ) : def _load_json_file ( path ) : with open ( path ) as f : json_data = json . load ( f ) json_data [ 'path_to_json_file' ] = path return json_data parser = argparse . ArgumentParser ( description = "%s collector for sending" " data to the performance" " platform" % name ) parser . add_argument ( '-c' , '--credentials' , dest = 'credentials' , type = _load_json_file , help = 'JSON file containing credentials ' 'for the collector' , required = True ) group = parser . add_mutually_exclusive_group ( required = True ) group . add_argument ( '-l' , '--collector' , dest = 'collector_slug' , type = str , help = 'Collector slug to query the API for the ' 'collector config' ) group . add_argument ( '-q' , '--query' , dest = 'query' , type = _load_json_file , help = 'JSON file containing details ' 'about the query to make ' 'against the source API ' 'and the target data-set' ) parser . add_argument ( '-t' , '--token' , dest = 'token' , type = _load_json_file , help = 'JSON file containing token ' 'for the collector' , required = True ) parser . add_argument ( '-b' , '--performanceplatform' , dest = 'performanceplatform' , type = _load_json_file , help = 'JSON file containing the Performance Platform ' 'config for the collector' , required = True ) parser . add_argument ( '-s' , '--start' , dest = 'start_at' , type = parse_date , help = 'Date to start collection from' ) parser . add_argument ( '-e' , '--end' , dest = 'end_at' , type = parse_date , help = 'Date to end collection' ) parser . add_argument ( '--console-logging' , dest = 'console_logging' , action = 'store_true' , help = 'Output logging to the console rather than file' ) parser . add_argument ( '--dry-run' , dest = 'dry_run' , action = 'store_true' , help = 'Instead of pushing to the Performance Platform ' 'the collector will print out what would have ' 'been pushed' ) parser . set_defaults ( console_logging = False , dry_run = False ) args = parser . 
parse_args ( args ) return args | Parse command line argument for a collector |
5,278 | def hash ( self ) : renderer_str = "{}|{}|{}|{}" . format ( self . renderer . __class__ . __name__ , self . renderer . colormap , self . renderer . fill_value , self . renderer . background_color ) if isinstance ( self . renderer , StretchedRenderer ) : renderer_str = "{}|{}|{}" . format ( renderer_str , self . renderer . method , self . renderer . colorspace ) elif isinstance ( self . renderer , UniqueValuesRenderer ) : renderer_str = "{}|{}" . format ( renderer_str , self . renderer . labels ) return hash ( "{}/{}/{}" . format ( self . variable . pk , renderer_str , self . time_index ) ) | Returns a hash of this render configuration from the variable renderer and time_index parameters . Used for caching the full - extent native projection render so that subsequent requests can be served by a warp operation only . |
5,279 | def factory ( type , module = None , ** kwargs ) : cls = type if module is None : module = __name__ fn = lambda member : inspect . isclass ( member ) and member . __module__ == module classes = odict ( inspect . getmembers ( sys . modules [ module ] , fn ) ) members = odict ( [ ( k . lower ( ) , v ) for k , v in classes . items ( ) ] ) lower = cls . lower ( ) if lower not in list ( members . keys ( ) ) : msg = "Unrecognized class: %s.%s" % ( module , cls ) raise KeyError ( msg ) return members [ lower ] ( ** kwargs ) | Factory for creating objects . Arguments are passed directly to the constructor of the chosen class . |
5,280 | def get_definition_from_renderer ( renderer ) : config = { 'colors' : [ [ x [ 0 ] , x [ 1 ] . to_hex ( ) ] for x in renderer . colormap ] , 'options' : { } } if renderer . fill_value : config [ 'options' ] [ 'fill_value' ] = renderer . fill_value if isinstance ( renderer , StretchedRenderer ) : config [ 'type' ] = 'stretched' config [ 'options' ] [ 'color_space' ] = renderer . colorspace elif isinstance ( renderer , UniqueValuesRenderer ) : config [ 'type' ] = 'unique' if renderer . labels : config [ 'options' ] [ 'labels' ] = renderer . labels elif isinstance ( renderer , ClassifiedRenderer ) : config [ 'type' ] = 'classified' else : raise ValueError ( '{0} is not a valid renderer type' . format ( renderer . __class__ . __name__ ) ) return config | Returns a dictionary definition of the given renderer |
5,281 | def set_model ( self , name , model ) : try : self . __getattribute__ ( 'models' ) except AttributeError : object . __setattr__ ( self , 'models' , odict ( ) ) self . models [ name ] = model | Set a model . |
5,282 | def set_params ( self , ** kwargs ) : for key , value in list ( kwargs . items ( ) ) : setattr ( self , key , value ) | Set the parameter values |
5,283 | def get_params ( self ) : return odict ( [ ( key , param . value ) for key , param in self . params . items ( ) ] ) | Get an odict of the parameter names and values |
5,284 | def get_free_params ( self ) : return odict ( [ ( key , param . value ) for key , param in self . params . items ( ) if param . free ] ) | Get an odict of free parameter names and values |
5,285 | def iter_finds ( regex_obj , s ) : if isinstance ( regex_obj , str ) : for m in re . finditer ( regex_obj , s ) : yield m . group ( ) else : for m in regex_obj . finditer ( s ) : yield m . group ( ) | Generate all matches found within a string for a regex and yield each match as a string |
5,286 | def composite_decorator ( func ) : @ wraps ( func ) def wrapper ( self , * args , ** kwargs ) : total = [ ] for weight , iso in zip ( self . weights , self . isochrones ) : subfunc = getattr ( iso , func . __name__ ) total . append ( weight * subfunc ( * args , ** kwargs ) ) return np . sum ( total , axis = 0 ) return wrapper | Decorator for wrapping functions that calculate a weighted sum |
5,287 | def mergeCatalogs ( catalog_list ) : for c in catalog_list : if c . data . dtype . names != catalog_list [ 0 ] . data . dtype . names : msg = "Catalog data columns not the same." raise Exception ( msg ) data = np . concatenate ( [ c . data for c in catalog_list ] ) config = catalog_list [ 0 ] . config return Catalog ( config , data = data ) | Merge a list of Catalogs . |
5,288 | def applyCut ( self , cut ) : return Catalog ( self . config , data = self . data [ cut ] ) | Return a new catalog which is a subset of objects selected using the input cut array . |
5,289 | def bootstrap ( self , mc_bit = 0x10 , seed = None ) : if seed is not None : np . random . seed ( seed ) data = copy . deepcopy ( self . data ) idx = np . random . randint ( 0 , len ( data ) , len ( data ) ) data [ self . config [ 'catalog' ] [ 'mag_1_field' ] ] [ : ] = self . mag_1 [ idx ] data [ self . config [ 'catalog' ] [ 'mag_err_1_field' ] ] [ : ] = self . mag_err_1 [ idx ] data [ self . config [ 'catalog' ] [ 'mag_2_field' ] ] [ : ] = self . mag_2 [ idx ] data [ self . config [ 'catalog' ] [ 'mag_err_2_field' ] ] [ : ] = self . mag_err_2 [ idx ] data [ self . config [ 'catalog' ] [ 'mc_source_id_field' ] ] [ : ] |= mc_bit return Catalog ( self . config , data = data ) | Return a random catalog by boostrapping the colors of the objects in the current catalog . |
5,290 | def project ( self , projector = None ) : msg = "'%s.project': ADW 2018-05-05" % self . __class__ . __name__ DeprecationWarning ( msg ) if projector is None : try : self . projector = ugali . utils . projector . Projector ( self . config [ 'coords' ] [ 'reference' ] [ 0 ] , self . config [ 'coords' ] [ 'reference' ] [ 1 ] ) except KeyError : logger . warning ( 'Projection reference point is median (lon, lat) of catalog objects' ) self . projector = ugali . utils . projector . Projector ( np . median ( self . lon ) , np . median ( self . lat ) ) else : self . projector = projector self . x , self . y = self . projector . sphereToImage ( self . lon , self . lat ) | Project coordinates on sphere to image plane using Projector class . |
5,291 | def spatialBin ( self , roi ) : if hasattr ( self , 'pixel_roi_index' ) and hasattr ( self , 'pixel' ) : logger . warning ( 'Catalog alread spatially binned' ) return self . pixel = ang2pix ( self . config [ 'coords' ] [ 'nside_pixel' ] , self . lon , self . lat ) self . pixel_roi_index = roi . indexROI ( self . lon , self . lat ) logger . info ( "Found %i objects outside ROI" % ( self . pixel_roi_index < 0 ) . sum ( ) ) | Calculate indices of ROI pixels corresponding to object locations . |
5,292 | def write ( self , outfile , clobber = True , ** kwargs ) : fitsio . write ( outfile , self . data , clobber = True , ** kwargs ) | Write the current object catalog to FITS file . |
5,293 | def _parse ( self , roi = None , filenames = None ) : if ( roi is not None ) and ( filenames is not None ) : msg = "Cannot take both roi and filenames" raise Exception ( msg ) if roi is not None : pixels = roi . getCatalogPixels ( ) filenames = self . config . getFilenames ( ) [ 'catalog' ] [ pixels ] elif filenames is None : filenames = self . config . getFilenames ( ) [ 'catalog' ] . compressed ( ) else : filenames = np . atleast_1d ( filenames ) if len ( filenames ) == 0 : msg = "No catalog files found." raise Exception ( msg ) self . data = load_infiles ( filenames ) self . _applySelection ( ) self . data = self . data . view ( np . recarray ) | Parse catalog FITS files into recarray . |
5,294 | def _defineVariables ( self ) : logger . info ( 'Catalog contains %i objects' % ( len ( self . data ) ) ) mc_source_id_field = self . config [ 'catalog' ] [ 'mc_source_id_field' ] if mc_source_id_field is not None : if mc_source_id_field not in self . data . dtype . names : array = np . zeros ( len ( self . data ) , dtype = '>i8' ) self . data = mlab . rec_append_fields ( self . data , names = mc_source_id_field , arrs = array ) logger . info ( 'Found %i simulated objects' % ( np . sum ( self . mc_source_id > 0 ) ) ) | Helper funtion to define pertinent variables from catalog data . |
5,295 | def add_node ( self , node_id , task , inputs ) : if node_id in self . nodes_by_id : raise ValueError ( 'The node {0} already exists in this workflow.' . format ( node_id ) ) node = WorkflowNode ( node_id , task , inputs ) self . nodes_by_id [ node_id ] = node for source , value in six . itervalues ( inputs ) : if source == 'dependency' : dependents = self . dependents_by_node_id . get ( value [ 0 ] , set ( ) ) dependents . add ( node_id ) self . dependents_by_node_id [ value [ 0 ] ] = dependents | Adds a node to the workflow . |
5,296 | def map_output ( self , node_id , node_output_name , parameter_name ) : self . output_mapping [ parameter_name ] = ( node_id , node_output_name ) dependents = self . dependents_by_node_id . get ( node_id , set ( ) ) dependents . add ( 'output_{}' . format ( parameter_name ) ) self . dependents_by_node_id [ node_id ] = dependents | Maps the output from a node to a workflow output . |
5,297 | def to_json ( self , indent = None ) : inputs = ParameterCollection ( self . inputs ) d = { 'meta' : { 'name' : self . name , 'description' : self . description } , 'inputs' : [ ] , 'workflow' : [ ] , 'outputs' : [ { 'name' : k , 'node' : v } for k , v in six . iteritems ( self . output_mapping ) ] } for parameter in self . inputs : input_info = { 'name' : parameter . name , 'type' : parameter . id } args , kwargs = parameter . serialize_args ( ) args = list ( args ) args . pop ( 0 ) kwargs . pop ( 'required' , None ) if args or kwargs : input_info [ 'args' ] = [ args , kwargs ] d [ 'inputs' ] . append ( input_info ) for node in sorted ( six . itervalues ( self . nodes_by_id ) , key = lambda x : x . id ) : task_name = node . task . name if not task_name : raise ValueError ( 'The task {0} does not have a name and therefore cannot be serialized.' . format ( node . task . __class__ . __name__ ) ) node_inputs = { } for input_name , ( source , value ) in six . iteritems ( node . inputs ) : input_info = { 'source' : source } if source == 'input' : input_info [ 'input' ] = inputs . by_name [ value ] . name else : input_info [ 'node' ] = value node_inputs [ input_name ] = input_info d [ 'workflow' ] . append ( { 'id' : node . id , 'task' : task_name , 'inputs' : node_inputs } ) return json . dumps ( d , indent = indent ) | Serialize this workflow to JSON |
5,298 | def from_json ( cls , text ) : d = json . loads ( text ) meta = d . get ( 'meta' , { } ) workflow = cls ( name = meta . get ( 'name' ) , description = meta . get ( 'description' ) ) for workflow_input in d . get ( 'inputs' , [ ] ) : parameter_cls = Parameter . by_id ( workflow_input [ 'type' ] ) args = [ workflow_input [ 'name' ] ] kwargs = { 'required' : True } if workflow_input . get ( 'args' ) : args = workflow_input [ 'args' ] [ 0 ] + args kwargs . update ( workflow_input [ 'args' ] [ 1 ] ) args , kwargs = parameter_cls . deserialize_args ( args , kwargs ) workflow . inputs . append ( parameter_cls ( * args , ** kwargs ) ) for node in d . get ( 'workflow' , [ ] ) : node_inputs = { } for k , v in six . iteritems ( node . get ( 'inputs' , { } ) ) : node_inputs [ k ] = ( v [ 'source' ] , v . get ( 'input' ) or v . get ( 'node' ) ) workflow . add_node ( node [ 'id' ] , Task . by_name ( node [ 'task' ] ) ( ) , node_inputs ) for output in d . get ( 'outputs' , [ ] ) : node = output [ 'node' ] node_parameters = ParameterCollection ( workflow . nodes_by_id [ node [ 0 ] ] . task . outputs ) output_param = copy . copy ( node_parameters . by_name [ node [ 1 ] ] ) output_param . name = output [ 'name' ] workflow . outputs . append ( output_param ) workflow . map_output ( node [ 0 ] , node [ 1 ] , output [ 'name' ] ) return workflow | Return a new workflow deserialized from a JSON string |
5,299 | def get_handler ( self , * args , ** options ) : handler = super ( ) . get_handler ( * args , ** options ) use_static_handler = options [ 'use_static_handler' ] insecure_serving = options [ 'insecure_serving' ] if use_static_handler and ( settings . DEBUG or insecure_serving ) : return CRAStaticFilesHandler ( handler ) return handler | Return the static files serving handler wrapping the default handler if static files should be served . Otherwise return the default handler . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.