idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
62,300 | def set_bfield ( self , B_G ) : if not ( B_G > 0 ) : raise ValueError ( 'must have B_G > 0; got %r' % ( B_G , ) ) self . in_vals [ IN_VAL_B ] = B_G return self | Set the strength of the local magnetic field . |
62,301 | def set_bfield_for_s0 ( self , s0 ) : if not ( s0 > 0 ) : raise ValueError ( 'must have s0 > 0; got %r' % ( s0 , ) ) B0 = 2 * np . pi * cgs . me * cgs . c * self . in_vals [ IN_VAL_FREQ0 ] / ( cgs . e * s0 ) self . in_vals [ IN_VAL_B ] = B0 return self | Set B to probe a certain harmonic number . |
62,302 | def set_edist_powerlaw ( self , emin_mev , emax_mev , delta , ne_cc ) : if not ( emin_mev >= 0 ) : raise ValueError ( 'must have emin_mev >= 0; got %r' % ( emin_mev , ) ) if not ( emax_mev >= emin_mev ) : raise ValueError ( 'must have emax_mev >= emin_mev; got %r, %r' % ( emax_mev , emin_mev ) ) if not ( delta >= 0 ) : raise ValueError ( 'must have delta >= 0; got %r, %r' % ( delta , ) ) if not ( ne_cc >= 0 ) : raise ValueError ( 'must have ne_cc >= 0; got %r, %r' % ( ne_cc , ) ) self . in_vals [ IN_VAL_EDIST ] = EDIST_PLW self . in_vals [ IN_VAL_EMIN ] = emin_mev self . in_vals [ IN_VAL_EMAX ] = emax_mev self . in_vals [ IN_VAL_DELTA1 ] = delta self . in_vals [ IN_VAL_NB ] = ne_cc return self | Set the energy distribution function to a power law . |
62,303 | def set_edist_powerlaw_gamma ( self , gmin , gmax , delta , ne_cc ) : if not ( gmin >= 1 ) : raise ValueError ( 'must have gmin >= 1; got %r' % ( gmin , ) ) if not ( gmax >= gmin ) : raise ValueError ( 'must have gmax >= gmin; got %r, %r' % ( gmax , gmin ) ) if not ( delta >= 0 ) : raise ValueError ( 'must have delta >= 0; got %r, %r' % ( delta , ) ) if not ( ne_cc >= 0 ) : raise ValueError ( 'must have ne_cc >= 0; got %r, %r' % ( ne_cc , ) ) self . in_vals [ IN_VAL_EDIST ] = EDIST_PLG self . in_vals [ IN_VAL_EMIN ] = ( gmin - 1 ) * E0_MEV self . in_vals [ IN_VAL_EMAX ] = ( gmax - 1 ) * E0_MEV self . in_vals [ IN_VAL_DELTA1 ] = delta self . in_vals [ IN_VAL_NB ] = ne_cc return self | Set the energy distribution function to a power law in the Lorentz factor |
62,304 | def set_freqs ( self , n , f_lo_ghz , f_hi_ghz ) : if not ( f_lo_ghz >= 0 ) : raise ValueError ( 'must have f_lo_ghz >= 0; got %r' % ( f_lo_ghz , ) ) if not ( f_hi_ghz >= f_lo_ghz ) : raise ValueError ( 'must have f_hi_ghz >= f_lo_ghz; got %r, %r' % ( f_hi_ghz , f_lo_ghz ) ) if not n >= 1 : raise ValueError ( 'must have n >= 1; got %r' % ( n , ) ) self . in_vals [ IN_VAL_NFREQ ] = n self . in_vals [ IN_VAL_FREQ0 ] = f_lo_ghz * 1e9 self . in_vals [ IN_VAL_LOGDFREQ ] = np . log10 ( f_hi_ghz / f_lo_ghz ) / n return self | Set the frequency grid on which to perform the calculations . |
62,305 | def set_obs_angle ( self , theta_rad ) : self . in_vals [ IN_VAL_THETA ] = theta_rad * 180 / np . pi return self | Set the observer angle relative to the field . |
62,306 | def set_one_freq ( self , f_ghz ) : if not ( f_ghz >= 0 ) : raise ValueError ( 'must have f_lo_ghz >= 0; got %r' % ( f_lo_ghz , ) ) self . in_vals [ IN_VAL_NFREQ ] = 1 self . in_vals [ IN_VAL_FREQ0 ] = f_ghz * 1e9 self . in_vals [ IN_VAL_LOGDFREQ ] = 1.0 return self | Set the code to calculate results at just one frequency . |
62,307 | def set_padist_gaussian_loss_cone ( self , boundary_rad , expwidth ) : self . in_vals [ IN_VAL_PADIST ] = PADIST_GLC self . in_vals [ IN_VAL_LCBDY ] = boundary_rad * 180 / np . pi self . in_vals [ IN_VAL_DELTAMU ] = expwidth return self | Set the pitch - angle distribution to a Gaussian loss cone . |
62,308 | def set_thermal_background ( self , T_K , nth_cc ) : if not ( T_K >= 0 ) : raise ValueError ( 'must have T_K >= 0; got %r' % ( T_K , ) ) if not ( nth_cc >= 0 ) : raise ValueError ( 'must have nth_cc >= 0; got %r, %r' % ( nth_cc , ) ) self . in_vals [ IN_VAL_T0 ] = T_K self . in_vals [ IN_VAL_N0 ] = nth_cc return self | Set the properties of the background thermal plasma . |
62,309 | def set_trapezoidal_integration ( self , n ) : if not ( n >= 2 ) : raise ValueError ( 'must have n >= 2; got %r' % ( n , ) ) self . in_vals [ IN_VAL_INTEG_METH ] = n + 1 return self | Set the code to use trapezoidal integration . |
62,310 | def find_rt_coefficients ( self , depth0 = None ) : if self . in_vals [ IN_VAL_NFREQ ] != 1 : raise Exception ( 'must have nfreq=1 to run Calculator.find_rt_coefficients()' ) if depth0 is not None : depth = depth0 self . in_vals [ IN_VAL_DEPTH ] = depth0 else : depth = self . in_vals [ IN_VAL_DEPTH ] scale_factor = 100 buf = np . empty ( ( 1 , 5 ) , dtype = np . float32 ) def classify ( damping_factor ) : if damping_factor >= 0.99 : return 1 if damping_factor <= 0.01 : return - 1 return 0 DONE , SHRINK , GROW , ABORT = 0 , 1 , 2 , 3 actions = { ( - 1 , - 1 ) : SHRINK , ( - 1 , 0 ) : SHRINK , ( - 1 , 1 ) : ABORT , ( 0 , - 1 ) : SHRINK , ( 0 , 0 ) : DONE , ( 0 , 1 ) : GROW , ( 1 , - 1 ) : ABORT , ( 1 , 0 ) : GROW , ( 1 , 1 ) : GROW , } last_change = DONE for attempt_number in range ( 20 ) : self . compute_lowlevel ( out_values = buf ) co = classify ( buf [ 0 , OUT_VAL_ODAMP ] ) cx = classify ( buf [ 0 , OUT_VAL_XDAMP ] ) action = actions [ co , cx ] if action == DONE : break elif action == ABORT : raise Exception ( 'depths of X and O modes are seriously incompatible' ) elif action == GROW : if last_change != GROW : scale_factor *= 0.3 depth *= scale_factor last_change = GROW elif action == SHRINK : if last_change != SHRINK : scale_factor *= 0.3 depth /= scale_factor last_change = SHRINK self . in_vals [ IN_VAL_DEPTH ] = depth else : raise Exception ( 'depth-finding algorithm did not converge!' ) sfu_to_specintens = 1e4 * cgs . cgsperjy * cgs . cmperau ** 2 / self . in_vals [ IN_VAL_AREA ] damp_X = buf [ 0 , OUT_VAL_XDAMP ] alpha_X = - np . log ( damp_X ) / depth si_X = buf [ 0 , OUT_VAL_XINT ] * sfu_to_specintens j_X = si_X * alpha_X / ( 1 - damp_X ) damp_O = buf [ 0 , OUT_VAL_ODAMP ] alpha_O = - np . log ( damp_O ) / depth si_O = buf [ 0 , OUT_VAL_OINT ] * sfu_to_specintens j_O = si_O * alpha_O / ( 1 - damp_O ) return ( j_O , alpha_O , j_X , alpha_X ) | Figure out emission and absorption coefficients for the current parameters . |
62,311 | def find_rt_coefficients_tot_intens ( self , depth0 = None ) : j_O , alpha_O , j_X , alpha_X = self . find_rt_coefficients ( depth0 = depth0 ) j_I = j_O + j_X alpha_I = 0.5 * ( alpha_O + alpha_X ) return ( j_I , alpha_I ) | Figure out total - intensity emission and absorption coefficients for the current parameters . |
62,312 | def make_path_func ( * baseparts ) : from os . path import join base = join ( * baseparts ) def path_func ( * args ) : return join ( base , * args ) return path_func | Return a function that joins paths onto some base directory . |
62,313 | def djoin ( * args ) : from os . path import join i = 0 alen = len ( args ) while i < alen and ( args [ i ] == '' or args [ i ] == '.' ) : i += 1 if i == alen : return '.' return join ( * args [ i : ] ) | dotless join for nicer paths . |
62,314 | def ensure_symlink ( src , dst ) : try : os . symlink ( src , dst ) except OSError as e : if e . errno == 17 : return True raise return False | Ensure the existence of a symbolic link pointing to src named dst . Returns a boolean indicating whether the symlink already existed . |
62,315 | def ensure_dir ( self , mode = 0o777 , parents = False ) : if parents : p = self . parent if p == self : return False p . ensure_dir ( mode , True ) made_it = False try : self . mkdir ( mode ) made_it = True except OSError as e : if e . errno == 17 : return False raise if not self . is_dir ( ) : import errno raise OSError ( errno . ENOTDIR , 'Not a directory' , str ( self ) ) return made_it | Ensure that this path exists as a directory . |
62,316 | def make_tempfile ( self , want = 'handle' , resolution = 'try_unlink' , suffix = '' , ** kwargs ) : if want not in ( 'handle' , 'path' ) : raise ValueError ( 'unrecognized make_tempfile() "want" mode %r' % ( want , ) ) if resolution not in ( 'unlink' , 'try_unlink' , 'keep' , 'overwrite' ) : raise ValueError ( 'unrecognized make_tempfile() "resolution" mode %r' % ( resolution , ) ) return Path . _PathTempfileContextManager ( self , want , resolution , suffix , kwargs ) | Get a context manager that creates and cleans up a uniquely - named temporary file with a name similar to this path . |
62,317 | def try_unlink ( self ) : try : self . unlink ( ) return True except OSError as e : if e . errno == 2 : return False raise | Try to unlink this path . If it doesn t exist no error is returned . Returns a boolean indicating whether the path was really unlinked . |
62,318 | def read_pickles ( self ) : try : import cPickle as pickle except ImportError : import pickle with self . open ( mode = 'rb' ) as f : while True : try : obj = pickle . load ( f ) except EOFError : break yield obj | Generate a sequence of objects by opening the path and unpickling items until EOF is reached . |
62,319 | def read_text ( self , encoding = None , errors = None , newline = None ) : with self . open ( mode = 'rt' , encoding = encoding , errors = errors , newline = newline ) as f : return f . read ( ) | Read this path as one large chunk of text . |
62,320 | def read_toml ( self , encoding = None , errors = None , newline = None , ** kwargs ) : import pytoml with self . open ( mode = 'rt' , encoding = encoding , errors = errors , newline = newline ) as f : return pytoml . load ( f , ** kwargs ) | Read this path as a TOML document . |
62,321 | def read_yaml ( self , encoding = None , errors = None , newline = None , ** kwargs ) : import yaml with self . open ( mode = 'rt' , encoding = encoding , errors = errors , newline = newline ) as f : return yaml . load ( f , ** kwargs ) | Read this path as a YAML document . |
62,322 | def enumeration ( cls ) : from pwkit import unicode_to_str name = cls . __name__ pickle_compat = getattr ( cls , '__pickle_compat__' , False ) def __unicode__ ( self ) : return '<enumeration holder %s>' % name def getattr_error ( self , attr ) : raise AttributeError ( 'enumeration %s does not contain attribute %s' % ( name , attr ) ) def modattr_error ( self , * args , ** kwargs ) : raise AttributeError ( 'modification of %s enumeration not allowed' % name ) clsdict = { '__doc__' : cls . __doc__ , '__slots__' : ( ) , '__unicode__' : __unicode__ , '__str__' : unicode_to_str , '__repr__' : unicode_to_str , '__getattr__' : getattr_error , '__setattr__' : modattr_error , '__delattr__' : modattr_error , } for key in dir ( cls ) : if not key . startswith ( '_' ) : clsdict [ key ] = getattr ( cls , key ) if pickle_compat : clsdict [ '__call__' ] = lambda self , x : x enumcls = type ( name , ( object , ) , clsdict ) return enumcls ( ) | A very simple decorator for creating enumerations . Unlike Python 3 . 4 enumerations this just gives a way to use a class declaration to create an immutable object containing only the values specified in the class . |
62,323 | def slice_around_gaps ( values , maxgap ) : if not ( maxgap > 0 ) : raise ValueError ( 'maxgap must be positive; got %r' % maxgap ) values = np . asarray ( values ) delta = values [ 1 : ] - values [ : - 1 ] if np . any ( delta < 0 ) : raise ValueError ( 'values must be in nondecreasing order' ) whgap = np . where ( delta > maxgap ) [ 0 ] + 1 prev_idx = None for gap_idx in whgap : yield slice ( prev_idx , gap_idx ) prev_idx = gap_idx yield slice ( prev_idx , None ) | Given an ordered array of values generate a set of slices that traverse all of the values . Within each slice no gap between adjacent values is larger than maxgap . In other words these slices break the array into chunks separated by gaps of size larger than maxgap . |
62,324 | def reduce_data_frame ( df , chunk_slicers , avg_cols = ( ) , uavg_cols = ( ) , minmax_cols = ( ) , nchunk_colname = 'nchunk' , uncert_prefix = 'u' , min_points_per_chunk = 3 ) : subds = [ df . iloc [ idx ] for idx in chunk_slicers ] subds = [ sd for sd in subds if sd . shape [ 0 ] >= min_points_per_chunk ] chunked = df . __class__ ( { nchunk_colname : np . zeros ( len ( subds ) , dtype = np . int ) } ) uncert_col_name = lambda c : uncert_prefix + c for i , subd in enumerate ( subds ) : label = chunked . index [ i ] chunked . loc [ label , nchunk_colname ] = subd . shape [ 0 ] for col in avg_cols : chunked . loc [ label , col ] = subd [ col ] . mean ( ) for col in uavg_cols : ucol = uncert_col_name ( col ) v , u = weighted_mean ( subd [ col ] , subd [ ucol ] ) chunked . loc [ label , col ] = v chunked . loc [ label , ucol ] = u for col in minmax_cols : chunked . loc [ label , 'min_' + col ] = subd [ col ] . min ( ) chunked . loc [ label , 'max_' + col ] = subd [ col ] . max ( ) return chunked | Reduce a DataFrame by collapsing rows in grouped chunks . Returns another DataFrame with similar columns but fewer rows . |
62,325 | def reduce_data_frame_evenly_with_gaps ( df , valcol , target_len , maxgap , ** kwargs ) : return reduce_data_frame ( df , slice_evenly_with_gaps ( df [ valcol ] , target_len , maxgap ) , ** kwargs ) | Reduce a DataFrame by collapsing rows in grouped chunks grouping based on gaps in one of the columns . |
62,326 | def usmooth ( window , uncerts , * data , ** kwargs ) : window = np . asarray ( window ) uncerts = np . asarray ( uncerts ) k = kwargs . pop ( 'k' , None ) if len ( kwargs ) : raise TypeError ( "smooth() got an unexpected keyword argument '%s'" % kwargs . keys ( ) [ 0 ] ) if k is None : k = window . size conv = lambda q , r : np . convolve ( q , r , mode = 'valid' ) if uncerts is None : w = np . ones_like ( x ) else : w = uncerts ** - 2 cw = conv ( w , window ) cu = np . sqrt ( conv ( w , window ** 2 ) ) / cw result = [ cu ] + [ conv ( w * np . asarray ( x ) , window ) / cw for x in data ] if k != 1 : result = [ x [ : : k ] for x in result ] return result | Smooth data series according to a window weighting based on uncertainties . |
62,327 | def weighted_variance ( x , weights ) : n = len ( x ) if n < 3 : raise ValueError ( 'cannot calculate meaningful variance of fewer ' 'than three samples' ) wt_mean = np . average ( x , weights = weights ) return np . average ( np . square ( x - wt_mean ) , weights = weights ) * n / ( n - 1 ) | Return the variance of a weighted sample . |
62,328 | def unit_tophat_ee ( x ) : x = np . asarray ( x ) x1 = np . atleast_1d ( x ) r = ( ( 0 < x1 ) & ( x1 < 1 ) ) . astype ( x . dtype ) if x . ndim == 0 : return np . asscalar ( r ) return r | Tophat function on the unit interval left - exclusive and right - exclusive . Returns 1 if 0 < x < 1 0 otherwise . |
62,329 | def make_tophat_ee ( lower , upper ) : if not np . isfinite ( lower ) : raise ValueError ( '"lower" argument must be finite number; got %r' % lower ) if not np . isfinite ( upper ) : raise ValueError ( '"upper" argument must be finite number; got %r' % upper ) def range_tophat_ee ( x ) : x = np . asarray ( x ) x1 = np . atleast_1d ( x ) r = ( ( lower < x1 ) & ( x1 < upper ) ) . astype ( x . dtype ) if x . ndim == 0 : return np . asscalar ( r ) return r range_tophat_ee . __doc__ = ( 'Ranged tophat function, left-exclusive and ' 'right-exclusive. Returns 1 if %g < x < %g, ' '0 otherwise.' ) % ( lower , upper ) return range_tophat_ee | Return a ufunc - like tophat function on the defined range left - exclusive and right - exclusive . Returns 1 if lower < x < upper 0 otherwise . |
62,330 | def make_tophat_ei ( lower , upper ) : if not np . isfinite ( lower ) : raise ValueError ( '"lower" argument must be finite number; got %r' % lower ) if not np . isfinite ( upper ) : raise ValueError ( '"upper" argument must be finite number; got %r' % upper ) def range_tophat_ei ( x ) : x = np . asarray ( x ) x1 = np . atleast_1d ( x ) r = ( ( lower < x1 ) & ( x1 <= upper ) ) . astype ( x . dtype ) if x . ndim == 0 : return np . asscalar ( r ) return r range_tophat_ei . __doc__ = ( 'Ranged tophat function, left-exclusive and ' 'right-inclusive. Returns 1 if %g < x <= %g, ' '0 otherwise.' ) % ( lower , upper ) return range_tophat_ei | Return a ufunc - like tophat function on the defined range left - exclusive and right - inclusive . Returns 1 if lower < x < = upper 0 otherwise . |
62,331 | def make_tophat_ie ( lower , upper ) : if not np . isfinite ( lower ) : raise ValueError ( '"lower" argument must be finite number; got %r' % lower ) if not np . isfinite ( upper ) : raise ValueError ( '"upper" argument must be finite number; got %r' % upper ) def range_tophat_ie ( x ) : x = np . asarray ( x ) x1 = np . atleast_1d ( x ) r = ( ( lower <= x1 ) & ( x1 < upper ) ) . astype ( x . dtype ) if x . ndim == 0 : return np . asscalar ( r ) return r range_tophat_ie . __doc__ = ( 'Ranged tophat function, left-inclusive and ' 'right-exclusive. Returns 1 if %g <= x < %g, ' '0 otherwise.' ) % ( lower , upper ) return range_tophat_ie | Return a ufunc - like tophat function on the defined range left - inclusive and right - exclusive . Returns 1 if lower < = x < upper 0 otherwise . |
62,332 | def make_tophat_ii ( lower , upper ) : if not np . isfinite ( lower ) : raise ValueError ( '"lower" argument must be finite number; got %r' % lower ) if not np . isfinite ( upper ) : raise ValueError ( '"upper" argument must be finite number; got %r' % upper ) def range_tophat_ii ( x ) : x = np . asarray ( x ) x1 = np . atleast_1d ( x ) r = ( ( lower <= x1 ) & ( x1 <= upper ) ) . astype ( x . dtype ) if x . ndim == 0 : return np . asscalar ( r ) return r range_tophat_ii . __doc__ = ( 'Ranged tophat function, left-inclusive and ' 'right-inclusive. Returns 1 if %g <= x <= %g, ' '0 otherwise.' ) % ( lower , upper ) return range_tophat_ii | Return a ufunc - like tophat function on the defined range left - inclusive and right - inclusive . Returns 1 if lower < x < upper 0 otherwise . |
62,333 | def make_step_lcont ( transition ) : if not np . isfinite ( transition ) : raise ValueError ( '"transition" argument must be finite number; got %r' % transition ) def step_lcont ( x ) : x = np . asarray ( x ) x1 = np . atleast_1d ( x ) r = ( x1 > transition ) . astype ( x . dtype ) if x . ndim == 0 : return np . asscalar ( r ) return r step_lcont . __doc__ = ( 'Left-continuous step function. Returns 1 if x > %g, ' '0 otherwise.' ) % ( transition , ) return step_lcont | Return a ufunc - like step function that is left - continuous . Returns 1 if x > transition 0 otherwise . |
62,334 | def make_step_rcont ( transition ) : if not np . isfinite ( transition ) : raise ValueError ( '"transition" argument must be finite number; got %r' % transition ) def step_rcont ( x ) : x = np . asarray ( x ) x1 = np . atleast_1d ( x ) r = ( x1 >= transition ) . astype ( x . dtype ) if x . ndim == 0 : return np . asscalar ( r ) return r step_rcont . __doc__ = ( 'Right-continuous step function. Returns 1 if x >= ' '%g, 0 otherwise.' ) % ( transition , ) return step_rcont | Return a ufunc - like step function that is right - continuous . Returns 1 if x > = transition 0 otherwise . |
62,335 | def make_fixed_temp_multi_apec ( kTs , name_template = 'apec%d' , norm = None ) : total_model = None sub_models = [ ] for i , kT in enumerate ( kTs ) : component = ui . xsapec ( name_template % i ) component . kT = kT ui . freeze ( component . kT ) if norm is not None : component . norm = norm sub_models . append ( component ) if total_model is None : total_model = component else : total_model = total_model + component return total_model , sub_models | Create a model summing multiple APEC components at fixed temperatures . |
62,336 | def expand_rmf_matrix ( rmf ) : n_chan = rmf . e_min . size n_energy = rmf . n_grp . size expanded = np . zeros ( ( n_energy , n_chan ) ) mtx_ofs = 0 grp_ofs = 0 for i in range ( n_energy ) : for j in range ( rmf . n_grp [ i ] ) : f = rmf . f_chan [ grp_ofs ] n = rmf . n_chan [ grp_ofs ] expanded [ i , f : f + n ] = rmf . matrix [ mtx_ofs : mtx_ofs + n ] mtx_ofs += n grp_ofs += 1 return expanded | Expand an RMF matrix stored in compressed form . |
62,337 | def derive_identity_arf ( name , arf ) : from sherpa . astro . data import DataARF from sherpa . astro . instrument import ARF1D darf = DataARF ( name , arf . energ_lo , arf . energ_hi , np . ones ( arf . specresp . shape ) , arf . bin_lo , arf . bin_hi , arf . exposure , header = None , ) return ARF1D ( darf , pha = arf . _pha ) | Create an identity ARF that has uniform sensitivity . |
62,338 | def get_source_qq_data ( id = None ) : sdata = ui . get_data ( id = id ) kev = sdata . get_x ( ) obs_data = sdata . counts model_data = ui . get_model ( id = id ) ( kev ) return np . vstack ( ( kev , obs_data , model_data ) ) | Get data for a quantile - quantile plot of the source data and model . |
62,339 | def get_bkg_qq_data ( id = None , bkg_id = None ) : bdata = ui . get_bkg ( id = id , bkg_id = bkg_id ) kev = bdata . get_x ( ) obs_data = bdata . counts model_data = ui . get_bkg_model ( id = id , bkg_id = bkg_id ) ( kev ) return np . vstack ( ( kev , obs_data , model_data ) ) | Get data for a quantile - quantile plot of the background data and model . |
62,340 | def make_qq_plot ( kev , obs , mdl , unit , key_text ) : import omega as om kev = np . asarray ( kev ) obs = np . asarray ( obs ) mdl = np . asarray ( mdl ) c_obs = np . cumsum ( obs ) c_mdl = np . cumsum ( mdl ) mx = max ( c_obs [ - 1 ] , c_mdl [ - 1 ] ) p = om . RectPlot ( ) p . addXY ( [ 0 , mx ] , [ 0 , mx ] , '1:1' ) p . addXY ( c_mdl , c_obs , key_text ) locs = np . array ( [ 0 , 0.05 , 0.08 , 0.11 , 0.17 , 0.3 , 0.4 , 0.7 , 1 ] ) * ( kev . size - 2 ) c0 = mx * 1.05 c1 = mx * 1.1 for loc in locs : i0 = int ( np . floor ( loc ) ) frac = loc - i0 kevval = ( 1 - frac ) * kev [ i0 ] + frac * kev [ i0 + 1 ] mdlval = ( 1 - frac ) * c_mdl [ i0 ] + frac * c_mdl [ i0 + 1 ] obsval = ( 1 - frac ) * c_obs [ i0 ] + frac * c_obs [ i0 + 1 ] p . addXY ( [ mdlval , mdlval ] , [ c0 , c1 ] , '%.2f keV' % kevval , dsn = 2 ) p . addXY ( [ c0 , c1 ] , [ obsval , obsval ] , None , dsn = 2 ) p . setLabels ( 'Cumulative model ' + unit , 'Cumulative data ' + unit ) p . defaultKeyOverlay . vAlign = 0.3 return p | Make a quantile - quantile plot comparing events and a model . |
62,341 | def make_multi_qq_plots ( arrays , key_text ) : import omega as om p = om . RectPlot ( ) p . addXY ( [ 0 , 1. ] , [ 0 , 1. ] , '1:1' ) for index , array in enumerate ( arrays ) : kev , obs , mdl = array c_obs = np . cumsum ( obs ) c_mdl = np . cumsum ( mdl ) mx = 0.5 * ( c_obs [ - 1 ] + c_mdl [ - 1 ] ) c_obs /= mx c_mdl /= mx p . addXY ( c_mdl , c_obs , '%s #%d' % ( key_text , index ) ) locs = np . array ( [ 0 , 0.05 , 0.08 , 0.11 , 0.17 , 0.3 , 0.4 , 0.7 , 1 ] ) * ( kev . size - 2 ) c0 = 1.05 c1 = 1.1 for loc in locs : i0 = int ( np . floor ( loc ) ) frac = loc - i0 kevval = ( 1 - frac ) * kev [ i0 ] + frac * kev [ i0 + 1 ] mdlval = ( 1 - frac ) * c_mdl [ i0 ] + frac * c_mdl [ i0 + 1 ] obsval = ( 1 - frac ) * c_obs [ i0 ] + frac * c_obs [ i0 + 1 ] p . addXY ( [ mdlval , mdlval ] , [ c0 , c1 ] , '%.2f keV' % kevval , dsn = 2 ) p . addXY ( [ c0 , c1 ] , [ obsval , obsval ] , None , dsn = 2 ) p . setLabels ( 'Cumulative rescaled model' , 'Cumulative rescaled data' ) p . defaultKeyOverlay . vAlign = 0.3 return p | Make a quantile - quantile plot comparing multiple sets of events and models . |
62,342 | def make_spectrum_plot ( model_plot , data_plot , desc , xmin_clamp = 0.01 , min_valid_x = None , max_valid_x = None ) : import omega as om model_x = np . concatenate ( ( model_plot . xlo , [ model_plot . xhi [ - 1 ] ] ) ) model_x [ 0 ] = max ( model_x [ 0 ] , xmin_clamp ) model_y = np . concatenate ( ( model_plot . y , [ 0. ] ) ) is_bad = ~ np . isfinite ( model_y ) if is_bad . sum ( ) : from . cli import warn warn ( 'bad Sherpa model Y value(s) at: %r' , np . where ( is_bad ) [ 0 ] ) model_y [ is_bad ] = 0 data_left_edges = data_plot . x - 0.5 * data_plot . xerr data_left_edges [ 0 ] = max ( data_left_edges [ 0 ] , xmin_clamp ) data_hist_x = np . concatenate ( ( data_left_edges , [ data_plot . x [ - 1 ] + 0.5 * data_plot . xerr [ - 1 ] ] ) ) data_hist_y = np . concatenate ( ( data_plot . y , [ 0. ] ) ) log_bounds_pad_factor = 0.9 xlow = model_x [ 0 ] * log_bounds_pad_factor xhigh = model_x [ - 1 ] / log_bounds_pad_factor p = om . RectPlot ( ) if min_valid_x is not None : p . add ( om . rect . XBand ( 1e-3 * xlow , min_valid_x , keyText = None ) , zheight = - 1 , dsn = 1 ) if max_valid_x is not None : p . add ( om . rect . XBand ( max_valid_x , xhigh * 1e3 , keyText = None ) , zheight = - 1 , dsn = 1 ) csp = om . rect . ContinuousSteppedPainter ( keyText = desc + ' Model' ) csp . setFloats ( model_x , model_y ) p . add ( csp ) csp = om . rect . ContinuousSteppedPainter ( keyText = None ) csp . setFloats ( data_hist_x , data_hist_y ) p . add ( csp ) p . addXYErr ( data_plot . x , data_plot . y , data_plot . yerr , desc + ' Data' , lines = 0 , dsn = 1 ) p . setLabels ( data_plot . xlabel , data_plot . ylabel ) p . setLinLogAxes ( True , False ) p . setBounds ( xlow , xhigh ) return p , xlow , xhigh | Make a plot of a spectral model and data . |
62,343 | def download_file ( local_filename , url , clobber = False ) : dir_name = os . path . dirname ( local_filename ) mkdirs ( dir_name ) if clobber or not os . path . exists ( local_filename ) : i = requests . get ( url ) if i . status_code == 404 : print ( 'Failed to download file:' , local_filename , url ) return False chunk_size_in_bytes = 1024 * 1024 with open ( local_filename , 'wb' ) as local_file : for chunk in i . iter_content ( chunk_size = chunk_size_in_bytes ) : local_file . write ( chunk ) return True | Download the given file . Clobber overwrites file if exists . |
62,344 | def download_json ( local_filename , url , clobber = False ) : with open ( local_filename , 'w' ) as json_file : json_file . write ( json . dumps ( requests . get ( url ) . json ( ) , sort_keys = True , indent = 2 , separators = ( ',' , ': ' ) ) ) | Download the given JSON file and pretty - print before we output it . |
62,345 | def data_to_argb32 ( data , cmin = None , cmax = None , stretch = 'linear' , cmap = 'black_to_blue' ) : clipper = Clipper ( ) clipper . alloc_buffer ( data ) clipper . set_tile_size ( ) clipper . dmin = cmin if cmin is not None else data . min ( ) clipper . dmax = cmax if cmax is not None else data . max ( ) clipper . ensure_all_updated ( data ) stretcher = Stretcher ( stretch ) stretcher . alloc_buffer ( clipper . buffer ) stretcher . set_tile_size ( ) stretcher . ensure_all_updated ( clipper . buffer ) mapper = ColorMapper ( cmap ) mapper . alloc_buffer ( stretcher . buffer ) mapper . set_tile_size ( ) mapper . ensure_all_updated ( stretcher . buffer ) return mapper . buffer | Turn arbitrary data values into ARGB32 colors . |
62,346 | def data_to_imagesurface ( data , ** kwargs ) : import cairo data = np . atleast_2d ( data ) if data . ndim != 2 : raise ValueError ( 'input array may not have more than 2 dimensions' ) argb32 = data_to_argb32 ( data , ** kwargs ) format = cairo . FORMAT_ARGB32 height , width = argb32 . shape stride = cairo . ImageSurface . format_stride_for_width ( format , width ) if argb32 . strides [ 0 ] != stride : raise ValueError ( 'stride of data array not compatible with ARGB32' ) return cairo . ImageSurface . create_for_data ( argb32 , format , width , height , stride ) | Turn arbitrary data values into a Cairo ImageSurface . |
62,347 | def get_token ( filename = TOKEN_PATH , envvar = TOKEN_ENVVAR ) : if os . path . isfile ( filename ) : with open ( filename ) as token_file : token = token_file . readline ( ) . strip ( ) else : token = os . environ . get ( envvar ) if not token : raise ValueError ( "No token found.\n" "{} file doesn't exist.\n{} environment variable is not set." . format ( filename , envvar ) ) return token | Returns pipeline_token for API |
62,348 | def stats ( self , antnames ) : nbyant = np . zeros ( self . nants , dtype = np . int ) sum = np . zeros ( self . nants , dtype = np . complex ) sumsq = np . zeros ( self . nants ) q = np . abs ( self . normvis - 1 ) for i in range ( self . nsamps ) : i1 , i2 = self . blidxs [ i ] nbyant [ i1 ] += 1 nbyant [ i2 ] += 1 sum [ i1 ] += q [ i ] sum [ i2 ] += q [ i ] sumsq [ i1 ] += q [ i ] ** 2 sumsq [ i2 ] += q [ i ] ** 2 avg = sum / nbyant std = np . sqrt ( sumsq / nbyant - avg ** 2 ) navg = 1. / np . median ( avg ) nstd = 1. / np . median ( std ) for i in range ( self . nants ) : print ( ' %2d %10s %3d %f %f %f %f' % ( i , antnames [ i ] , nbyant [ i ] , avg [ i ] , std [ i ] , avg [ i ] * navg , std [ i ] * nstd ) ) | XXX may be out of date . |
62,349 | def _qr_factor_packed ( a , enorm , finfo ) : machep = finfo . eps n , m = a . shape if m < n : raise ValueError ( '"a" must be at least as tall as it is wide' ) acnorm = np . empty ( n , finfo . dtype ) for j in range ( n ) : acnorm [ j ] = enorm ( a [ j ] , finfo ) rdiag = acnorm . copy ( ) wa = acnorm . copy ( ) pmut = np . arange ( n ) for i in range ( n ) : kmax = rdiag [ i : ] . argmax ( ) + i if kmax != i : temp = pmut [ i ] pmut [ i ] = pmut [ kmax ] pmut [ kmax ] = temp rdiag [ kmax ] = rdiag [ i ] wa [ kmax ] = wa [ i ] temp = a [ i ] . copy ( ) a [ i ] = a [ kmax ] a [ kmax ] = temp ainorm = enorm ( a [ i , i : ] , finfo ) if ainorm == 0 : rdiag [ i ] = 0 continue if a [ i , i ] < 0 : ainorm = - ainorm a [ i , i : ] /= ainorm a [ i , i ] += 1 for j in range ( i + 1 , n ) : a [ j , i : ] -= a [ i , i : ] * np . dot ( a [ i , i : ] , a [ j , i : ] ) / a [ i , i ] if rdiag [ j ] != 0 : rdiag [ j ] *= np . sqrt ( max ( 1 - ( a [ j , i ] / rdiag [ j ] ) ** 2 , 0 ) ) if 0.05 * ( rdiag [ j ] / wa [ j ] ) ** 2 <= machep : wa [ j ] = rdiag [ j ] = enorm ( a [ j , i + 1 : ] , finfo ) rdiag [ i ] = - ainorm return pmut , rdiag , acnorm | Compute the packed pivoting Q - R factorization of a matrix . |
62,350 | def _qr_factor_full ( a , dtype = np . float ) : n , m = a . shape packed , pmut , rdiag , acnorm = _manual_qr_factor_packed ( a , dtype ) r = np . zeros ( ( n , m ) ) for i in range ( n ) : r [ i , : i ] = packed [ i , : i ] r [ i , i ] = rdiag [ i ] q = np . eye ( m ) v = np . empty ( m ) for i in range ( n ) : v [ : ] = packed [ i ] v [ : i ] = 0 hhm = np . eye ( m ) - 2 * np . outer ( v , v ) / np . dot ( v , v ) q = np . dot ( hhm , q ) return q , r , pmut | Compute the QR factorization of a matrix with pivoting . |
62,351 | def _qrd_solve ( r , pmut , ddiag , bqt , sdiag ) : n , m = r . shape for i in range ( n ) : r [ i , i : ] = r [ i : , i ] x = r . diagonal ( ) . copy ( ) zwork = bqt . copy ( ) for i in range ( n ) : li = pmut [ i ] if ddiag [ li ] == 0 : sdiag [ i ] = r [ i , i ] r [ i , i ] = x [ i ] continue sdiag [ i : ] = 0 sdiag [ i ] = ddiag [ li ] bqtpi = 0. for j in range ( i , n ) : if sdiag [ j ] == 0 : continue if abs ( r [ j , j ] ) < abs ( sdiag [ j ] ) : cot = r [ j , j ] / sdiag [ j ] sin = 0.5 / np . sqrt ( 0.25 + 0.25 * cot ** 2 ) cos = sin * cot else : tan = sdiag [ j ] / r [ j , j ] cos = 0.5 / np . sqrt ( 0.25 + 0.25 * tan ** 2 ) sin = cos * tan r [ j , j ] = cos * r [ j , j ] + sin * sdiag [ j ] temp = cos * zwork [ j ] + sin * bqtpi bqtpi = - sin * zwork [ j ] + cos * bqtpi zwork [ j ] = temp if j + 1 < n : temp = cos * r [ j , j + 1 : ] + sin * sdiag [ j + 1 : ] sdiag [ j + 1 : ] = - sin * r [ j , j + 1 : ] + cos * sdiag [ j + 1 : ] r [ j , j + 1 : ] = temp sdiag [ i ] = r [ i , i ] r [ i , i ] = x [ i ] nsing = n for i in range ( n ) : if sdiag [ i ] == 0. : nsing = i zwork [ i : ] = 0 break if nsing > 0 : zwork [ nsing - 1 ] /= sdiag [ nsing - 1 ] for i in range ( nsing - 2 , - 1 , - 1 ) : s = np . dot ( zwork [ i + 1 : nsing ] , r [ i , i + 1 : nsing ] ) zwork [ i ] = ( zwork [ i ] - s ) / sdiag [ i ] x [ pmut ] = zwork return x | Solve an equation given a QR factored matrix and a diagonal . |
62,352 | def _qrd_solve_full ( a , b , ddiag , dtype = np . float ) : a = np . asarray ( a , dtype ) b = np . asarray ( b , dtype ) ddiag = np . asarray ( ddiag , dtype ) n , m = a . shape assert m >= n assert b . shape == ( m , ) assert ddiag . shape == ( n , ) q , r , pmut = _qr_factor_full ( a ) bqt = np . dot ( b , q . T ) x , s = _manual_qrd_solve ( r [ : , : n ] , pmut , ddiag , bqt , dtype = dtype , build_s = True ) return x , s , pmut | Solve the equation A^T x = B D x = 0 . |
62,353 | def _calc_covariance ( r , pmut , tol = 1e-14 ) : n = r . shape [ 1 ] assert r . shape [ 0 ] >= n r = r . copy ( ) jrank = - 1 abstol = tol * abs ( r [ 0 , 0 ] ) for i in range ( n ) : if abs ( r [ i , i ] ) <= abstol : break r [ i , i ] **= - 1 for j in range ( i ) : temp = r [ i , i ] * r [ i , j ] r [ i , j ] = 0. r [ i , : j + 1 ] -= temp * r [ j , : j + 1 ] jrank = i for i in range ( jrank + 1 ) : for j in range ( i ) : r [ j , : j + 1 ] += r [ i , j ] * r [ i , : j + 1 ] r [ i , : i + 1 ] *= r [ i , i ] wa = np . empty ( n ) wa . fill ( r [ 0 , 0 ] ) for i in range ( n ) : pi = pmut [ i ] sing = i > jrank for j in range ( i + 1 ) : if sing : r [ i , j ] = 0. pj = pmut [ j ] if pj > pi : r [ pi , pj ] = r [ i , j ] elif pj < pi : r [ pj , pi ] = r [ i , j ] wa [ pi ] = r [ i , i ] for i in range ( n ) : r [ i , : i + 1 ] = r [ : i + 1 , i ] r [ i , i ] = wa [ i ] return r | Calculate the covariance matrix of the fitted parameters |
62,354 | def invoke_tool ( namespace , tool_class = None ) : import sys from . . import cli cli . propagate_sigint ( ) cli . unicode_stdio ( ) cli . backtrace_on_usr1 ( ) if tool_class is None : for value in itervalues ( namespace ) : if is_strict_subclass ( value , Multitool ) : if tool_class is not None : raise PKError ( 'do not know which Multitool implementation to use' ) tool_class = value if tool_class is None : raise PKError ( 'no Multitool implementation to use' ) tool = tool_class ( ) tool . populate ( itervalues ( namespace ) ) tool . commandline ( sys . argv ) | Invoke a tool and exit . |
62,355 | def get_arg_parser ( self , ** kwargs ) : import argparse ap = argparse . ArgumentParser ( prog = kwargs [ 'argv0' ] , description = self . summary , ) return ap | Return an instance of argparse . ArgumentParser used to process this tool s command - line arguments . |
62,356 | def register ( self , cmd ) : if cmd . name is None : raise ValueError ( 'no name set for Command object %r' % cmd ) if cmd . name in self . commands : raise ValueError ( 'a command named "%s" has already been ' 'registered' % cmd . name ) self . commands [ cmd . name ] = cmd return self | Register a new command with the tool . cmd is expected to be an instance of Command although here only the cmd . name attribute is investigated . Multiple commands with the same name are not allowed to be registered . Returns self . |
62,357 | def invoke_command ( self , cmd , args , ** kwargs ) : new_kwargs = kwargs . copy ( ) new_kwargs [ 'argv0' ] = kwargs [ 'argv0' ] + ' ' + cmd . name new_kwargs [ 'parent' ] = self new_kwargs [ 'parent_kwargs' ] = kwargs return cmd . invoke_with_usage ( args , ** new_kwargs ) | This function mainly exists to be overridden by subclasses . |
62,358 | def merge_bibtex_collections ( citednames , maindict , extradicts , allow_missing = False ) : allrecords = { } for ed in extradicts : allrecords . update ( ed ) allrecords . update ( maindict ) missing = [ ] from collections import OrderedDict records = OrderedDict ( ) from itertools import chain wantednames = sorted ( chain ( citednames , six . viewkeys ( maindict ) ) ) for name in wantednames : rec = allrecords . get ( name ) if rec is None : missing . append ( name ) else : records [ name ] = rec if len ( missing ) and not allow_missing : raise PKError ( 'missing BibTeX records: %s' , ' ' . join ( missing ) ) return records | There must be a way to be efficient and stream output instead of loading everything into memory at once but meh . |
62,359 | def write_bibtex_dict ( stream , entries ) : from bibtexparser . bwriter import BibTexWriter writer = BibTexWriter ( ) writer . indent = ' ' writer . entry_separator = '' first = True for rec in entries : if first : first = False else : stream . write ( b'\n' ) stream . write ( writer . _entry_to_bibtex ( rec ) . encode ( 'utf8' ) ) | bibtexparser . write converts the entire database to one big string and writes it out in one go . I m sure it will always all fit in RAM but some things just will not stand . |
62,360 | def merge_bibtex_with_aux ( auxpath , mainpath , extradir , parse = get_bibtex_dict , allow_missing = False ) : auxpath = Path ( auxpath ) mainpath = Path ( mainpath ) extradir = Path ( extradir ) with auxpath . open ( 'rt' ) as aux : citednames = sorted ( cited_names_from_aux_file ( aux ) ) main = mainpath . try_open ( mode = 'rt' ) if main is None : maindict = { } else : maindict = parse ( main ) main . close ( ) def gen_extra_dicts ( ) : for item in sorted ( extradir . glob ( '*.bib' ) ) : with item . open ( 'rt' ) as extra : yield parse ( extra ) merged = merge_bibtex_collections ( citednames , maindict , gen_extra_dicts ( ) , allow_missing = allow_missing ) with mainpath . make_tempfile ( want = 'handle' , resolution = 'overwrite' ) as newbib : write_bibtex_dict ( newbib , six . viewvalues ( merged ) ) | Merge multiple BibTeX files into a single homogeneously - formatted output using a LaTeX . aux file to know which records are worth paying attention to . |
62,361 | def just_smart_bibtools ( bib_style , aux , bib ) : extradir = Path ( '.bibtex' ) extradir . ensure_dir ( parents = True ) bib_export ( bib_style , aux , extradir / 'ZZ_bibtools.bib' , no_tool_ok = True , quiet = True , ignore_missing = True ) merge_bibtex_with_aux ( aux , bib , extradir ) | Tectonic has taken over most of the features that this tool used to provide but here s a hack to keep my smart . bib file generation working . |
62,362 | def aap_to_bp ( ant1 , ant2 , pol ) : if ant1 < 0 : raise ValueError ( 'first antenna is below 0: %s' % ant1 ) if ant2 < ant1 : raise ValueError ( 'second antenna is below first: %s' % ant2 ) if pol < 1 or pol > 12 : raise ValueError ( 'illegal polarization code %s' % pol ) fps = _pol_to_fpol [ pol ] ap1 = ( ant1 << 3 ) + ( ( fps >> 4 ) & 0x07 ) ap2 = ( ant2 << 3 ) + ( fps & 0x07 ) return ap1 , ap2 | Create a basepol from antenna numbers and a CASA polarization code . |
62,363 | def _finish_timeslot ( self ) : for fpol , aps in self . ap_by_fpol . items ( ) : aps = sorted ( aps ) nap = len ( aps ) for i1 , ap1 in enumerate ( aps ) : for i2 in range ( i1 , nap ) : ap2 = aps [ i2 ] bp1 = ( ap1 , ap2 ) info = self . data_by_bp . get ( bp1 ) if info is None : continue data1 , flags1 = info for i3 in range ( i2 , nap ) : ap3 = aps [ i3 ] bp2 = ( ap2 , ap3 ) info = self . data_by_bp . get ( bp2 ) if info is None : continue data2 , flags2 = info bp3 = ( ap1 , aps [ i3 ] ) info = self . data_by_bp . get ( bp3 ) if info is None : continue data3 , flags3 = info tflags = flags1 & flags2 np . logical_and ( tflags , flags3 , tflags ) if not tflags . any ( ) : continue triple = data3 . conj ( ) np . multiply ( triple , data1 , triple ) np . multiply ( triple , data2 , triple ) self . _process_sample ( ap1 , ap2 , ap3 , triple , tflags ) self . cur_time = - 1. self . bp_by_ap = None self . ap_by_fpol = None | We have loaded in all of the visibilities in one timeslot . We can now compute the phase closure triples . |
62,364 | def _process_sample ( self , ap1 , ap2 , ap3 , triple , tflags ) : np . divide ( triple , np . abs ( triple ) , triple ) phase = np . angle ( triple ) self . ap_spec_stats_by_ddid [ self . cur_ddid ] . accum ( ap1 , phase , tflags + 0. ) self . ap_spec_stats_by_ddid [ self . cur_ddid ] . accum ( ap2 , phase , tflags + 0. ) self . ap_spec_stats_by_ddid [ self . cur_ddid ] . accum ( ap3 , phase , tflags + 0. ) triple = np . dot ( triple , tflags ) / tflags . sum ( ) phase = np . angle ( triple ) self . global_stats_by_time . accum ( self . cur_time , phase ) self . ap_stats_by_ddid [ self . cur_ddid ] . accum ( ap1 , phase ) self . ap_stats_by_ddid [ self . cur_ddid ] . accum ( ap2 , phase ) self . ap_stats_by_ddid [ self . cur_ddid ] . accum ( ap3 , phase ) self . bp_stats_by_ddid [ self . cur_ddid ] . accum ( ( ap1 , ap2 ) , phase ) self . bp_stats_by_ddid [ self . cur_ddid ] . accum ( ( ap1 , ap3 ) , phase ) self . bp_stats_by_ddid [ self . cur_ddid ] . accum ( ( ap2 , ap3 ) , phase ) self . ap_time_stats_by_ddid [ self . cur_ddid ] . accum ( self . cur_time , ap1 , phase ) self . ap_time_stats_by_ddid [ self . cur_ddid ] . accum ( self . cur_time , ap2 , phase ) self . ap_time_stats_by_ddid [ self . cur_ddid ] . accum ( self . cur_time , ap3 , phase ) | We have computed one independent phase closure triple in one timeslot . |
62,365 | def load_spectrum ( path , smoothing = 181 , DF = - 8. ) : try : ang , lflam = np . loadtxt ( path , usecols = ( 0 , 1 ) ) . T except ValueError : with open ( path , 'rb' ) as f : def lines ( ) : for line in f : yield line . replace ( b'D' , b'e' ) ang , lflam = np . genfromtxt ( lines ( ) , delimiter = ( 13 , 12 ) ) . T z = ang . argsort ( ) ang = ang [ z ] flam = 10 ** ( lflam [ z ] + DF ) del z if smoothing is not None : if isinstance ( smoothing , int ) : smoothing = np . hamming ( smoothing ) else : smoothing = np . asarray ( smoothing ) wnorm = np . convolve ( np . ones_like ( smoothing ) , smoothing , mode = 'valid' ) smoothing = smoothing / wnorm smooth = lambda a : np . convolve ( a , smoothing , mode = 'valid' ) [ : : smoothing . size ] ang = smooth ( ang ) flam = smooth ( flam ) return pd . DataFrame ( { 'wlen' : ang , 'flam' : flam } ) | Load a Phoenix model atmosphere spectrum . |
62,366 | def lbol_from_spt_dist_mag ( sptnum , dist_pc , jmag , kmag , format = 'cgs' ) : bcj = bcj_from_spt ( sptnum ) bck = bck_from_spt ( sptnum ) n = np . zeros ( sptnum . shape , dtype = np . int ) app_mbol = np . zeros ( sptnum . shape ) w = np . isfinite ( bcj ) & np . isfinite ( jmag ) app_mbol [ w ] += jmag [ w ] + bcj [ w ] n [ w ] += 1 w = np . isfinite ( bck ) & np . isfinite ( kmag ) app_mbol [ w ] += kmag [ w ] + bck [ w ] n [ w ] += 1 w = ( n != 0 ) abs_mbol = ( app_mbol [ w ] / n [ w ] ) - 5 * ( np . log10 ( dist_pc [ w ] ) - 1 ) lbol = np . empty ( sptnum . shape ) lbol . fill ( np . nan ) lbol [ w ] = lbol_from_mbol ( abs_mbol , format = format ) return lbol | Estimate a UCD s bolometric luminosity given some basic parameters . |
62,367 | def map ( self , func , iterable , chunksize = None ) : r = self . map_async ( func , iterable , chunksize ) while True : try : return r . get ( self . wait_timeout ) except TimeoutError : pass except KeyboardInterrupt : self . terminate ( ) self . join ( ) raise | Equivalent of map built - in without swallowing KeyboardInterrupt . |
62,368 | def fmthours ( radians , norm = 'wrap' , precision = 3 , seps = '::' ) : return _fmtsexagesimal ( radians * R2H , norm , 24 , seps , precision = precision ) | Format an angle as sexagesimal hours in a string . |
62,369 | def fmtdeglon ( radians , norm = 'wrap' , precision = 2 , seps = '::' ) : return _fmtsexagesimal ( radians * R2D , norm , 360 , seps , precision = precision ) | Format a longitudinal angle as sexagesimal degrees in a string . |
62,370 | def fmtdeglat ( radians , norm = 'raise' , precision = 2 , seps = '::' ) : if norm == 'none' : pass elif norm == 'raise' : if radians > halfpi or radians < - halfpi : raise ValueError ( 'illegal latitude of %f radians' % radians ) elif norm == 'wrap' : radians = angcen ( radians ) if radians > halfpi : radians = pi - radians elif radians < - halfpi : radians = - pi - radians else : raise ValueError ( 'unrecognized normalization type "%s"' % norm ) if len ( seps ) < 2 : raise ValueError ( 'there must be at least two sexagesimal separators; ' 'got value "%s"' % seps ) precision = max ( int ( precision ) , 0 ) if precision == 0 : width = 2 else : width = precision + 3 degrees = radians * R2D if degrees >= 0 : sgn = '+' else : sgn = '-' degrees = - degrees deg = int ( np . floor ( degrees ) ) amin = int ( np . floor ( ( degrees - deg ) * 60 ) ) asec = round ( 3600 * ( degrees - deg - amin / 60. ) , precision ) if asec >= 60 : asec -= 60 amin += 1 if amin >= 60 : amin -= 60 deg += 1 if len ( seps ) > 2 : sep2 = seps [ 2 ] else : sep2 = '' return '%s%02d%s%02d%s%0*.*f%s' % ( sgn , deg , seps [ 0 ] , amin , seps [ 1 ] , width , precision , asec , sep2 ) | Format a latitudinal angle as sexagesimal degrees in a string . |
62,371 | def fmtradec ( rarad , decrad , precision = 2 , raseps = '::' , decseps = '::' , intersep = ' ' ) : return ( fmthours ( rarad , precision = precision + 1 , seps = raseps ) + text_type ( intersep ) + fmtdeglat ( decrad , precision = precision , seps = decseps ) ) | Format equatorial coordinates in a single sexagesimal string . |
62,372 | def parsehours ( hrstr ) : hr = _parsesexagesimal ( hrstr , 'hours' , False ) if hr >= 24 : raise ValueError ( 'illegal hour specification: ' + hrstr ) return hr * H2R | Parse a string formatted as sexagesimal hours into an angle . |
62,373 | def parsedeglat ( latstr ) : deg = _parsesexagesimal ( latstr , 'latitude' , True ) if abs ( deg ) > 90 : raise ValueError ( 'illegal latitude specification: ' + latstr ) return deg * D2R | Parse a latitude formatted as sexagesimal degrees into an angle . |
62,374 | def sphdist ( lat1 , lon1 , lat2 , lon2 ) : cd = np . cos ( lon2 - lon1 ) sd = np . sin ( lon2 - lon1 ) c2 = np . cos ( lat2 ) c1 = np . cos ( lat1 ) s2 = np . sin ( lat2 ) s1 = np . sin ( lat1 ) a = np . sqrt ( ( c2 * sd ) ** 2 + ( c1 * s2 - s1 * c2 * cd ) ** 2 ) b = s1 * s2 + c1 * c2 * cd return np . arctan2 ( a , b ) | Calculate the distance between two locations on a sphere . |
62,375 | def sphbear ( lat1 , lon1 , lat2 , lon2 , tol = 1e-15 ) : ocross = lambda a , b : np . cross ( a , b , axisa = 0 , axisb = 0 , axisc = 0 ) v1 = np . asarray ( [ np . cos ( lat1 ) * np . cos ( lon1 ) , np . cos ( lat1 ) * np . sin ( lon1 ) , np . sin ( lat1 ) ] ) v2 = np . asarray ( [ np . cos ( lat2 ) * np . cos ( lon2 ) , np . cos ( lat2 ) * np . sin ( lon2 ) , np . sin ( lat2 ) ] ) is_bad = ( v1 [ 0 ] ** 2 + v1 [ 1 ] ** 2 ) < tol p12 = ocross ( v1 , v2 ) p1z = np . asarray ( [ v1 [ 1 ] , - v1 [ 0 ] , np . zeros_like ( lat1 ) ] ) cm = np . sqrt ( ( ocross ( p12 , p1z ) ** 2 ) . sum ( axis = 0 ) ) bearing = np . arctan2 ( cm , np . sum ( p12 * p1z , axis = 0 ) ) bearing = np . where ( p12 [ 2 ] < 0 , - bearing , bearing ) bearing = np . where ( np . abs ( bearing ) < tol , 0 , bearing ) bearing [ np . where ( is_bad ) ] = np . nan return bearing | Calculate the bearing between two locations on a sphere . |
62,376 | def sphofs ( lat1 , lon1 , r , pa , tol = 1e-2 , rmax = None ) : if rmax is not None and np . abs ( r ) > rmax : raise ValueError ( 'sphofs radius value %f is too big for ' 'our approximation' % r ) lat2 = lat1 + r * np . cos ( pa ) lon2 = lon1 + r * np . sin ( pa ) / np . cos ( lat2 ) if tol is not None : s = sphdist ( lat1 , lon1 , lat2 , lon2 ) if np . any ( np . abs ( ( s - r ) / s ) > tol ) : raise ValueError ( 'sphofs approximation broke down ' '(%s %s %s %s %s %s %s)' % ( lat1 , lon1 , lat2 , lon2 , r , s , pa ) ) return lat2 , lon2 | Offset from one location on the sphere to another . |
62,377 | def parang ( hourangle , declination , latitude ) : return - np . arctan2 ( - np . sin ( hourangle ) , np . cos ( declination ) * np . tan ( latitude ) - np . sin ( declination ) * np . cos ( hourangle ) ) | Calculate the parallactic angle of a sky position . |
62,378 | def gaussian_convolve ( maj1 , min1 , pa1 , maj2 , min2 , pa2 ) : c1 = np . cos ( pa1 ) s1 = np . sin ( pa1 ) c2 = np . cos ( pa2 ) s2 = np . sin ( pa2 ) a = ( maj1 * c1 ) ** 2 + ( min1 * s1 ) ** 2 + ( maj2 * c2 ) ** 2 + ( min2 * s2 ) ** 2 b = ( maj1 * s1 ) ** 2 + ( min1 * c1 ) ** 2 + ( maj2 * s2 ) ** 2 + ( min2 * c2 ) ** 2 g = 2 * ( ( min1 ** 2 - maj1 ** 2 ) * s1 * c1 + ( min2 ** 2 - maj2 ** 2 ) * s2 * c2 ) s = a + b t = np . sqrt ( ( a - b ) ** 2 + g ** 2 ) maj3 = np . sqrt ( 0.5 * ( s + t ) ) min3 = np . sqrt ( 0.5 * ( s - t ) ) if abs ( g ) + abs ( a - b ) == 0 : pa3 = 0. else : pa3 = 0.5 * np . arctan2 ( - g , a - b ) return maj3 , min3 , pa3 | Convolve two Gaussians analytically . |
62,379 | def gaussian_deconvolve ( smaj , smin , spa , bmaj , bmin , bpa ) : from numpy import cos , sin , sqrt , min , abs , arctan2 if smaj < bmaj : smaj = bmaj if smin < bmin : smin = bmin alpha = ( ( smaj * cos ( spa ) ) ** 2 + ( smin * sin ( spa ) ) ** 2 - ( bmaj * cos ( bpa ) ) ** 2 - ( bmin * sin ( bpa ) ) ** 2 ) beta = ( ( smaj * sin ( spa ) ) ** 2 + ( smin * cos ( spa ) ) ** 2 - ( bmaj * sin ( bpa ) ) ** 2 - ( bmin * cos ( bpa ) ) ** 2 ) gamma = 2 * ( ( smin ** 2 - smaj ** 2 ) * sin ( spa ) * cos ( spa ) - ( bmin ** 2 - bmaj ** 2 ) * sin ( bpa ) * cos ( bpa ) ) s = alpha + beta t = sqrt ( ( alpha - beta ) ** 2 + gamma ** 2 ) limit = 0.5 * min ( [ smaj , smin , bmaj , bmin ] ) ** 2 status = 'ok' if alpha < 0 or beta < 0 or s < t : dmaj = dmin = dpa = 0 if 0.5 * ( s - t ) < limit and alpha > - limit and beta > - limit : status = 'pointlike' else : status = 'fail' else : dmaj = sqrt ( 0.5 * ( s + t ) ) dmin = sqrt ( 0.5 * ( s - t ) ) if abs ( gamma ) + abs ( alpha - beta ) == 0 : dpa = 0 else : dpa = 0.5 * arctan2 ( - gamma , alpha - beta ) return dmaj , dmin , dpa , status | Deconvolve two Gaussians analytically . |
62,380 | def load_skyfield_data ( ) : import os . path from astropy . config import paths from skyfield . api import Loader cache_dir = os . path . join ( paths . get_cache_dir ( ) , 'pwkit' ) loader = Loader ( cache_dir ) planets = loader ( 'de421.bsp' ) ts = loader . timescale ( ) return planets , ts | Load data files used in Skyfield . This will download files from the internet if they haven t been downloaded before . |
62,381 | def get_2mass_epoch ( tmra , tmdec , debug = False ) : import codecs try : from urllib . request import urlopen except ImportError : from urllib2 import urlopen postdata = b % ( tmra * R2D , tmdec * R2D ) jd = None for line in codecs . getreader ( 'utf-8' ) ( urlopen ( _vizurl , postdata ) ) : line = line . strip ( ) if debug : print_ ( 'D: 2M >>' , line ) if line . startswith ( '1;' ) : jd = float ( line [ 2 : ] ) if jd is None : import sys print_ ( 'warning: 2MASS epoch lookup failed; astrometry could be very wrong!' , file = sys . stderr ) return J2000 return jd - 2400000.5 | Given a 2MASS position look up the epoch when it was observed . |
62,382 | def verify ( self , complain = True ) : import sys if self . ra is None : raise ValueError ( 'AstrometryInfo missing "ra"' ) if self . dec is None : raise ValueError ( 'AstrometryInfo missing "dec"' ) if self . _partial_info ( self . promo_ra , self . promo_dec ) : raise ValueError ( 'partial proper-motion info in AstrometryInfo' ) if self . _partial_info ( self . pos_u_maj , self . pos_u_min , self . pos_u_pa ) : raise ValueError ( 'partial positional uncertainty info in AstrometryInfo' ) if self . _partial_info ( self . promo_u_maj , self . promo_u_min , self . promo_u_pa ) : raise ValueError ( 'partial proper-motion uncertainty info in AstrometryInfo' ) if self . pos_u_maj is None : if complain : print_ ( 'AstrometryInfo: no positional uncertainty info' , file = sys . stderr ) elif self . pos_u_maj < self . pos_u_min : if complain : print_ ( 'AstrometryInfo: swapped positional uncertainty ' 'major/minor axes' , file = sys . stderr ) self . pos_u_maj , self . pos_u_min = self . pos_u_min , self . pos_u_maj self . pos_u_pa += 0.5 * np . pi if self . pos_epoch is None : if complain : print_ ( 'AstrometryInfo: assuming epoch of position is J2000.0' , file = sys . stderr ) if self . promo_ra is None : if complain : print_ ( 'AstrometryInfo: assuming zero proper motion' , file = sys . stderr ) elif self . promo_u_maj is None : if complain : print_ ( 'AstrometryInfo: no uncertainty on proper motion' , file = sys . stderr ) elif self . promo_u_maj < self . promo_u_min : if complain : print_ ( 'AstrometryInfo: swapped proper motion uncertainty ' 'major/minor axes' , file = sys . stderr ) self . promo_u_maj , self . promo_u_min = self . promo_u_min , self . promo_u_maj self . promo_u_pa += 0.5 * np . pi if self . parallax is None : if complain : print_ ( 'AstrometryInfo: assuming zero parallax' , file = sys . stderr ) else : if self . parallax < 0. : raise ValueError ( 'negative parallax in AstrometryInfo' ) if self . u_parallax is None : if complain : print_ ( 'AstrometryInfo: no uncertainty on parallax' , file = sys . stderr ) if self . vradial is None : pass elif self . u_vradial is None : if complain : print_ ( 'AstrometryInfo: no uncertainty on v_radial' , file = sys . stderr ) return self | Validate that the attributes are self - consistent . |
62,383 | def fill_from_simbad ( self , ident , debug = False ) : info = get_simbad_astrometry_info ( ident , debug = debug ) posref = 'unknown' for k , v in six . iteritems ( info ) : if '~' in v : continue if k == 'COO(d;A)' : self . ra = float ( v ) * D2R elif k == 'COO(d;D)' : self . dec = float ( v ) * D2R elif k == 'COO(E)' : a = v . split ( ) self . pos_u_maj = float ( a [ 0 ] ) * A2R * 1e-3 self . pos_u_min = float ( a [ 1 ] ) * A2R * 1e-3 self . pos_u_pa = float ( a [ 2 ] ) * D2R elif k == 'COO(B)' : posref = v elif k == 'PM(A)' : self . promo_ra = float ( v ) elif k == 'PM(D)' : self . promo_dec = float ( v ) elif k == 'PM(E)' : a = v . split ( ) self . promo_u_maj = float ( a [ 0 ] ) self . promo_u_min = float ( a [ 1 ] ) self . promo_u_pa = float ( a [ 2 ] ) * D2R elif k == 'PLX(V)' : self . parallax = float ( v ) elif k == 'PLX(E)' : self . u_parallax = float ( v ) elif k == 'RV(V)' : self . vradial = float ( v ) elif k == 'RV(E)' : self . u_vradial = float ( v ) if self . ra is None : raise Exception ( 'no position returned by Simbad for "%s"' % ident ) if self . u_parallax == 0 : self . u_parallax = None if self . u_vradial == 0 : self . u_vradial = None if posref == '2003yCat.2246....0C' : self . pos_epoch = get_2mass_epoch ( self . ra , self . dec , debug ) return self | Fill in astrometric information using the Simbad web service . |
62,384 | def fill_from_allwise ( self , ident , catalog_ident = 'II/328/allwise' ) : from astroquery . vizier import Vizier import numpy . ma . core as ma_core table_list = Vizier . query_constraints ( catalog = catalog_ident , AllWISE = ident ) if not len ( table_list ) : raise PKError ( 'Vizier query returned no tables (catalog=%r AllWISE=%r)' , catalog_ident , ident ) table = table_list [ 0 ] if not len ( table ) : raise PKError ( 'Vizier query returned empty %s table (catalog=%r AllWISE=%r)' , table . meta [ 'name' ] , catalog_ident , ident ) row = table [ 0 ] if isinstance ( row [ '_RAJ2000' ] , ma_core . MaskedConstant ) : raise PKError ( 'Vizier query returned flagged row in %s table; your AllWISE ' 'identifier likely does not exist (it should be of the form ' '"J112254.70+255021.9"; catalog=%r AllWISE=%r)' , table . meta [ 'name' ] , catalog_ident , ident ) self . ra = row [ 'RA_pm' ] * D2R self . dec = row [ 'DE_pm' ] * D2R if row [ 'e_RA_pm' ] > row [ 'e_DE_pm' ] : self . pos_u_maj = row [ 'e_RA_pm' ] * A2R self . pos_u_min = row [ 'e_DE_pm' ] * A2R self . pos_u_pa = halfpi else : self . pos_u_maj = row [ 'e_DE_pm' ] * A2R self . pos_u_min = row [ 'e_RA_pm' ] * A2R self . pos_u_pa = 0 self . pos_epoch = 55400. self . promo_ra = row [ 'pmRA' ] self . promo_dec = row [ 'pmDE' ] if row [ 'e_pmRA' ] > row [ 'e_pmDE' ] : self . promo_u_maj = row [ 'e_pmRA' ] * 1. self . promo_u_min = row [ 'e_pmDE' ] * 1. self . promo_u_pa = halfpi else : self . promo_u_maj = row [ 'e_pmDE' ] * 1. self . promo_u_min = row [ 'e_pmRA' ] * 1. self . promo_u_pa = 0. return self | Fill in astrometric information from the AllWISE catalog using Astroquery . |
62,385 | def backtrace_on_usr1 ( ) : import signal try : signal . signal ( signal . SIGUSR1 , _print_backtrace_signal_handler ) except Exception as e : warn ( 'failed to set up Python backtraces on SIGUSR1: %s' , e ) | Install a signal handler such that this program prints a Python traceback upon receipt of SIGUSR1 . This could be useful for checking that long - running programs are behaving properly or for discovering where an infinite loop is occurring . |
62,386 | def fork_detached_process ( ) : import os , struct from . . import Holder payload = struct . Struct ( 'L' ) info = Holder ( ) readfd , writefd = os . pipe ( ) pid1 = os . fork ( ) if pid1 > 0 : info . whoami = 'original' info . pipe = os . fdopen ( readfd , 'rb' ) os . close ( writefd ) retcode = os . waitpid ( pid1 , 0 ) [ 1 ] if retcode : raise Exception ( 'child process exited with error code %d' % retcode ) ( info . forkedpid , ) = payload . unpack ( info . pipe . read ( payload . size ) ) else : os . setsid ( ) pid2 = os . fork ( ) if pid2 > 0 : os . _exit ( 0 ) info . whoami = 'forked' info . pipe = os . fdopen ( writefd , 'wb' ) os . close ( readfd ) info . forkedpid = os . getpid ( ) info . pipe . write ( payload . pack ( info . forkedpid ) ) return info | Fork this process creating a subprocess detached from the current context . |
62,387 | def pop_option ( ident , argv = None ) : if argv is None : from sys import argv if len ( ident ) == 1 : ident = '-' + ident else : ident = '--' + ident found = ident in argv if found : argv . remove ( ident ) return found | A lame routine for grabbing command - line arguments . Returns a boolean indicating whether the option was present . If it was it s removed from the argument string . Because of the lame behavior options can t be combined and non - boolean options aren t supported . Operates on sys . argv by default . |
62,388 | def show_usage ( docstring , short , stream , exitcode ) : if stream is None : from sys import stdout as stream if not short : print ( 'Usage:' , docstring . strip ( ) , file = stream ) else : intext = False for l in docstring . splitlines ( ) : if intext : if not len ( l ) : break print ( l , file = stream ) elif len ( l ) : intext = True print ( 'Usage:' , l , file = stream ) print ( '\nRun with a sole argument --help for more detailed ' 'usage information.' , file = stream ) raise SystemExit ( exitcode ) | Print program usage information and exit . |
62,389 | def wrong_usage ( docstring , * rest ) : intext = False if len ( rest ) == 0 : detail = 'invalid command-line arguments' elif len ( rest ) == 1 : detail = rest [ 0 ] else : detail = rest [ 0 ] % tuple ( rest [ 1 : ] ) print ( 'error:' , detail , '\n' , file = sys . stderr ) show_usage ( docstring , True , sys . stderr , 1 ) | Print a message indicating invalid command - line arguments and exit with an error code . |
62,390 | def excepthook ( self , etype , evalue , etb ) : self . inner_excepthook ( etype , evalue , etb ) if issubclass ( etype , KeyboardInterrupt ) : signal . signal ( signal . SIGINT , signal . SIG_DFL ) os . kill ( os . getpid ( ) , signal . SIGINT ) | Handle an uncaught exception . We always forward the exception on to whatever sys . excepthook was present upon setup . However if the exception is a KeyboardInterrupt we additionally kill ourselves with an uncaught SIGINT so that invoking programs know what happened . |
62,391 | def calc_nu_b ( b ) : return cgs . e * b / ( 2 * cgs . pi * cgs . me * cgs . c ) | Calculate the cyclotron frequency in Hz given a magnetic field strength in Gauss . |
62,392 | def calc_freefree_snu_ujy ( ne , t , width , elongation , dist , ghz ) : hz = ghz * 1e9 eta = calc_freefree_eta ( ne , t , hz ) kappa = calc_freefree_kappa ( ne , t , hz ) snu = calc_snu ( eta , kappa , width , elongation , dist ) ujy = snu * cgs . jypercgs * 1e6 return ujy | Calculate a flux density from pure free - free emission . |
62,393 | def concat ( invises , outvis , timesort = False ) : tb = util . tools . table ( ) ms = util . tools . ms ( ) if os . path . exists ( outvis ) : raise RuntimeError ( 'output "%s" already exists' % outvis ) for invis in invises : if not os . path . isdir ( invis ) : raise RuntimeError ( 'input "%s" does not exist' % invis ) tb . open ( b ( invises [ 0 ] ) ) tb . copy ( b ( outvis ) , deep = True , valuecopy = True ) tb . close ( ) ms . open ( b ( outvis ) , nomodify = False ) for invis in invises [ 1 : ] : ms . concatenate ( msfile = b ( invis ) , freqtol = b ( concat_freqtol ) , dirtol = b ( concat_dirtol ) ) ms . writehistory ( message = b'taskname=tasklib.concat' , origin = b'tasklib.concat' ) ms . writehistory ( message = b ( 'vis = ' + ', ' . join ( invises ) ) , origin = b'tasklib.concat' ) ms . writehistory ( message = b ( 'timesort = ' + 'FT' [ int ( timesort ) ] ) , origin = b'tasklib.concat' ) if timesort : ms . timesort ( ) ms . close ( ) | Concatenate visibility measurement sets . |
62,394 | def delcal ( mspath ) : wantremove = 'MODEL_DATA CORRECTED_DATA' . split ( ) tb = util . tools . table ( ) tb . open ( b ( mspath ) , nomodify = False ) cols = frozenset ( tb . colnames ( ) ) toremove = [ b ( c ) for c in wantremove if c in cols ] if len ( toremove ) : tb . removecols ( toremove ) tb . close ( ) if six . PY2 : return toremove else : return [ c . decode ( 'utf8' ) for c in toremove ] | Delete the MODEL_DATA and CORRECTED_DATA columns from a measurement set . |
62,395 | def delmod_cli ( argv , alter_logger = True ) : check_usage ( delmod_doc , argv , usageifnoargs = True ) if alter_logger : util . logger ( ) cb = util . tools . calibrater ( ) for mspath in argv [ 1 : ] : cb . open ( b ( mspath ) , addcorr = False , addmodel = False ) cb . delmod ( otf = True , scr = False ) cb . close ( ) | Command - line access to delmod functionality . |
62,396 | def extractbpflags ( calpath , deststream ) : tb = util . tools . table ( ) tb . open ( b ( os . path . join ( calpath , 'ANTENNA' ) ) ) antnames = tb . getcol ( b'NAME' ) tb . close ( ) tb . open ( b ( calpath ) ) try : t = tb . getkeyword ( b'VisCal' ) except RuntimeError : raise PKError ( 'no "VisCal" keyword in %s; it doesn\'t seem to be a ' 'bandpass calibration table' , calpath ) if t != 'B Jones' : raise PKError ( 'table %s doesn\'t seem to be a bandpass calibration ' 'table; its type is "%s"' , calpath , t ) def emit ( antidx , spwidx , chanstart , chanend ) : print ( "antenna='%s&*' spw='%d:%d~%d' reason='BANDPASS_FLAGGED'" % ( antnames [ antidx ] , spwidx , chanstart , chanend ) , file = deststream ) for row in range ( tb . nrows ( ) ) : ant = tb . getcell ( b'ANTENNA1' , row ) spw = tb . getcell ( b'SPECTRAL_WINDOW_ID' , row ) flag = tb . getcell ( b'FLAG' , row ) sqflag = ~ ( ( ~ flag ) . prod ( axis = 0 , dtype = np . bool ) ) runstart = None for i in range ( sqflag . size ) : if sqflag [ i ] : if runstart is None : runstart = i elif runstart is not None : emit ( ant , spw , runstart , i - 1 ) runstart = None if runstart is not None : emit ( ant , spw , runstart , i ) tb . close ( ) | Make a flags file out of a bandpass calibration table |
62,397 | def flagmanager_cli ( argv , alter_logger = True ) : check_usage ( flagmanager_doc , argv , usageifnoargs = True ) if len ( argv ) < 3 : wrong_usage ( flagmanager_doc , 'expect at least a mode and an MS name' ) mode = argv [ 1 ] ms = argv [ 2 ] if alter_logger : if mode == 'list' : util . logger ( 'info' ) elif mode == 'delete' : util . logger ( 'severe' ) else : util . logger ( ) try : factory = util . tools . agentflagger except AttributeError : factory = util . tools . testflagger af = factory ( ) af . open ( b ( ms ) ) if mode == 'list' : if len ( argv ) != 3 : wrong_usage ( flagmanager_doc , 'expect exactly one argument in list mode' ) af . getflagversionlist ( ) elif mode == 'save' : if len ( argv ) != 4 : wrong_usage ( flagmanager_doc , 'expect exactly two arguments in save mode' ) from time import strftime name = argv [ 3 ] af . saveflagversion ( versionname = b ( name ) , merge = b'replace' , comment = b ( 'created %s(casatask flagmanager)' % strftime ( '%Y-%m-%dT%H:%M:%SZ' ) ) ) elif mode == 'restore' : if len ( argv ) != 4 : wrong_usage ( flagmanager_doc , 'expect exactly two arguments in restore mode' ) name = argv [ 3 ] af . restoreflagversion ( versionname = b ( name ) , merge = b'replace' ) elif mode == 'delete' : if len ( argv ) != 4 : wrong_usage ( flagmanager_doc , 'expect exactly two arguments in delete mode' ) name = argv [ 3 ] if not os . path . isdir ( os . path . join ( ms + '.flagversions' , 'flags.' + name ) ) : raise RuntimeError ( 'version "%s" doesn\'t exist in "%s.flagversions"' % ( name , ms ) ) af . deleteflagversion ( versionname = b ( name ) ) else : wrong_usage ( flagmanager_doc , 'unknown flagmanager mode "%s"' % mode ) af . done ( ) | Command - line access to flagmanager functionality . |
62,398 | def image2fits ( mspath , fitspath , velocity = False , optical = False , bitpix = - 32 , minpix = 0 , maxpix = - 1 , overwrite = False , dropstokes = False , stokeslast = True , history = True , ** kwargs ) : ia = util . tools . image ( ) ia . open ( b ( mspath ) ) ia . tofits ( outfile = b ( fitspath ) , velocity = velocity , optical = optical , bitpix = bitpix , minpix = minpix , maxpix = maxpix , overwrite = overwrite , dropstokes = dropstokes , stokeslast = stokeslast , history = history , ** kwargs ) ia . close ( ) | Convert an image in MS format to FITS format . |
62,399 | def importalma ( asdm , ms ) : from . scripting import CasapyScript script = os . path . join ( os . path . dirname ( __file__ ) , 'cscript_importalma.py' ) with CasapyScript ( script , asdm = asdm , ms = ms ) as cs : pass | Convert an ALMA low - level ASDM dataset to Measurement Set format . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.