idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
54,600 | def _moments_central ( data , center = None , order = 1 ) : data = np . asarray ( data ) . astype ( float ) if data . ndim != 2 : raise ValueError ( 'data must be a 2D array.' ) if center is None : from . . centroids import centroid_com center = centroid_com ( data ) indices = np . ogrid [ [ slice ( 0 , i ) for i in data . shape ] ] ypowers = ( indices [ 0 ] - center [ 1 ] ) ** np . arange ( order + 1 ) xpowers = np . transpose ( indices [ 1 ] - center [ 0 ] ) ** np . arange ( order + 1 ) return np . dot ( np . dot ( np . transpose ( ypowers ) , data ) , xpowers ) | Calculate the central image moments up to the specified order . |
54,601 | def first_and_second_harmonic_function ( phi , c ) : return ( c [ 0 ] + c [ 1 ] * np . sin ( phi ) + c [ 2 ] * np . cos ( phi ) + c [ 3 ] * np . sin ( 2 * phi ) + c [ 4 ] * np . cos ( 2 * phi ) ) | Compute the harmonic function value used to calculate the corrections for ellipse fitting . |
54,602 | def _radial_distance ( shape ) : if len ( shape ) != 2 : raise ValueError ( 'shape must have only 2 elements' ) position = ( np . asarray ( shape ) - 1 ) / 2. x = np . arange ( shape [ 1 ] ) - position [ 1 ] y = np . arange ( shape [ 0 ] ) - position [ 0 ] xx , yy = np . meshgrid ( x , y ) return np . sqrt ( xx ** 2 + yy ** 2 ) | Return an array where each value is the Euclidean distance from the array center . |
54,603 | def load_spitzer_image ( show_progress = False ) : path = get_path ( 'spitzer_example_image.fits' , location = 'remote' , show_progress = show_progress ) hdu = fits . open ( path ) [ 0 ] return hdu | Load a 4 . 5 micron Spitzer image . |
54,604 | def load_spitzer_catalog ( show_progress = False ) : path = get_path ( 'spitzer_example_catalog.xml' , location = 'remote' , show_progress = show_progress ) table = Table . read ( path ) return table | Load a 4 . 5 micron Spitzer catalog . |
54,605 | def load_irac_psf ( channel , show_progress = False ) : channel = int ( channel ) if channel < 1 or channel > 4 : raise ValueError ( 'channel must be 1, 2, 3, or 4' ) fn = 'irac_ch{0}_flight.fits' . format ( channel ) path = get_path ( fn , location = 'remote' , show_progress = show_progress ) hdu = fits . open ( path ) [ 0 ] return hdu | Load a Spitzer IRAC PSF image . |
54,606 | def fit_image ( self , sma0 = None , minsma = 0. , maxsma = None , step = 0.1 , conver = DEFAULT_CONVERGENCE , minit = DEFAULT_MINIT , maxit = DEFAULT_MAXIT , fflag = DEFAULT_FFLAG , maxgerr = DEFAULT_MAXGERR , sclip = 3. , nclip = 0 , integrmode = BILINEAR , linear = False , maxrit = None ) : isophote_list = [ ] if not sma0 : if self . _geometry : sma = self . _geometry . sma else : sma = 10. else : sma = sma0 noiter = False first_isophote = True while True : minit_a = 2 * minit if first_isophote else minit first_isophote = False isophote = self . fit_isophote ( sma , step , conver , minit_a , maxit , fflag , maxgerr , sclip , nclip , integrmode , linear , maxrit , noniterate = noiter , isophote_list = isophote_list ) if ( isophote . stop_code < 0 or isophote . stop_code == 1 ) : if len ( isophote_list ) == 1 : warnings . warn ( 'No meaningful fit was possible.' , AstropyUserWarning ) return IsophoteList ( [ ] ) self . _fix_last_isophote ( isophote_list , - 1 ) isophote = isophote_list [ - 1 ] if len ( isophote_list ) > 2 : if ( ( isophote . stop_code == 5 and isophote_list [ - 2 ] . stop_code == 5 ) or isophote . stop_code == 1 ) : if maxsma and maxsma > isophote . sma : noiter = True else : break isophote = isophote_list [ - 1 ] sma = isophote . sample . geometry . update_sma ( step ) if maxsma and sma >= maxsma : break first_isophote = isophote_list [ 0 ] sma , step = first_isophote . sample . geometry . reset_sma ( step ) while True : isophote = self . fit_isophote ( sma , step , conver , minit , maxit , fflag , maxgerr , sclip , nclip , integrmode , linear , maxrit , going_inwards = True , isophote_list = isophote_list ) if isophote . stop_code < 0 : self . _fix_last_isophote ( isophote_list , 0 ) isophote = isophote_list [ - 1 ] sma = isophote . sample . geometry . update_sma ( step ) if sma <= max ( minsma , 0.5 ) : break if minsma == 0.0 : isophote = self . fit_isophote ( 0.0 , isophote_list = isophote_list ) isophote_list . 
sort ( ) return IsophoteList ( isophote_list ) | Fit multiple isophotes to the image array . |
54,607 | def fit_isophote ( self , sma , step = 0.1 , conver = DEFAULT_CONVERGENCE , minit = DEFAULT_MINIT , maxit = DEFAULT_MAXIT , fflag = DEFAULT_FFLAG , maxgerr = DEFAULT_MAXGERR , sclip = 3. , nclip = 0 , integrmode = BILINEAR , linear = False , maxrit = None , noniterate = False , going_inwards = False , isophote_list = None ) : geometry = self . _geometry if isophote_list is not None and len ( isophote_list ) > 0 : geometry = isophote_list [ - 1 ] . sample . geometry if noniterate or ( maxrit and sma > maxrit ) : isophote = self . _non_iterative ( sma , step , linear , geometry , sclip , nclip , integrmode ) else : isophote = self . _iterative ( sma , step , linear , geometry , sclip , nclip , integrmode , conver , minit , maxit , fflag , maxgerr , going_inwards ) if isophote_list is not None and isophote . valid : isophote_list . append ( isophote ) return isophote | Fit a single isophote with a given semimajor axis length . |
54,608 | def to_sky ( self , wcs , mode = 'all' ) : sky_params = self . _to_sky_params ( wcs , mode = mode ) return SkyCircularAperture ( ** sky_params ) | Convert the aperture to a SkyCircularAperture object defined in celestial coordinates . |
54,609 | def to_sky ( self , wcs , mode = 'all' ) : sky_params = self . _to_sky_params ( wcs , mode = mode ) return SkyCircularAnnulus ( ** sky_params ) | Convert the aperture to a SkyCircularAnnulus object defined in celestial coordinates . |
54,610 | def to_pixel ( self , wcs , mode = 'all' ) : pixel_params = self . _to_pixel_params ( wcs , mode = mode ) return CircularAperture ( ** pixel_params ) | Convert the aperture to a CircularAperture object defined in pixel coordinates . |
54,611 | def to_pixel ( self , wcs , mode = 'all' ) : pixel_params = self . _to_pixel_params ( wcs , mode = mode ) return CircularAnnulus ( ** pixel_params ) | Convert the aperture to a CircularAnnulus object defined in pixel coordinates . |
54,612 | def apply_poisson_noise ( data , random_state = None ) : data = np . asanyarray ( data ) if np . any ( data < 0 ) : raise ValueError ( 'data must not contain any negative values' ) prng = check_random_state ( random_state ) return prng . poisson ( data ) | Apply Poisson noise to an array where the value of each element in the input array represents the expected number of counts . |
54,613 | def make_noise_image ( shape , type = 'gaussian' , mean = None , stddev = None , random_state = None ) : if mean is None : raise ValueError ( '"mean" must be input' ) prng = check_random_state ( random_state ) if type == 'gaussian' : if stddev is None : raise ValueError ( '"stddev" must be input for Gaussian noise' ) image = prng . normal ( loc = mean , scale = stddev , size = shape ) elif type == 'poisson' : image = prng . poisson ( lam = mean , size = shape ) else : raise ValueError ( 'Invalid type: {0}. Use one of ' '{"gaussian", "poisson"}.' . format ( type ) ) return image | Make a noise image containing Gaussian or Poisson noise . |
54,614 | def make_random_models_table ( n_sources , param_ranges , random_state = None ) : prng = check_random_state ( random_state ) sources = Table ( ) for param_name , ( lower , upper ) in param_ranges . items ( ) : sources [ param_name ] = prng . uniform ( lower , upper , n_sources ) return sources | Make a ~astropy . table . Table containing randomly generated parameters for an Astropy model to simulate a set of sources . |
54,615 | def make_random_gaussians_table ( n_sources , param_ranges , random_state = None ) : sources = make_random_models_table ( n_sources , param_ranges , random_state = random_state ) if 'flux' in param_ranges and 'amplitude' not in param_ranges : model = Gaussian2D ( x_stddev = 1 , y_stddev = 1 ) if 'x_stddev' in sources . colnames : xstd = sources [ 'x_stddev' ] else : xstd = model . x_stddev . value if 'y_stddev' in sources . colnames : ystd = sources [ 'y_stddev' ] else : ystd = model . y_stddev . value sources = sources . copy ( ) sources [ 'amplitude' ] = sources [ 'flux' ] / ( 2. * np . pi * xstd * ystd ) return sources | Make a ~astropy . table . Table containing randomly generated parameters for 2D Gaussian sources . |
54,616 | def make_model_sources_image ( shape , model , source_table , oversample = 1 ) : image = np . zeros ( shape , dtype = np . float64 ) y , x = np . indices ( shape ) params_to_set = [ ] for param in source_table . colnames : if param in model . param_names : params_to_set . append ( param ) init_params = { param : getattr ( model , param ) for param in params_to_set } try : for i , source in enumerate ( source_table ) : for param in params_to_set : setattr ( model , param , source [ param ] ) if oversample == 1 : image += model ( x , y ) else : image += discretize_model ( model , ( 0 , shape [ 1 ] ) , ( 0 , shape [ 0 ] ) , mode = 'oversample' , factor = oversample ) finally : for param , value in init_params . items ( ) : setattr ( model , param , value ) return image | Make an image containing sources generated from a user - specified model . |
54,617 | def make_4gaussians_image ( noise = True ) : table = Table ( ) table [ 'amplitude' ] = [ 50 , 70 , 150 , 210 ] table [ 'x_mean' ] = [ 160 , 25 , 150 , 90 ] table [ 'y_mean' ] = [ 70 , 40 , 25 , 60 ] table [ 'x_stddev' ] = [ 15.2 , 5.1 , 3. , 8.1 ] table [ 'y_stddev' ] = [ 2.6 , 2.5 , 3. , 4.7 ] table [ 'theta' ] = np . array ( [ 145. , 20. , 0. , 60. ] ) * np . pi / 180. shape = ( 100 , 200 ) data = make_gaussian_sources_image ( shape , table ) + 5. if noise : data += make_noise_image ( shape , type = 'gaussian' , mean = 0. , stddev = 5. , random_state = 12345 ) return data | Make an example image containing four 2D Gaussians plus a constant background . |
54,618 | def make_100gaussians_image ( noise = True ) : n_sources = 100 flux_range = [ 500 , 1000 ] xmean_range = [ 0 , 500 ] ymean_range = [ 0 , 300 ] xstddev_range = [ 1 , 5 ] ystddev_range = [ 1 , 5 ] params = OrderedDict ( [ ( 'flux' , flux_range ) , ( 'x_mean' , xmean_range ) , ( 'y_mean' , ymean_range ) , ( 'x_stddev' , xstddev_range ) , ( 'y_stddev' , ystddev_range ) , ( 'theta' , [ 0 , 2 * np . pi ] ) ] ) sources = make_random_gaussians_table ( n_sources , params , random_state = 12345 ) shape = ( 300 , 500 ) data = make_gaussian_sources_image ( shape , sources ) + 5. if noise : data += make_noise_image ( shape , type = 'gaussian' , mean = 0. , stddev = 2. , random_state = 12345 ) return data | Make an example image containing 100 2D Gaussians plus a constant background . |
54,619 | def make_wcs ( shape , galactic = False ) : wcs = WCS ( naxis = 2 ) rho = np . pi / 3. scale = 0.1 / 3600. if astropy_version < '3.1' : wcs . _naxis1 = shape [ 1 ] wcs . _naxis2 = shape [ 0 ] else : wcs . pixel_shape = shape wcs . wcs . crpix = [ shape [ 1 ] / 2 , shape [ 0 ] / 2 ] wcs . wcs . crval = [ 197.8925 , - 1.36555556 ] wcs . wcs . cunit = [ 'deg' , 'deg' ] wcs . wcs . cd = [ [ - scale * np . cos ( rho ) , scale * np . sin ( rho ) ] , [ scale * np . sin ( rho ) , scale * np . cos ( rho ) ] ] if not galactic : wcs . wcs . radesys = 'ICRS' wcs . wcs . ctype = [ 'RA---TAN' , 'DEC--TAN' ] else : wcs . wcs . ctype = [ 'GLON-CAR' , 'GLAT-CAR' ] return wcs | Create a simple celestial WCS object in either the ICRS or Galactic coordinate frame . |
54,620 | def make_imagehdu ( data , wcs = None ) : data = np . asanyarray ( data ) if data . ndim != 2 : raise ValueError ( 'data must be a 2D array' ) if wcs is not None : header = wcs . to_header ( ) else : header = None return fits . ImageHDU ( data , header = header ) | Create a FITS ~astropy . io . fits . ImageHDU containing the input 2D image . |
54,621 | def centroid_com ( data , mask = None ) : data = data . astype ( np . float ) if mask is not None and mask is not np . ma . nomask : mask = np . asarray ( mask , dtype = bool ) if data . shape != mask . shape : raise ValueError ( 'data and mask must have the same shape.' ) data [ mask ] = 0. badidx = ~ np . isfinite ( data ) if np . any ( badidx ) : warnings . warn ( 'Input data contains input values (e.g. NaNs or infs), ' 'which were automatically masked.' , AstropyUserWarning ) data [ badidx ] = 0. total = np . sum ( data ) indices = np . ogrid [ [ slice ( 0 , i ) for i in data . shape ] ] return np . array ( [ np . sum ( indices [ axis ] * data ) / total for axis in range ( data . ndim ) ] ) [ : : - 1 ] | Calculate the centroid of an n - dimensional array as its center of mass determined from moments . |
54,622 | def gaussian1d_moments ( data , mask = None ) : if np . any ( ~ np . isfinite ( data ) ) : data = np . ma . masked_invalid ( data ) warnings . warn ( 'Input data contains input values (e.g. NaNs or infs), ' 'which were automatically masked.' , AstropyUserWarning ) else : data = np . ma . array ( data ) if mask is not None and mask is not np . ma . nomask : mask = np . asanyarray ( mask ) if data . shape != mask . shape : raise ValueError ( 'data and mask must have the same shape.' ) data . mask |= mask data . fill_value = 0. data = data . filled ( ) x = np . arange ( data . size ) x_mean = np . sum ( x * data ) / np . sum ( data ) x_stddev = np . sqrt ( abs ( np . sum ( data * ( x - x_mean ) ** 2 ) / np . sum ( data ) ) ) amplitude = np . ptp ( data ) return amplitude , x_mean , x_stddev | Estimate 1D Gaussian parameters from the moments of 1D data . |
54,623 | def fit_2dgaussian ( data , error = None , mask = None ) : from . . morphology import data_properties data = np . ma . asanyarray ( data ) if mask is not None and mask is not np . ma . nomask : mask = np . asanyarray ( mask ) if data . shape != mask . shape : raise ValueError ( 'data and mask must have the same shape.' ) data . mask |= mask if np . any ( ~ np . isfinite ( data ) ) : data = np . ma . masked_invalid ( data ) warnings . warn ( 'Input data contains input values (e.g. NaNs or infs), ' 'which were automatically masked.' , AstropyUserWarning ) if error is not None : error = np . ma . masked_invalid ( error ) if data . shape != error . shape : raise ValueError ( 'data and error must have the same shape.' ) data . mask |= error . mask weights = 1.0 / error . clip ( min = 1.e-30 ) else : weights = np . ones ( data . shape ) if np . ma . count ( data ) < 7 : raise ValueError ( 'Input data must have a least 7 unmasked values to ' 'fit a 2D Gaussian plus a constant.' ) if data . mask is not np . ma . nomask : weights [ data . mask ] = 0. mask = data . mask data . fill_value = 0.0 data = data . filled ( ) props = data_properties ( data - np . min ( data ) , mask = mask ) init_const = 0. init_amplitude = np . ptp ( data ) g_init = GaussianConst2D ( constant = init_const , amplitude = init_amplitude , x_mean = props . xcentroid . value , y_mean = props . ycentroid . value , x_stddev = props . semimajor_axis_sigma . value , y_stddev = props . semiminor_axis_sigma . value , theta = props . orientation . value ) fitter = LevMarLSQFitter ( ) y , x = np . indices ( data . shape ) gfit = fitter ( g_init , x , y , data , weights = weights ) return gfit | Fit a 2D Gaussian plus a constant to a 2D image . |
54,624 | def centroid_1dg ( data , error = None , mask = None ) : data = np . ma . asanyarray ( data ) if mask is not None and mask is not np . ma . nomask : mask = np . asanyarray ( mask ) if data . shape != mask . shape : raise ValueError ( 'data and mask must have the same shape.' ) data . mask |= mask if np . any ( ~ np . isfinite ( data ) ) : data = np . ma . masked_invalid ( data ) warnings . warn ( 'Input data contains input values (e.g. NaNs or infs), ' 'which were automatically masked.' , AstropyUserWarning ) if error is not None : error = np . ma . masked_invalid ( error ) if data . shape != error . shape : raise ValueError ( 'data and error must have the same shape.' ) data . mask |= error . mask error . mask = data . mask xy_error = np . array ( [ np . sqrt ( np . ma . sum ( error ** 2 , axis = i ) ) for i in [ 0 , 1 ] ] ) xy_weights = [ ( 1.0 / xy_error [ i ] . clip ( min = 1.e-30 ) ) for i in [ 0 , 1 ] ] else : xy_weights = [ np . ones ( data . shape [ i ] ) for i in [ 1 , 0 ] ] if data . mask is not np . ma . nomask : bad_idx = [ np . all ( data . mask , axis = i ) for i in [ 0 , 1 ] ] for i in [ 0 , 1 ] : xy_weights [ i ] [ bad_idx [ i ] ] = 0. xy_data = np . array ( [ np . ma . sum ( data , axis = i ) for i in [ 0 , 1 ] ] ) constant_init = np . ma . min ( data ) centroid = [ ] for ( data_i , weights_i ) in zip ( xy_data , xy_weights ) : params_init = gaussian1d_moments ( data_i ) g_init = Const1D ( constant_init ) + Gaussian1D ( * params_init ) fitter = LevMarLSQFitter ( ) x = np . arange ( data_i . size ) g_fit = fitter ( g_init , x , data_i , weights = weights_i ) centroid . append ( g_fit . mean_1 . value ) return np . array ( centroid ) | Calculate the centroid of a 2D array by fitting 1D Gaussians to the marginal x and y distributions of the array . |
54,625 | def centroid_sources ( data , xpos , ypos , box_size = 11 , footprint = None , error = None , mask = None , centroid_func = centroid_com ) : xpos = np . atleast_1d ( xpos ) ypos = np . atleast_1d ( ypos ) if xpos . ndim != 1 : raise ValueError ( 'xpos must be a 1D array.' ) if ypos . ndim != 1 : raise ValueError ( 'ypos must be a 1D array.' ) if footprint is None : if box_size is None : raise ValueError ( 'box_size or footprint must be defined.' ) else : box_size = np . atleast_1d ( box_size ) if len ( box_size ) == 1 : box_size = np . repeat ( box_size , 2 ) if len ( box_size ) != 2 : raise ValueError ( 'box_size must have 1 or 2 elements.' ) footprint = np . ones ( box_size , dtype = bool ) else : footprint = np . asanyarray ( footprint , dtype = bool ) if footprint . ndim != 2 : raise ValueError ( 'footprint must be a 2D array.' ) use_error = False spec = inspect . getfullargspec ( centroid_func ) if 'mask' not in spec . args : raise ValueError ( 'The input "centroid_func" must have a "mask" ' 'keyword.' ) if 'error' in spec . args : use_error = True xcentroids = [ ] ycentroids = [ ] for xp , yp in zip ( xpos , ypos ) : slices_large , slices_small = overlap_slices ( data . shape , footprint . shape , ( yp , xp ) ) data_cutout = data [ slices_large ] mask_cutout = None if mask is not None : mask_cutout = mask [ slices_large ] footprint_mask = ~ footprint footprint_mask = footprint_mask [ slices_small ] if mask_cutout is None : mask_cutout = footprint_mask else : mask_cutout = np . logical_or ( mask_cutout , footprint_mask ) if error is not None and use_error : error_cutout = error [ slices_large ] xcen , ycen = centroid_func ( data_cutout , mask = mask_cutout , error = error_cutout ) else : xcen , ycen = centroid_func ( data_cutout , mask = mask_cutout ) xcentroids . append ( xcen + slices_large [ 1 ] . start ) ycentroids . append ( ycen + slices_large [ 0 ] . start ) return np . array ( xcentroids ) , np . 
array ( ycentroids ) | Calculate the centroid of sources at the defined positions . |
54,626 | def evaluate ( x , y , constant , amplitude , x_mean , y_mean , x_stddev , y_stddev , theta ) : model = Const2D ( constant ) ( x , y ) + Gaussian2D ( amplitude , x_mean , y_mean , x_stddev , y_stddev , theta ) ( x , y ) return model | Two dimensional Gaussian plus constant function . |
54,627 | def evaluate ( self , x , y , flux , x_0 , y_0 ) : x = ( x - x_0 + 0.5 + self . prf_shape [ 1 ] // 2 ) . astype ( 'int' ) y = ( y - y_0 + 0.5 + self . prf_shape [ 0 ] // 2 ) . astype ( 'int' ) y_sub , x_sub = subpixel_indices ( ( y_0 , x_0 ) , self . subsampling ) x_bound = np . logical_or ( x < 0 , x >= self . prf_shape [ 1 ] ) y_bound = np . logical_or ( y < 0 , y >= self . prf_shape [ 0 ] ) out_of_bounds = np . logical_or ( x_bound , y_bound ) x [ x_bound ] = 0 y [ y_bound ] = 0 result = flux * self . _prf_array [ int ( y_sub ) , int ( x_sub ) ] [ y , x ] result [ out_of_bounds ] = 0 return result | Discrete PRF model evaluation . |
54,628 | def _reproject ( wcs1 , wcs2 ) : import gwcs forward_origin = [ ] if isinstance ( wcs1 , fitswcs . WCS ) : forward = wcs1 . all_pix2world forward_origin = [ 0 ] elif isinstance ( wcs2 , gwcs . wcs . WCS ) : forward = wcs1 . forward_transform else : raise ValueError ( 'wcs1 must be an astropy.wcs.WCS or ' 'gwcs.wcs.WCS object.' ) inverse_origin = [ ] if isinstance ( wcs2 , fitswcs . WCS ) : inverse = wcs2 . all_world2pix inverse_origin = [ 0 ] elif isinstance ( wcs2 , gwcs . wcs . WCS ) : inverse = wcs2 . forward_transform . inverse else : raise ValueError ( 'wcs2 must be an astropy.wcs.WCS or ' 'gwcs.wcs.WCS object.' ) def _reproject_func ( x , y ) : forward_args = [ x , y ] + forward_origin sky = forward ( * forward_args ) inverse_args = sky + inverse_origin return inverse ( * inverse_args ) return _reproject_func | Perform the forward transformation of wcs1 followed by the inverse transformation of wcs2 . |
54,629 | def get_version_info ( ) : from astropy import __version__ astropy_version = __version__ from photutils import __version__ photutils_version = __version__ return 'astropy: {0}, photutils: {1}' . format ( astropy_version , photutils_version ) | Return astropy and photutils versions . |
54,630 | def calc_total_error ( data , bkg_error , effective_gain ) : data = np . asanyarray ( data ) bkg_error = np . asanyarray ( bkg_error ) inputs = [ data , bkg_error , effective_gain ] has_unit = [ hasattr ( x , 'unit' ) for x in inputs ] use_units = all ( has_unit ) if any ( has_unit ) and not use_units : raise ValueError ( 'If any of data, bkg_error, or effective_gain has ' 'units, then they all must all have units.' ) if use_units : if data . unit != bkg_error . unit : raise ValueError ( 'data and bkg_error must have the same units.' ) count_units = [ u . electron , u . photon ] datagain_unit = data . unit * effective_gain . unit if datagain_unit not in count_units : raise u . UnitsError ( '(data * effective_gain) has units of "{0}", ' 'but it must have count units (e.g. ' 'u.electron or u.photon).' . format ( datagain_unit ) ) if not isiterable ( effective_gain ) : effective_gain = np . zeros ( data . shape ) + effective_gain else : effective_gain = np . asanyarray ( effective_gain ) if effective_gain . shape != data . shape : raise ValueError ( 'If input effective_gain is 2D, then it must ' 'have the same shape as the input data.' ) if np . any ( effective_gain <= 0 ) : raise ValueError ( 'effective_gain must be strictly positive ' 'everywhere.' ) if use_units : unit = data . unit data = data . value effective_gain = effective_gain . value source_variance = np . maximum ( data / effective_gain , 0 ) * unit ** 2 else : source_variance = np . maximum ( data / effective_gain , 0 ) return np . sqrt ( bkg_error ** 2 + source_variance ) | Calculate a total error array combining a background - only error array with the Poisson noise of sources . |
54,631 | def to_sky ( self , wcs , mode = 'all' ) : sky_params = self . _to_sky_params ( wcs , mode = mode ) return SkyRectangularAperture ( ** sky_params ) | Convert the aperture to a SkyRectangularAperture object defined in celestial coordinates . |
54,632 | def to_sky ( self , wcs , mode = 'all' ) : sky_params = self . _to_sky_params ( wcs , mode = mode ) return SkyRectangularAnnulus ( ** sky_params ) | Convert the aperture to a SkyRectangularAnnulus object defined in celestial coordinates . |
54,633 | def to_pixel ( self , wcs , mode = 'all' ) : pixel_params = self . _to_pixel_params ( wcs , mode = mode ) return RectangularAperture ( ** pixel_params ) | Convert the aperture to a RectangularAperture object defined in pixel coordinates . |
54,634 | def to_pixel ( self , wcs , mode = 'all' ) : pixel_params = self . _to_pixel_params ( wcs , mode = mode ) return RectangularAnnulus ( ** pixel_params ) | Convert the aperture to a RectangularAnnulus object defined in pixel coordinates . |
54,635 | def _py2intround ( a ) : data = np . asanyarray ( a ) value = np . where ( data >= 0 , np . floor ( data + 0.5 ) , np . ceil ( data - 0.5 ) ) . astype ( int ) if not hasattr ( a , '__iter__' ) : value = np . asscalar ( value ) return value | Round the input to the nearest integer . |
54,636 | def _interpolate_missing_data ( data , mask , method = 'cubic' ) : from scipy import interpolate data_interp = np . array ( data , copy = True ) if len ( data_interp . shape ) != 2 : raise ValueError ( 'data must be a 2D array.' ) if mask . shape != data . shape : raise ValueError ( 'mask and data must have the same shape.' ) y , x = np . indices ( data_interp . shape ) xy = np . dstack ( ( x [ ~ mask ] . ravel ( ) , y [ ~ mask ] . ravel ( ) ) ) [ 0 ] z = data_interp [ ~ mask ] . ravel ( ) if method == 'nearest' : interpol = interpolate . NearestNDInterpolator ( xy , z ) elif method == 'cubic' : interpol = interpolate . CloughTocher2DInterpolator ( xy , z ) else : raise ValueError ( 'Unsupported interpolation method.' ) xy_missing = np . dstack ( ( x [ mask ] . ravel ( ) , y [ mask ] . ravel ( ) ) ) [ 0 ] data_interp [ mask ] = interpol ( xy_missing ) return data_interp | Interpolate missing data as identified by the mask keyword . |
54,637 | def _fit_star ( self , epsf , star , fitter , fitter_kwargs , fitter_has_fit_info , fit_boxsize ) : if fit_boxsize is not None : try : xcenter , ycenter = star . cutout_center large_slc , small_slc = overlap_slices ( star . shape , fit_boxsize , ( ycenter , xcenter ) , mode = 'strict' ) except ( PartialOverlapError , NoOverlapError ) : warnings . warn ( 'The star at ({0}, {1}) cannot be fit because ' 'its fitting region extends beyond the star ' 'cutout image.' . format ( star . center [ 0 ] , star . center [ 1 ] ) , AstropyUserWarning ) star = copy . deepcopy ( star ) star . _fit_error_status = 1 return star data = star . data [ large_slc ] weights = star . weights [ large_slc ] x0 = large_slc [ 1 ] . start y0 = large_slc [ 0 ] . start else : data = star . data weights = star . weights x0 = 0 y0 = 0 scaled_data = data / np . prod ( epsf . _oversampling ) yy , xx = np . indices ( data . shape , dtype = np . float ) xx = ( xx - ( star . cutout_center [ 0 ] - x0 ) ) * epsf . _oversampling [ 0 ] yy = ( yy - ( star . cutout_center [ 1 ] - y0 ) ) * epsf . _oversampling [ 1 ] epsf . flux = star . flux epsf . x_0 = 0.0 epsf . y_0 = 0.0 _epsf = epsf . copy ( ) _epsf . _oversampling = np . array ( [ 1. , 1. ] ) try : fitted_epsf = fitter ( model = _epsf , x = xx , y = yy , z = scaled_data , weights = weights , ** fitter_kwargs ) except TypeError : fitted_epsf = fitter ( model = _epsf , x = xx , y = yy , z = scaled_data , ** fitter_kwargs ) fit_error_status = 0 if fitter_has_fit_info : fit_info = copy . copy ( fitter . fit_info ) if 'ierr' in fit_info and fit_info [ 'ierr' ] not in [ 1 , 2 , 3 , 4 ] : fit_error_status = 2 else : fit_info = None x_center = ( star . cutout_center [ 0 ] + ( fitted_epsf . x_0 . value / epsf . _oversampling [ 0 ] ) ) y_center = ( star . cutout_center [ 1 ] + ( fitted_epsf . y_0 . value / epsf . _oversampling [ 1 ] ) ) star = copy . deepcopy ( star ) star . cutout_center = ( x_center , y_center ) star . flux = fitted_epsf . flux . 
value star . _fit_info = fit_info star . _fit_error_status = fit_error_status return star | Fit an ePSF model to a single star . |
54,638 | def _init_img_params ( param ) : if param is not None : param = np . atleast_1d ( param ) if len ( param ) == 1 : param = np . repeat ( param , 2 ) return param | Initialize 2D image - type parameters that can accept either a single or two values . |
54,639 | def _create_initial_epsf ( self , stars ) : oversampling = self . oversampling shape = self . shape if shape is not None : shape = np . atleast_1d ( shape ) . astype ( int ) if len ( shape ) == 1 : shape = np . repeat ( shape , 2 ) else : x_shape = np . int ( np . ceil ( stars . _max_shape [ 1 ] * oversampling [ 0 ] ) ) y_shape = np . int ( np . ceil ( stars . _max_shape [ 0 ] * oversampling [ 1 ] ) ) shape = np . array ( ( y_shape , x_shape ) ) shape = [ ( i + 1 ) if i % 2 == 0 else i for i in shape ] data = np . zeros ( shape , dtype = np . float ) xcenter = ( shape [ 1 ] - 1 ) / 2. ycenter = ( shape [ 0 ] - 1 ) / 2. epsf = EPSFModel ( data = data , origin = ( xcenter , ycenter ) , normalize = False , oversampling = oversampling ) return epsf | Create an initial EPSFModel object . |
54,640 | def _resample_residual ( self , star , epsf ) : x = epsf . _oversampling [ 0 ] * star . _xidx_centered y = epsf . _oversampling [ 1 ] * star . _yidx_centered epsf_xcenter , epsf_ycenter = epsf . origin xidx = _py2intround ( x + epsf_xcenter ) yidx = _py2intround ( y + epsf_ycenter ) mask = np . logical_and ( np . logical_and ( xidx >= 0 , xidx < epsf . shape [ 1 ] ) , np . logical_and ( yidx >= 0 , yidx < epsf . shape [ 0 ] ) ) xidx = xidx [ mask ] yidx = yidx [ mask ] stardata = ( ( star . _data_values_normalized / np . prod ( epsf . _oversampling ) ) - epsf . evaluate ( x = x , y = y , flux = 1.0 , x_0 = 0.0 , y_0 = 0.0 , use_oversampling = False ) ) resampled_img = np . full ( epsf . shape , np . nan ) resampled_img [ yidx , xidx ] = stardata [ mask ] return resampled_img | Compute a normalized residual image in the oversampled ePSF grid . |
54,641 | def _resample_residuals ( self , stars , epsf ) : shape = ( stars . n_good_stars , epsf . shape [ 0 ] , epsf . shape [ 1 ] ) star_imgs = np . zeros ( shape ) for i , star in enumerate ( stars . all_good_stars ) : star_imgs [ i , : , : ] = self . _resample_residual ( star , epsf ) return star_imgs | Compute normalized residual images for all the input stars . |
54,642 | def _smooth_epsf ( self , epsf_data ) : from scipy . ndimage import convolve if self . smoothing_kernel is None : return epsf_data elif self . smoothing_kernel == 'quartic' : kernel = np . array ( [ [ + 0.041632 , - 0.080816 , 0.078368 , - 0.080816 , + 0.041632 ] , [ - 0.080816 , - 0.019592 , 0.200816 , - 0.019592 , - 0.080816 ] , [ + 0.078368 , + 0.200816 , 0.441632 , + 0.200816 , + 0.078368 ] , [ - 0.080816 , - 0.019592 , 0.200816 , - 0.019592 , - 0.080816 ] , [ + 0.041632 , - 0.080816 , 0.078368 , - 0.080816 , + 0.041632 ] ] ) elif self . smoothing_kernel == 'quadratic' : kernel = np . array ( [ [ - 0.07428311 , 0.01142786 , 0.03999952 , 0.01142786 , - 0.07428311 ] , [ + 0.01142786 , 0.09714283 , 0.12571449 , 0.09714283 , + 0.01142786 ] , [ + 0.03999952 , 0.12571449 , 0.15428215 , 0.12571449 , + 0.03999952 ] , [ + 0.01142786 , 0.09714283 , 0.12571449 , 0.09714283 , + 0.01142786 ] , [ - 0.07428311 , 0.01142786 , 0.03999952 , 0.01142786 , - 0.07428311 ] ] ) elif isinstance ( self . smoothing_kernel , np . ndarray ) : kernel = self . kernel else : raise TypeError ( 'Unsupported kernel.' ) return convolve ( epsf_data , kernel ) | Smooth the ePSF array by convolving it with a kernel . |
54,643 | def _recenter_epsf ( self , epsf_data , epsf , centroid_func = centroid_com , box_size = 5 , maxiters = 20 , center_accuracy = 1.0e-4 ) : epsf = EPSFModel ( data = epsf_data , origin = epsf . origin , normalize = False , oversampling = epsf . oversampling ) epsf . fill_value = 0.0 xcenter , ycenter = epsf . origin dx_total = 0 dy_total = 0 y , x = np . indices ( epsf_data . shape , dtype = np . float ) iter_num = 0 center_accuracy_sq = center_accuracy ** 2 center_dist_sq = center_accuracy_sq + 1.e6 center_dist_sq_prev = center_dist_sq + 1 while ( iter_num < maxiters and center_dist_sq >= center_accuracy_sq ) : iter_num += 1 slices_large , slices_small = overlap_slices ( epsf_data . shape , box_size , ( ycenter , xcenter ) ) epsf_cutout = epsf_data [ slices_large ] mask = ~ np . isfinite ( epsf_cutout ) xcenter_new , ycenter_new = centroid_func ( epsf_cutout , mask = mask ) xcenter_new += slices_large [ 1 ] . start ycenter_new += slices_large [ 0 ] . start dx = xcenter - xcenter_new dy = ycenter - ycenter_new center_dist_sq = dx ** 2 + dy ** 2 if center_dist_sq >= center_dist_sq_prev : break center_dist_sq_prev = center_dist_sq dx_total += dx dy_total += dy epsf_data = epsf . evaluate ( x = x , y = y , flux = 1.0 , x_0 = xcenter + dx_total , y_0 = ycenter + dy_total , use_oversampling = False ) return epsf_data | Calculate the center of the ePSF data and shift the data so the ePSF center is at the center of the ePSF data array . |
54,644 | def _build_epsf_step ( self , stars , epsf = None ) : if len ( stars ) < 1 : raise ValueError ( 'stars must contain at least one EPSFStar or ' 'LinkedEPSFStar object.' ) if epsf is None : epsf = self . _create_initial_epsf ( stars ) else : epsf = copy . deepcopy ( epsf ) residuals = self . _resample_residuals ( stars , epsf ) self . _residuals . append ( residuals ) with warnings . catch_warnings ( ) : warnings . simplefilter ( 'ignore' , category = RuntimeWarning ) warnings . simplefilter ( 'ignore' , category = AstropyUserWarning ) residuals = self . sigclip ( residuals , axis = 0 , masked = False , return_bounds = False ) if HAS_BOTTLENECK : residuals = bottleneck . nanmedian ( residuals , axis = 0 ) else : residuals = np . nanmedian ( residuals , axis = 0 ) self . _residuals_sigclip . append ( residuals ) mask = ~ np . isfinite ( residuals ) if np . any ( mask ) : residuals = _interpolate_missing_data ( residuals , mask , method = 'cubic' ) residuals [ ~ np . isfinite ( residuals ) ] = 0. self . _residuals_interp . append ( residuals ) new_epsf = epsf . normalized_data + residuals new_epsf = self . _smooth_epsf ( new_epsf ) new_epsf = self . _recenter_epsf ( new_epsf , epsf , centroid_func = self . recentering_func , box_size = self . recentering_boxsize , maxiters = self . recentering_maxiters , center_accuracy = 1.0e-4 ) new_epsf /= np . sum ( new_epsf , dtype = np . float64 ) xcenter = ( new_epsf . shape [ 1 ] - 1 ) / 2. ycenter = ( new_epsf . shape [ 0 ] - 1 ) / 2. epsf_new = EPSFModel ( data = new_epsf , origin = ( xcenter , ycenter ) , normalize = False , oversampling = epsf . oversampling ) return epsf_new | A single iteration of improving an ePSF . |
54,645 | def build_epsf ( self , stars , init_epsf = None ) : iter_num = 0 center_dist_sq = self . center_accuracy_sq + 1. centers = stars . cutout_center_flat n_stars = stars . n_stars fit_failed = np . zeros ( n_stars , dtype = bool ) dx_dy = np . zeros ( ( n_stars , 2 ) , dtype = np . float ) epsf = init_epsf dt = 0. while ( iter_num < self . maxiters and np . max ( center_dist_sq ) >= self . center_accuracy_sq and not np . all ( fit_failed ) ) : t_start = time . time ( ) iter_num += 1 if self . progress_bar : if iter_num == 1 : dt_str = ' [? s/iter]' else : dt_str = ' [{:.1f} s/iter]' . format ( dt ) print ( 'PROGRESS: iteration {0:d} (of max {1}){2}' . format ( iter_num , self . maxiters , dt_str ) , end = '\r' ) epsf = self . _build_epsf_step ( stars , epsf = epsf ) with warnings . catch_warnings ( ) : message = '.*The fit may be unsuccessful;.*' warnings . filterwarnings ( 'ignore' , message = message , category = AstropyUserWarning ) stars = self . fitter ( epsf , stars ) fit_failed = np . array ( [ star . _fit_error_status > 0 for star in stars . all_stars ] ) if np . all ( fit_failed ) : raise ValueError ( 'The ePSF fitting failed for all stars.' ) if iter_num > 3 and np . any ( fit_failed ) : idx = fit_failed . nonzero ( ) [ 0 ] for i in idx : stars . all_stars [ i ] . _excluded_from_fit = True dx_dy = stars . cutout_center_flat - centers dx_dy = dx_dy [ np . logical_not ( fit_failed ) ] center_dist_sq = np . sum ( dx_dy * dx_dy , axis = 1 , dtype = np . float64 ) centers = stars . cutout_center_flat self . _nfit_failed . append ( np . count_nonzero ( fit_failed ) ) self . _center_dist_sq . append ( center_dist_sq ) self . _max_center_dist_sq . append ( np . max ( center_dist_sq ) ) self . _epsf . append ( epsf ) dt = time . time ( ) - t_start return epsf , stars | Iteratively build an ePSF from star cutouts . |
54,646 | def _set_oversampling ( self , value ) : try : value = np . atleast_1d ( value ) . astype ( float ) if len ( value ) == 1 : value = np . repeat ( value , 2 ) except ValueError : raise ValueError ( 'Oversampling factors must be float' ) if np . any ( value <= 0 ) : raise ValueError ( 'Oversampling factors must be greater than 0' ) self . _oversampling = value | This is a private method because it s used in the initializer by the oversampling setter .
54,647 | def evaluate ( self , x , y , flux , x_0 , y_0 , use_oversampling = True ) : if use_oversampling : xi = self . _oversampling [ 0 ] * ( np . asarray ( x ) - x_0 ) yi = self . _oversampling [ 1 ] * ( np . asarray ( y ) - y_0 ) else : xi = np . asarray ( x ) - x_0 yi = np . asarray ( y ) - y_0 xi += self . _x_origin yi += self . _y_origin f = flux * self . _normalization_constant evaluated_model = f * self . interpolator . ev ( xi , yi ) if self . _fill_value is not None : invalid = ( ( ( xi < 0 ) | ( xi > self . _nx - 1 ) ) | ( ( yi < 0 ) | ( yi > self . _ny - 1 ) ) ) evaluated_model [ invalid ] = self . _fill_value return evaluated_model | Evaluate the model on some input variables and provided model parameters . |
54,648 | def _find_bounds_1d ( data , x ) : idx = np . searchsorted ( data , x ) if idx == 0 : idx0 = 0 elif idx == len ( data ) : idx0 = idx - 2 else : idx0 = idx - 1 return idx0 | Find the index of the lower bound where x should be inserted into data to maintain order .
54,649 | def _bilinear_interp ( xyref , zref , xi , yi ) : if len ( xyref ) != 4 : raise ValueError ( 'xyref must contain only 4 (x, y) pairs' ) if zref . shape [ 0 ] != 4 : raise ValueError ( 'zref must have a length of 4 on the first ' 'axis.' ) xyref = [ tuple ( i ) for i in xyref ] idx = sorted ( range ( len ( xyref ) ) , key = xyref . __getitem__ ) xyref = sorted ( xyref ) ( x0 , y0 ) , ( _x0 , y1 ) , ( x1 , _y0 ) , ( _x1 , _y1 ) = xyref if x0 != _x0 or x1 != _x1 or y0 != _y0 or y1 != _y1 : raise ValueError ( 'The refxy points do not form a rectangle.' ) if not np . isscalar ( xi ) : xi = xi [ 0 ] if not np . isscalar ( yi ) : yi = yi [ 0 ] if not x0 <= xi <= x1 or not y0 <= yi <= y1 : raise ValueError ( 'The (x, y) input is not within the rectangle ' 'defined by xyref.' ) data = np . asarray ( zref ) [ idx ] weights = np . array ( [ ( x1 - xi ) * ( y1 - yi ) , ( x1 - xi ) * ( yi - y0 ) , ( xi - x0 ) * ( y1 - yi ) , ( xi - x0 ) * ( yi - y0 ) ] ) norm = ( x1 - x0 ) * ( y1 - y0 ) return np . sum ( data * weights [ : , None , None ] , axis = 0 ) / norm | Perform bilinear interpolation of four 2D arrays located at points on a regular grid . |
54,650 | def evaluate ( self , x , y , flux , x_0 , y_0 ) : if not np . isscalar ( x_0 ) : x_0 = x_0 [ 0 ] if not np . isscalar ( y_0 ) : y_0 = y_0 [ 0 ] if ( x_0 < self . _xgrid_min or x_0 > self . _xgrid_max or y_0 < self . _ygrid_min or y_0 > self . _ygrid_max ) : self . _ref_indices = np . argsort ( np . hypot ( self . _grid_xpos - x_0 , self . _grid_ypos - y_0 ) ) [ 0 ] self . _psf_interp = self . data [ self . _ref_indices , : , : ] else : self . _ref_indices = self . _find_bounding_points ( x_0 , y_0 ) xyref = np . array ( self . grid_xypos ) [ self . _ref_indices ] psfs = self . data [ self . _ref_indices , : , : ] self . _psf_interp = self . _bilinear_interp ( xyref , psfs , x_0 , y_0 ) psfmodel = FittableImageModel ( self . _psf_interp , oversampling = self . oversampling ) return psfmodel . evaluate ( x , y , flux , x_0 , y_0 ) | Evaluate the GriddedPSFModel for the input parameters . |
54,651 | def evaluate ( self , x , y , flux , x_0 , y_0 , sigma ) : return ( flux / 4 * ( ( self . _erf ( ( x - x_0 + 0.5 ) / ( np . sqrt ( 2 ) * sigma ) ) - self . _erf ( ( x - x_0 - 0.5 ) / ( np . sqrt ( 2 ) * sigma ) ) ) * ( self . _erf ( ( y - y_0 + 0.5 ) / ( np . sqrt ( 2 ) * sigma ) ) - self . _erf ( ( y - y_0 - 0.5 ) / ( np . sqrt ( 2 ) * sigma ) ) ) ) ) | Model function Gaussian PSF model . |
54,652 | def evaluate ( self , x , y , flux , x_0 , y_0 ) : if self . xname is None : dx = x - x_0 else : dx = x setattr ( self . psfmodel , self . xname , x_0 ) if self . xname is None : dy = y - y_0 else : dy = y setattr ( self . psfmodel , self . yname , y_0 ) if self . fluxname is None : return ( flux * self . _psf_scale_factor * self . _integrated_psfmodel ( dx , dy ) ) else : setattr ( self . psfmodel , self . yname , flux * self . _psf_scale_factor ) return self . _integrated_psfmodel ( dx , dy ) | The evaluation function for PRFAdapter . |
54,653 | def _isophote_list_to_table ( isophote_list ) : properties = OrderedDict ( ) properties [ 'sma' ] = 'sma' properties [ 'intens' ] = 'intens' properties [ 'int_err' ] = 'intens_err' properties [ 'eps' ] = 'ellipticity' properties [ 'ellip_err' ] = 'ellipticity_err' properties [ 'pa' ] = 'pa' properties [ 'pa_err' ] = 'pa_err' properties [ 'grad_r_error' ] = 'grad_rerr' properties [ 'ndata' ] = 'ndata' properties [ 'nflag' ] = 'flag' properties [ 'niter' ] = 'niter' properties [ 'stop_code' ] = 'stop_code' isotable = QTable ( ) for k , v in properties . items ( ) : isotable [ v ] = np . array ( [ getattr ( iso , k ) for iso in isophote_list ] ) if k in ( 'pa' , 'pa_err' ) : isotable [ v ] = isotable [ v ] * 180. / np . pi * u . deg return isotable | Convert an ~photutils . isophote . IsophoteList instance to a ~astropy . table . QTable . |
54,654 | def _compute_fluxes ( self ) : sma = self . sample . geometry . sma x0 = self . sample . geometry . x0 y0 = self . sample . geometry . y0 xsize = self . sample . image . shape [ 1 ] ysize = self . sample . image . shape [ 0 ] imin = max ( 0 , int ( x0 - sma - 0.5 ) - 1 ) jmin = max ( 0 , int ( y0 - sma - 0.5 ) - 1 ) imax = min ( xsize , int ( x0 + sma + 0.5 ) + 1 ) jmax = min ( ysize , int ( y0 + sma + 0.5 ) + 1 ) if ( jmax - jmin > 1 ) and ( imax - imin ) > 1 : y , x = np . mgrid [ jmin : jmax , imin : imax ] radius , angle = self . sample . geometry . to_polar ( x , y ) radius_e = self . sample . geometry . radius ( angle ) midx = ( radius <= sma ) values = self . sample . image [ y [ midx ] , x [ midx ] ] tflux_c = np . ma . sum ( values ) npix_c = np . ma . count ( values ) midx2 = ( radius <= radius_e ) values = self . sample . image [ y [ midx2 ] , x [ midx2 ] ] tflux_e = np . ma . sum ( values ) npix_e = np . ma . count ( values ) else : tflux_e = 0. tflux_c = 0. npix_e = 0 npix_c = 0 return tflux_e , tflux_c , npix_e , npix_c | Compute integrated flux inside ellipse as well as inside a circle defined with the same semimajor axis . |
54,655 | def _compute_deviations ( self , sample , n ) : try : coeffs = fit_first_and_second_harmonics ( self . sample . values [ 0 ] , self . sample . values [ 2 ] ) coeffs = coeffs [ 0 ] model = first_and_second_harmonic_function ( self . sample . values [ 0 ] , coeffs ) residual = self . sample . values [ 2 ] - model c = fit_upper_harmonic ( residual , sample . values [ 2 ] , n ) covariance = c [ 1 ] ce = np . diagonal ( covariance ) c = c [ 0 ] a = c [ 1 ] / self . sma / sample . gradient b = c [ 2 ] / self . sma / sample . gradient gre = self . grad_r_error if self . grad_r_error is not None else 0.64 a_err = abs ( a ) * np . sqrt ( ( ce [ 1 ] / c [ 1 ] ) ** 2 + gre ** 2 ) b_err = abs ( b ) * np . sqrt ( ( ce [ 2 ] / c [ 2 ] ) ** 2 + gre ** 2 ) except Exception : a = b = a_err = b_err = None return a , b , a_err , b_err | Compute deviations from a perfect ellipse based on the amplitudes and errors for harmonic n . Note that we first subtract the first and second harmonics from the raw data . |
54,656 | def _compute_errors ( self ) : try : coeffs = fit_first_and_second_harmonics ( self . sample . values [ 0 ] , self . sample . values [ 2 ] ) covariance = coeffs [ 1 ] coeffs = coeffs [ 0 ] model = first_and_second_harmonic_function ( self . sample . values [ 0 ] , coeffs ) residual_rms = np . std ( self . sample . values [ 2 ] - model ) errors = np . diagonal ( covariance ) * residual_rms eps = self . sample . geometry . eps pa = self . sample . geometry . pa ea = abs ( errors [ 2 ] / self . grad ) eb = abs ( errors [ 1 ] * ( 1. - eps ) / self . grad ) self . x0_err = np . sqrt ( ( ea * np . cos ( pa ) ) ** 2 + ( eb * np . sin ( pa ) ) ** 2 ) self . y0_err = np . sqrt ( ( ea * np . sin ( pa ) ) ** 2 + ( eb * np . cos ( pa ) ) ** 2 ) self . ellip_err = ( abs ( 2. * errors [ 4 ] * ( 1. - eps ) / self . sma / self . grad ) ) if ( abs ( eps ) > np . finfo ( float ) . resolution ) : self . pa_err = ( abs ( 2. * errors [ 3 ] * ( 1. - eps ) / self . sma / self . grad / ( 1. - ( 1. - eps ) ** 2 ) ) ) else : self . pa_err = 0. except Exception : self . x0_err = self . y0_err = self . pa_err = self . ellip_err = 0. | Compute parameter errors based on the diagonal of the covariance matrix of the four harmonic coefficients for harmonics n = 1 and n = 2 . |
54,657 | def fix_geometry ( self , isophote ) : self . sample . geometry . eps = isophote . sample . geometry . eps self . sample . geometry . pa = isophote . sample . geometry . pa self . sample . geometry . x0 = isophote . sample . geometry . x0 self . sample . geometry . y0 = isophote . sample . geometry . y0 | Fix the geometry of a problematic isophote to be identical to the input isophote . |
54,658 | def get_closest ( self , sma ) : index = ( np . abs ( self . sma - sma ) ) . argmin ( ) return self . _list [ index ] | Return the ~photutils . isophote . Isophote instance that has the closest semimajor axis length to the input semimajor axis . |
54,659 | def interpolate_masked_data ( data , mask , error = None , background = None ) : if data . shape != mask . shape : raise ValueError ( 'data and mask must have the same shape' ) data_out = np . copy ( data ) mask_idx = mask . nonzero ( ) if mask_idx [ 0 ] . size == 0 : raise ValueError ( 'All items in data are masked' ) for x in zip ( * mask_idx ) : X = np . array ( [ [ max ( x [ i ] - 1 , 0 ) , min ( x [ i ] + 1 , data . shape [ i ] - 1 ) ] for i in range ( len ( data . shape ) ) ] ) goodpix = ~ mask [ X ] if not np . any ( goodpix ) : warnings . warn ( 'The masked pixel at "{}" is completely ' 'surrounded by (connected) masked pixels, ' 'thus unable to interpolate' . format ( x , ) , AstropyUserWarning ) continue data_out [ x ] = np . mean ( data [ X ] [ goodpix ] ) if background is not None : if background . shape != data . shape : raise ValueError ( 'background and data must have the same ' 'shape' ) background_out = np . copy ( background ) background_out [ x ] = np . mean ( background [ X ] [ goodpix ] ) else : background_out = None if error is not None : if error . shape != data . shape : raise ValueError ( 'error and data must have the same ' 'shape' ) error_out = np . copy ( error ) error_out [ x ] = np . sqrt ( np . mean ( error [ X ] [ goodpix ] ** 2 ) ) else : error_out = None return data_out , error_out , background_out | Interpolate over masked pixels in data and optional error or background images . |
54,660 | def ThreadsWithRunningExecServers ( self ) : socket_dir = '/tmp/pyringe_%s' % self . inferior . pid if os . path . isdir ( socket_dir ) : return [ int ( fname [ : - 9 ] ) for fname in os . listdir ( socket_dir ) if fname . endswith ( '.execsock' ) ] return [ ] | Returns a list of tids of inferior threads with open exec servers . |
54,661 | def SendToExecSocket ( self , code , tid = None ) : response = self . _SendToExecSocketRaw ( json . dumps ( code ) , tid ) return json . loads ( response ) | Inject python code into exec socket . |
54,662 | def CloseExecSocket ( self , tid = None ) : response = self . _SendToExecSocketRaw ( '__kill__' , tid ) if response != '__kill_ack__' : logging . warning ( 'May not have succeeded in closing socket, make sure ' 'using execsocks().' ) | Send closing request to exec socket . |
54,663 | def Backtrace ( self , to_string = False ) : if self . inferior . is_running : res = self . inferior . Backtrace ( ) if to_string : return res print res else : logging . error ( 'Not attached to any process.' ) | Get a backtrace of the current position . |
54,664 | def ListThreads ( self ) : if self . inferior . is_running : return self . inferior . threads logging . error ( 'Not attached to any process.' ) return [ ] | List the currently running python threads . |
54,665 | def extract_filename ( self ) : globals_gdbval = self . _gdbval [ 'f_globals' ] . cast ( GdbCache . DICT ) global_dict = libpython . PyDictObjectPtr ( globals_gdbval ) for key , value in global_dict . iteritems ( ) : if str ( key . proxyval ( set ( ) ) ) == '__file__' : return str ( value . proxyval ( set ( ) ) ) | Alternative way of getting the executed file which inspects globals . |
54,666 | def _UnserializableObjectFallback ( self , obj ) : if isinstance ( obj , libpython . PyInstanceObjectPtr ) : in_class = obj . pyop_field ( 'in_class' ) result_dict = in_class . pyop_field ( 'cl_dict' ) . proxyval ( set ( ) ) instanceproxy = obj . proxyval ( set ( ) ) result_dict . update ( instanceproxy . attrdict ) result_dict [ '__pyringe_type_name__' ] = instanceproxy . cl_name result_dict [ '__pyringe_address__' ] = instanceproxy . address return result_dict if isinstance ( obj , libpython . HeapTypeObjectPtr ) : try : type_ptr = obj . field ( 'ob_type' ) tp_dict = type_ptr . cast ( GdbCache . TYPE ) [ 'tp_dict' ] . cast ( GdbCache . DICT ) result_dict = libpython . PyDictObjectPtr ( tp_dict ) . proxyval ( set ( ) ) except gdb . error : result_dict = { } try : result_dict . update ( obj . get_attr_dict ( ) . proxyval ( set ( ) ) ) result_dict [ '__pyringe_type_name__' ] = obj . safe_tp_name ( ) result_dict [ '__pyringe_address__' ] = long ( obj . _gdbval ) return result_dict except TypeError : pass try : proxy = obj . proxyval ( set ( ) ) if isinstance ( proxy , dict ) : return { str ( key ) : val for key , val in proxy . iteritems ( ) } return proxy except AttributeError : return str ( obj ) | Handles sanitizing of unserializable objects for Json . |
54,667 | def _AcceptRPC ( self ) : request = self . _ReadObject ( ) if request [ 'func' ] == '__kill__' : self . ClearBreakpoints ( ) self . _WriteObject ( '__kill_ack__' ) return False if 'func' not in request or request [ 'func' ] . startswith ( '_' ) : raise RpcException ( 'Not a valid public API function.' ) rpc_result = getattr ( self , request [ 'func' ] ) ( * request [ 'args' ] ) self . _WriteObject ( rpc_result ) return True | Reads RPC request from stdin and processes it writing result to stdout . |
54,668 | def _UnpackGdbVal ( self , gdb_value ) : val_type = gdb_value . type . code if val_type == gdb . TYPE_CODE_INT or val_type == gdb . TYPE_CODE_ENUM : return int ( gdb_value ) if val_type == gdb . TYPE_CODE_VOID : return None if val_type == gdb . TYPE_CODE_PTR : return long ( gdb_value ) if val_type == gdb . TYPE_CODE_ARRAY : return str ( gdb_value ) return str ( gdb_value ) | Unpacks gdb . Value objects and returns the best - matched python object . |
54,669 | def EnsureGdbPosition ( self , pid , tid , frame_depth ) : position = [ pid , tid , frame_depth ] if not pid : return if not self . IsAttached ( ) : try : self . Attach ( position ) except gdb . error as exc : raise PositionUnavailableException ( exc . message ) if gdb . selected_inferior ( ) . pid != pid : self . Detach ( ) try : self . Attach ( position ) except gdb . error as exc : raise PositionUnavailableException ( exc . message ) if tid : tstate_head = GdbCache . INTERP_HEAD [ 'tstate_head' ] for tstate in self . _IterateChainedList ( tstate_head , 'next' ) : if tid == tstate [ 'thread_id' ] : self . selected_tstate = tstate break else : raise PositionUnavailableException ( 'Thread %s does not exist.' % str ( tid ) ) stack_head = self . selected_tstate [ 'frame' ] if frame_depth is not None : frames = list ( self . _IterateChainedList ( stack_head , 'f_back' ) ) frames . reverse ( ) try : self . selected_frame = frames [ frame_depth ] except IndexError : raise PositionUnavailableException ( 'Stack is not %s frames deep' % str ( frame_depth + 1 ) ) | Make sure our position matches the request . |
54,670 | def IsSymbolFileSane ( self , position ) : pos = [ position [ 0 ] , None , None ] self . EnsureGdbPosition ( * pos ) try : if GdbCache . DICT and GdbCache . TYPE and GdbCache . INTERP_HEAD : tstate = GdbCache . INTERP_HEAD [ 'tstate_head' ] tstate [ 'thread_id' ] frame = tstate [ 'frame' ] frame_attrs = [ 'f_back' , 'f_locals' , 'f_localsplus' , 'f_globals' , 'f_builtins' , 'f_lineno' , 'f_lasti' ] for attr_name in frame_attrs : frame [ attr_name ] code = frame [ 'f_code' ] code_attrs = [ 'co_name' , 'co_filename' , 'co_nlocals' , 'co_varnames' , 'co_lnotab' , 'co_firstlineno' ] for attr_name in code_attrs : code [ attr_name ] return True except gdb . error : return False return False | Performs basic sanity check by trying to look up a bunch of symbols . |
54,671 | def Detach ( self ) : if not self . IsAttached ( ) : return None pid = gdb . selected_inferior ( ) . pid self . Interrupt ( [ pid , None , None ] ) self . Continue ( [ pid , None , None ] ) result = gdb . execute ( 'detach' , to_string = True ) if not result : return None return result | Detaches from the inferior . If not attached this is a no - op . |
54,672 | def Call ( self , position , function_call ) : self . EnsureGdbPosition ( position [ 0 ] , None , None ) if not gdb . selected_thread ( ) . is_stopped ( ) : self . Interrupt ( position ) result_value = gdb . parse_and_eval ( function_call ) return self . _UnpackGdbVal ( result_value ) | Perform a function call in the inferior . |
54,673 | def ExecuteRaw ( self , position , command ) : self . EnsureGdbPosition ( position [ 0 ] , None , None ) return gdb . execute ( command , to_string = True ) | Send a command string to gdb . |
54,674 | def _GetGdbThreadMapping ( self , position ) : if len ( gdb . selected_inferior ( ) . threads ( ) ) == 1 : return { position [ 1 ] : 1 } thread_line_regexp = r'\s*\**\s*([0-9]+)\s+[a-zA-Z]+\s+([x0-9a-fA-F]+)\s.*' output = gdb . execute ( 'info threads' , to_string = True ) matches = [ re . match ( thread_line_regexp , line ) for line in output . split ( '\n' ) [ 1 : ] ] return { int ( match . group ( 2 ) , 16 ) : int ( match . group ( 1 ) ) for match in matches if match } | Gets a mapping from python tid to gdb thread num . |
54,675 | def _Inject ( self , position , call ) : self . EnsureGdbPosition ( position [ 0 ] , position [ 1 ] , None ) self . ClearBreakpoints ( ) self . _AddThreadSpecificBreakpoint ( position ) gdb . parse_and_eval ( '%s = 1' % GdbCache . PENDINGCALLS_TO_DO ) gdb . parse_and_eval ( '%s = 1' % GdbCache . PENDINGBUSY ) try : self . Continue ( position ) if not gdb . selected_thread ( ) . is_stopped ( ) : raise RuntimeError ( 'Gdb is not acting as expected, is it being run in ' 'async mode?' ) finally : gdb . parse_and_eval ( '%s = 0' % GdbCache . PENDINGBUSY ) self . Call ( position , call ) | Injects evaluation of call in a safe location in the inferior . |
54,676 | def _BacktraceFromFramePtr ( self , frame_ptr ) : frame_objs = [ PyFrameObjectPtr ( frame ) for frame in self . _IterateChainedList ( frame_ptr , 'f_back' ) ] frame_objs . reverse ( ) tb_strings = [ 'Traceback (most recent call last):' ] for frame in frame_objs : line_string = ( ' File "%s", line %s, in %s' % ( frame . filename ( ) , str ( frame . current_line_num ( ) ) , frame . co_name . proxyval ( set ( ) ) ) ) tb_strings . append ( line_string ) line_string = ' %s' % frame . current_line ( ) . strip ( ) tb_strings . append ( line_string ) return '\n' . join ( tb_strings ) | Assembles and returns what looks exactly like python s backtraces . |
54,677 | def Kill ( self ) : try : if self . is_running : self . Detach ( ) if self . _Execute ( '__kill__' ) == '__kill_ack__' : time . sleep ( 0.1 ) except ( TimeoutError , ProxyError ) : logging . debug ( 'Termination request not acknowledged, killing gdb.' ) if self . is_running : os . kill ( self . _process . pid , signal . SIGINT ) self . _process . terminate ( ) self . _process . wait ( ) self . _errfile_r . close ( ) self . _outfile_r . close ( ) | Send death pill to Gdb and forcefully kill it if that doesn t work . |
54,678 | def Version ( ) : output = subprocess . check_output ( [ 'gdb' , '--version' ] ) . split ( '\n' ) [ 0 ] major = None minor = None micro = None for potential_versionstring in output . split ( ) : version = re . split ( '[^0-9]' , potential_versionstring ) try : major = int ( version [ 0 ] ) except ( IndexError , ValueError ) : pass try : minor = int ( version [ 1 ] ) except ( IndexError , ValueError ) : pass try : micro = int ( version [ 2 ] ) except ( IndexError , ValueError ) : pass return ( major , minor , micro ) | Gets the version of gdb as a 3 - tuple . |
54,679 | def _JsonDecodeDict ( self , data ) : rv = { } for key , value in data . iteritems ( ) : if isinstance ( key , unicode ) : key = self . _TryStr ( key ) if isinstance ( value , unicode ) : value = self . _TryStr ( value ) elif isinstance ( value , list ) : value = self . _JsonDecodeList ( value ) rv [ key ] = value if '__pyringe_type_name__' in data : rv = ProxyObject ( rv ) return rv | Json object decode hook that automatically converts unicode objects . |
54,680 | def _Execute ( self , funcname , * args , ** kwargs ) : wait_for_completion = kwargs . get ( 'wait_for_completion' , False ) rpc_dict = { 'func' : funcname , 'args' : args } self . _Send ( json . dumps ( rpc_dict ) ) timeout = TIMEOUT_FOREVER if wait_for_completion else TIMEOUT_DEFAULT result_string = self . _Recv ( timeout ) try : result = json . loads ( result_string , object_hook = self . _JsonDecodeDict ) if isinstance ( result , unicode ) : result = self . _TryStr ( result ) elif isinstance ( result , list ) : result = self . _JsonDecodeList ( result ) except ValueError : raise ValueError ( 'Response JSON invalid: ' + str ( result_string ) ) except TypeError : raise ValueError ( 'Response JSON invalid: ' + str ( result_string ) ) return result | Send an RPC request to the gdb - internal python . |
54,681 | def _Recv ( self , timeout ) : buf = '' wait_for_line = timeout is TIMEOUT_FOREVER deadline = time . time ( ) + ( timeout if not wait_for_line else 0 ) def TimeLeft ( ) : return max ( 1000 * ( deadline - time . time ( ) ) , 0 ) continue_reading = True while continue_reading : poll_timeout = None if wait_for_line else TimeLeft ( ) fd_list = [ event [ 0 ] for event in self . _poller . poll ( poll_timeout ) if event [ 1 ] & ( select . POLLIN | select . POLLPRI ) ] if not wait_for_line and TimeLeft ( ) == 0 : continue_reading = False if self . _outfile_r . fileno ( ) in fd_list : buf += self . _outfile_r . readline ( ) if buf . endswith ( '\n' ) : return buf if self . _errfile_r . fileno ( ) in fd_list : exc = self . _errfile_r . readline ( ) if exc : exc_text = '\n-----------------------------------\n' exc_text += 'Error occurred within GdbService:\n' try : exc_text += json . loads ( exc ) except ValueError : deadline = time . time ( ) + 0.5 while self . is_running and TimeLeft ( ) > 0 : exc += self . _errfile_r . read ( ) try : exc_text += json . loads ( exc ) except ValueError : exc_text = exc raise ProxyError ( exc_text ) raise TimeoutError ( ) | Receive output from gdb . |
54,682 | def needsattached ( func ) : @ functools . wraps ( func ) def wrap ( self , * args , ** kwargs ) : if not self . attached : raise PositionError ( 'Not attached to any process.' ) return func ( self , * args , ** kwargs ) return wrap | Decorator to prevent commands from being used when not attached . |
54,683 | def Reinit ( self , pid , auto_symfile_loading = True ) : self . ShutDownGdb ( ) self . __init__ ( pid , auto_symfile_loading , architecture = self . arch ) | Reinitializes the object with a new pid . |
54,684 | def InjectString ( self , codestring , wait_for_completion = True ) : if self . inferior . is_running and self . inferior . gdb . IsAttached ( ) : try : self . inferior . gdb . InjectString ( self . inferior . position , codestring , wait_for_completion = wait_for_completion ) except RuntimeError : exc_type , exc_value , exc_traceback = sys . exc_info ( ) traceback . print_exception ( exc_type , exc_value , exc_traceback ) else : logging . error ( 'Not attached to any process.' ) | Try to inject python code into current thread . |
54,685 | def field ( self , name ) : if self . is_null ( ) : raise NullPyObjectPtr ( self ) if name == 'ob_type' : pyo_ptr = self . _gdbval . cast ( PyObjectPtr . get_gdb_type ( ) ) return pyo_ptr . dereference ( ) [ name ] if name == 'ob_size' : try : return self . _gdbval . dereference ( ) [ name ] except RuntimeError : return self . _gdbval . dereference ( ) [ 'ob_base' ] [ name ] return self . _gdbval . dereference ( ) [ name ] | Get the gdb . Value for the given field within the PyObject coping with some python 2 versus python 3 differences . |
54,686 | def write_repr ( self , out , visited ) : return out . write ( repr ( self . proxyval ( visited ) ) ) | Write a string representation of the value scraped from the inferior process to out a file - like object . |
54,687 | def from_pyobject_ptr ( cls , gdbval ) : try : p = PyObjectPtr ( gdbval ) cls = cls . subclass_from_type ( p . type ( ) ) return cls ( gdbval , cast_to = cls . get_gdb_type ( ) ) except RuntimeError : pass return cls ( gdbval ) | Try to locate the appropriate derived class dynamically and cast the pointer accordingly . |
54,688 | def proxyval ( self , visited ) : if self . as_address ( ) in visited : return ProxyAlreadyVisited ( '<...>' ) visited . add ( self . as_address ( ) ) pyop_attr_dict = self . get_attr_dict ( ) if pyop_attr_dict : attr_dict = pyop_attr_dict . proxyval ( visited ) else : attr_dict = { } tp_name = self . safe_tp_name ( ) return InstanceProxy ( tp_name , attr_dict , long ( self . _gdbval ) ) | Support for new - style classes . |
54,689 | def addr2line ( self , addrq ) : co_lnotab = self . pyop_field ( 'co_lnotab' ) . proxyval ( set ( ) ) lineno = int_from_int ( self . field ( 'co_firstlineno' ) ) addr = 0 for addr_incr , line_incr in zip ( co_lnotab [ : : 2 ] , co_lnotab [ 1 : : 2 ] ) : addr += ord ( addr_incr ) if addr > addrq : return lineno lineno += ord ( line_incr ) return lineno | Get the line number for a given bytecode offset |
54,690 | def current_line ( self ) : if self . is_optimized_out ( ) : return '(frame information optimized out)' with open ( self . filename ( ) , 'r' ) as f : all_lines = f . readlines ( ) return all_lines [ self . current_line_num ( ) - 1 ] | Get the text of the current source line as a string with a trailing newline character |
54,691 | def select ( self ) : if not hasattr ( self . _gdbframe , 'select' ) : print ( 'Unable to select frame: ' 'this build of gdb does not expose a gdb.Frame.select method' ) return False self . _gdbframe . select ( ) return True | If supported select this frame and return True ; return False if unsupported |
54,692 | def get_index ( self ) : index = 0 iter_frame = self while iter_frame . newer ( ) : index += 1 iter_frame = iter_frame . newer ( ) return index | Calculate index of frame starting at 0 for the newest frame within this thread |
54,693 | def is_evalframeex ( self ) : if self . _gdbframe . name ( ) == 'PyEval_EvalFrameEx' : if self . _gdbframe . type ( ) == gdb . NORMAL_FRAME : return True return False | Is this a PyEval_EvalFrameEx frame? |
54,694 | def get_selected_python_frame ( cls ) : frame = cls . get_selected_frame ( ) while frame : if frame . is_evalframeex ( ) : return frame frame = frame . older ( ) return None | Try to obtain the Frame for the python code in the selected frame or None |
54,695 | def ListCommands ( self ) : print 'Available commands:' commands = dict ( self . commands ) for plugin in self . plugins : commands . update ( plugin . commands ) for com in sorted ( commands ) : if not com . startswith ( '_' ) : self . PrintHelpTextLine ( com , commands [ com ] ) | Print a list of currently available commands and their descriptions . |
54,696 | def StatusLine ( self ) : pid = self . inferior . pid curthread = None threadnum = 0 if pid : if not self . inferior . is_running : logging . warning ( 'Inferior is not running.' ) self . Detach ( ) pid = None else : try : if not self . inferior . attached : self . inferior . StartGdb ( ) curthread = self . inferior . current_thread threadnum = len ( self . inferior . threads ) except ( inferior . ProxyError , inferior . TimeoutError , inferior . PositionError ) as err : logging . debug ( 'Error while getting information in status line:%s' % err . message ) pass status = ( '==> pid:[%s] #threads:[%s] current thread:[%s]' % ( pid , threadnum , curthread ) ) return status | Generate the colored line indicating plugin status . |
54,697 | def Attach ( self , pid ) : if self . inferior . is_running : answer = raw_input ( 'Already attached to process ' + str ( self . inferior . pid ) + '. Detach? [y]/n ' ) if answer and answer != 'y' and answer != 'yes' : return None self . Detach ( ) for plugin in self . plugins : plugin . position = None self . inferior . Reinit ( pid ) | Attach to the process with the given pid . |
54,698 | def StartGdb ( self ) : if self . inferior . is_running : self . inferior . ShutDownGdb ( ) program_arg = 'program %d ' % self . inferior . pid else : program_arg = '' os . system ( 'gdb ' + program_arg + ' ' . join ( self . gdb_args ) ) reset_position = raw_input ( 'Reset debugger position? [y]/n ' ) if not reset_position or reset_position == 'y' or reset_position == 'yes' : self . position = None | Hands control over to a new gdb process . |
54,699 | def __get_node ( self , word ) : node = self . root for c in word : try : node = node . children [ c ] except KeyError : return None return node | Private function retrieving the final node of the trie for the given word
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.