Dataset columns (type and value/length statistics):

  id                 int32    values 0 – 252k
  repo               string   lengths 7 – 55
  path               string   lengths 4 – 127
  func_name          string   lengths 1 – 88
  original_string    string   lengths 75 – 19.8k
  language           string   1 distinct value
  code               string   lengths 75 – 19.8k
  code_tokens        list
  docstring          string   lengths 3 – 17.3k
  docstring_tokens   list
  sha                string   lengths 40 – 40
  url                string   lengths 87 – 242
10,500
astropy/photutils
photutils/extern/sigma_clipping.py
_nanstd
def _nanstd(array, axis=None, ddof=0): """Bottleneck nanstd function that handle tuple axis.""" if isinstance(axis, tuple): array = _move_tuple_axes_first(array, axis=axis) axis = 0 return bottleneck.nanstd(array, axis=axis, ddof=ddof)
python
def _nanstd(array, axis=None, ddof=0): """Bottleneck nanstd function that handle tuple axis.""" if isinstance(axis, tuple): array = _move_tuple_axes_first(array, axis=axis) axis = 0 return bottleneck.nanstd(array, axis=axis, ddof=ddof)
[ "def", "_nanstd", "(", "array", ",", "axis", "=", "None", ",", "ddof", "=", "0", ")", ":", "if", "isinstance", "(", "axis", ",", "tuple", ")", ":", "array", "=", "_move_tuple_axes_first", "(", "array", ",", "axis", "=", "axis", ")", "axis", "=", "0", "return", "bottleneck", ".", "nanstd", "(", "array", ",", "axis", "=", "axis", ",", "ddof", "=", "ddof", ")" ]
Bottleneck nanstd function that handle tuple axis.
[ "Bottleneck", "nanstd", "function", "that", "handle", "tuple", "axis", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/extern/sigma_clipping.py#L69-L75
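For context on the row above: `_nanstd` depends on a helper, `_move_tuple_axes_first`, that is not included in this row. Below is a minimal sketch of the same idea using `np.moveaxis`; it is an approximation of the workaround, not the library's implementation, and the function name is made up.

```python
import numpy as np
import bottleneck

def nanstd_tuple_axis(array, axis=None, ddof=0):
    """Sketch: bottleneck nanstd that tolerates a tuple of axes."""
    if isinstance(axis, tuple):
        # bottleneck reductions take only a single integer axis, so collapse
        # the requested axes into one leading dimension and reduce on axis=0.
        ndim = array.ndim
        other = tuple(i for i in range(ndim) if i not in axis)
        array = np.moveaxis(array, axis + other, tuple(range(ndim)))
        array = array.reshape((-1,) + array.shape[len(axis):])
        axis = 0
    return bottleneck.nanstd(array, axis=axis, ddof=ddof)

# Example: reduce over the last two axes of a (4, 5, 6) array -> shape (4,)
data = np.random.normal(size=(4, 5, 6))
print(nanstd_tuple_axis(data, axis=(1, 2)))
```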
10,501
astropy/photutils
photutils/extern/sigma_clipping.py
sigma_clip
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, maxiters=5, cenfunc='median', stdfunc='std', axis=None, masked=True, return_bounds=False, copy=True): """ Perform sigma-clipping on the provided data. The data will be iterated over, each time rejecting values that are less or more than a specified number of standard deviations from a center value. Clipped (rejected) pixels are those where:: data < cenfunc(data [,axis=int]) - (sigma_lower * stdfunc(data [,axis=int])) data > cenfunc(data [,axis=int]) + (sigma_upper * stdfunc(data [,axis=int])) Invalid data values (i.e. NaN or inf) are automatically clipped. For an object-oriented interface to sigma clipping, see :class:`SigmaClip`. .. note:: `scipy.stats.sigmaclip <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_ provides a subset of the functionality in this class. Also, its input data cannot be a masked array and it does not handle data that contains invalid values (i.e. NaN or inf). Also note that it uses the mean as the centering function. If your data is a `~numpy.ndarray` with no invalid values and you want to use the mean as the centering function with ``axis=None`` and iterate to convergence, then `scipy.stats.sigmaclip` is ~25-30% faster than the equivalent settings here (``sigma_clip(data, cenfunc='mean', maxiters=None, axis=None)``). Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` The data to be sigma clipped. sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or `None`, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or `None`, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. maxiters : int or `None`, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If set to ``'median'`` or ``'mean'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanmean`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. .. _bottleneck: https://github.com/kwgoodman/bottleneck stdfunc : {'std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. If set to ``'std'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanstd`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'std'``. axis : `None` or int or tuple of int, optional The axis or axes along which to sigma clip the data. 
If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. masked : bool, optional If `True`, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values. If `False`, then a `~numpy.ndarray` and the minimum and maximum clipping thresholds are returned. The default is `True`. return_bounds : bool, optional If `True`, then the minimum and maximum clipping bounds are also returned. copy : bool, optional If `True`, then the ``data`` array will be copied. If `False` and ``masked=True``, then the returned masked array data will contain the same array as the input ``data`` (if ``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`). The default is `True`. Returns ------- result : flexible If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values. If ``masked=False``, then a `~numpy.ndarray` is returned. If ``return_bounds=True``, then in addition to the (masked) array above, the minimum and maximum clipping bounds are returned. If ``masked=False`` and ``axis=None``, then the output array is a flattened 1D `~numpy.ndarray` where the clipped values have been removed. If ``return_bounds=True`` then the returned minimum and maximum thresholds are scalars. If ``masked=False`` and ``axis`` is specified, then the output `~numpy.ndarray` will have the same shape as the input ``data`` and contain ``np.nan`` where values were clipped. If ``return_bounds=True`` then the returned minimum and maximum clipping thresholds will be be `~numpy.ndarray`\\s. See Also -------- SigmaClip, sigma_clipped_stats Examples -------- This example uses a data array of random variates from a Gaussian distribution. We clip all points that are more than 2 sample standard deviations from the median. The result is a masked array, where the mask is `True` for clipped data:: >>> from astropy.stats import sigma_clip >>> from numpy.random import randn >>> randvar = randn(10000) >>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5) This example clips all points that are more than 3 sigma relative to the sample *mean*, clips until convergence, returns an unmasked `~numpy.ndarray`, and does not copy the data:: >>> from astropy.stats import sigma_clip >>> from numpy.random import randn >>> from numpy import mean >>> randvar = randn(10000) >>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None, ... cenfunc=mean, masked=False, copy=False) This example sigma clips along one axis:: >>> from astropy.stats import sigma_clip >>> from numpy.random import normal >>> from numpy import arange, diag, ones >>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5)) >>> filtered_data = sigma_clip(data, sigma=2.3, axis=0) Note that along the other axis, no points would be clipped, as the standard deviation is higher. """ sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower, sigma_upper=sigma_upper, maxiters=maxiters, cenfunc=cenfunc, stdfunc=stdfunc) return sigclip(data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy)
python
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, maxiters=5, cenfunc='median', stdfunc='std', axis=None, masked=True, return_bounds=False, copy=True): """ Perform sigma-clipping on the provided data. The data will be iterated over, each time rejecting values that are less or more than a specified number of standard deviations from a center value. Clipped (rejected) pixels are those where:: data < cenfunc(data [,axis=int]) - (sigma_lower * stdfunc(data [,axis=int])) data > cenfunc(data [,axis=int]) + (sigma_upper * stdfunc(data [,axis=int])) Invalid data values (i.e. NaN or inf) are automatically clipped. For an object-oriented interface to sigma clipping, see :class:`SigmaClip`. .. note:: `scipy.stats.sigmaclip <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_ provides a subset of the functionality in this class. Also, its input data cannot be a masked array and it does not handle data that contains invalid values (i.e. NaN or inf). Also note that it uses the mean as the centering function. If your data is a `~numpy.ndarray` with no invalid values and you want to use the mean as the centering function with ``axis=None`` and iterate to convergence, then `scipy.stats.sigmaclip` is ~25-30% faster than the equivalent settings here (``sigma_clip(data, cenfunc='mean', maxiters=None, axis=None)``). Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` The data to be sigma clipped. sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or `None`, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or `None`, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. maxiters : int or `None`, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If set to ``'median'`` or ``'mean'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanmean`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. .. _bottleneck: https://github.com/kwgoodman/bottleneck stdfunc : {'std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. If set to ``'std'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanstd`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'std'``. axis : `None` or int or tuple of int, optional The axis or axes along which to sigma clip the data. 
If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. masked : bool, optional If `True`, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values. If `False`, then a `~numpy.ndarray` and the minimum and maximum clipping thresholds are returned. The default is `True`. return_bounds : bool, optional If `True`, then the minimum and maximum clipping bounds are also returned. copy : bool, optional If `True`, then the ``data`` array will be copied. If `False` and ``masked=True``, then the returned masked array data will contain the same array as the input ``data`` (if ``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`). The default is `True`. Returns ------- result : flexible If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values. If ``masked=False``, then a `~numpy.ndarray` is returned. If ``return_bounds=True``, then in addition to the (masked) array above, the minimum and maximum clipping bounds are returned. If ``masked=False`` and ``axis=None``, then the output array is a flattened 1D `~numpy.ndarray` where the clipped values have been removed. If ``return_bounds=True`` then the returned minimum and maximum thresholds are scalars. If ``masked=False`` and ``axis`` is specified, then the output `~numpy.ndarray` will have the same shape as the input ``data`` and contain ``np.nan`` where values were clipped. If ``return_bounds=True`` then the returned minimum and maximum clipping thresholds will be be `~numpy.ndarray`\\s. See Also -------- SigmaClip, sigma_clipped_stats Examples -------- This example uses a data array of random variates from a Gaussian distribution. We clip all points that are more than 2 sample standard deviations from the median. The result is a masked array, where the mask is `True` for clipped data:: >>> from astropy.stats import sigma_clip >>> from numpy.random import randn >>> randvar = randn(10000) >>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5) This example clips all points that are more than 3 sigma relative to the sample *mean*, clips until convergence, returns an unmasked `~numpy.ndarray`, and does not copy the data:: >>> from astropy.stats import sigma_clip >>> from numpy.random import randn >>> from numpy import mean >>> randvar = randn(10000) >>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None, ... cenfunc=mean, masked=False, copy=False) This example sigma clips along one axis:: >>> from astropy.stats import sigma_clip >>> from numpy.random import normal >>> from numpy import arange, diag, ones >>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5)) >>> filtered_data = sigma_clip(data, sigma=2.3, axis=0) Note that along the other axis, no points would be clipped, as the standard deviation is higher. """ sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower, sigma_upper=sigma_upper, maxiters=maxiters, cenfunc=cenfunc, stdfunc=stdfunc) return sigclip(data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy)
[ "def", "sigma_clip", "(", "data", ",", "sigma", "=", "3", ",", "sigma_lower", "=", "None", ",", "sigma_upper", "=", "None", ",", "maxiters", "=", "5", ",", "cenfunc", "=", "'median'", ",", "stdfunc", "=", "'std'", ",", "axis", "=", "None", ",", "masked", "=", "True", ",", "return_bounds", "=", "False", ",", "copy", "=", "True", ")", ":", "sigclip", "=", "SigmaClip", "(", "sigma", "=", "sigma", ",", "sigma_lower", "=", "sigma_lower", ",", "sigma_upper", "=", "sigma_upper", ",", "maxiters", "=", "maxiters", ",", "cenfunc", "=", "cenfunc", ",", "stdfunc", "=", "stdfunc", ")", "return", "sigclip", "(", "data", ",", "axis", "=", "axis", ",", "masked", "=", "masked", ",", "return_bounds", "=", "return_bounds", ",", "copy", "=", "copy", ")" ]
Perform sigma-clipping on the provided data. The data will be iterated over, each time rejecting values that are less or more than a specified number of standard deviations from a center value. Clipped (rejected) pixels are those where:: data < cenfunc(data [,axis=int]) - (sigma_lower * stdfunc(data [,axis=int])) data > cenfunc(data [,axis=int]) + (sigma_upper * stdfunc(data [,axis=int])) Invalid data values (i.e. NaN or inf) are automatically clipped. For an object-oriented interface to sigma clipping, see :class:`SigmaClip`. .. note:: `scipy.stats.sigmaclip <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_ provides a subset of the functionality in this class. Also, its input data cannot be a masked array and it does not handle data that contains invalid values (i.e. NaN or inf). Also note that it uses the mean as the centering function. If your data is a `~numpy.ndarray` with no invalid values and you want to use the mean as the centering function with ``axis=None`` and iterate to convergence, then `scipy.stats.sigmaclip` is ~25-30% faster than the equivalent settings here (``sigma_clip(data, cenfunc='mean', maxiters=None, axis=None)``). Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` The data to be sigma clipped. sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or `None`, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or `None`, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. maxiters : int or `None`, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If set to ``'median'`` or ``'mean'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanmean`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. .. _bottleneck: https://github.com/kwgoodman/bottleneck stdfunc : {'std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. If set to ``'std'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanstd`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'std'``. axis : `None` or int or tuple of int, optional The axis or axes along which to sigma clip the data. If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. 
masked : bool, optional If `True`, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values. If `False`, then a `~numpy.ndarray` and the minimum and maximum clipping thresholds are returned. The default is `True`. return_bounds : bool, optional If `True`, then the minimum and maximum clipping bounds are also returned. copy : bool, optional If `True`, then the ``data`` array will be copied. If `False` and ``masked=True``, then the returned masked array data will contain the same array as the input ``data`` (if ``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`). The default is `True`. Returns ------- result : flexible If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values. If ``masked=False``, then a `~numpy.ndarray` is returned. If ``return_bounds=True``, then in addition to the (masked) array above, the minimum and maximum clipping bounds are returned. If ``masked=False`` and ``axis=None``, then the output array is a flattened 1D `~numpy.ndarray` where the clipped values have been removed. If ``return_bounds=True`` then the returned minimum and maximum thresholds are scalars. If ``masked=False`` and ``axis`` is specified, then the output `~numpy.ndarray` will have the same shape as the input ``data`` and contain ``np.nan`` where values were clipped. If ``return_bounds=True`` then the returned minimum and maximum clipping thresholds will be be `~numpy.ndarray`\\s. See Also -------- SigmaClip, sigma_clipped_stats Examples -------- This example uses a data array of random variates from a Gaussian distribution. We clip all points that are more than 2 sample standard deviations from the median. The result is a masked array, where the mask is `True` for clipped data:: >>> from astropy.stats import sigma_clip >>> from numpy.random import randn >>> randvar = randn(10000) >>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5) This example clips all points that are more than 3 sigma relative to the sample *mean*, clips until convergence, returns an unmasked `~numpy.ndarray`, and does not copy the data:: >>> from astropy.stats import sigma_clip >>> from numpy.random import randn >>> from numpy import mean >>> randvar = randn(10000) >>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None, ... cenfunc=mean, masked=False, copy=False) This example sigma clips along one axis:: >>> from astropy.stats import sigma_clip >>> from numpy.random import normal >>> from numpy import arange, diag, ones >>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5)) >>> filtered_data = sigma_clip(data, sigma=2.3, axis=0) Note that along the other axis, no points would be clipped, as the standard deviation is higher.
[ "Perform", "sigma", "-", "clipping", "on", "the", "provided", "data", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/extern/sigma_clipping.py#L467-L640
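A short usage sketch for the `sigma_clip` row above, exercising the `masked=False` / `return_bounds=True` path its docstring describes. It assumes the `astropy.stats` copy of the function (this row is photutils' bundled mirror of it); the injected outlier and array shapes are illustrative only.

```python
import numpy as np
from astropy.stats import sigma_clip

rng = np.random.default_rng(0)
data = rng.normal(size=(5, 100))
data[0, 0] = 50.0  # inject one obvious outlier

# Unmasked output plus the per-row clipping bounds, as documented above.
clipped, lo, hi = sigma_clip(data, sigma=3, axis=1,
                             masked=False, return_bounds=True)
print(np.isnan(clipped).sum())  # clipped entries are replaced by NaN
print(lo.shape, hi.shape)       # bounds broadcast against data along axis=1
```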
10,502
astropy/photutils
photutils/extern/sigma_clipping.py
sigma_clipped_stats
def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0, sigma_lower=None, sigma_upper=None, maxiters=5, cenfunc='median', stdfunc='std', std_ddof=0, axis=None): """ Calculate sigma-clipped statistics on the provided data. Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` Data array or object that can be converted to an array. mask : `numpy.ndarray` (bool), optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are excluded when computing the statistics. mask_value : float, optional A data value (e.g., ``0.0``) that is ignored when computing the statistics. ``mask_value`` will be masked in addition to any input ``mask``. sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or `None`, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or `None`, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. maxiters : int or `None`, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If set to ``'median'`` or ``'mean'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanmean`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. .. _bottleneck: https://github.com/kwgoodman/bottleneck stdfunc : {'std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. If set to ``'std'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanstd`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'std'``. std_ddof : int, optional The delta degrees of freedom for the standard deviation calculation. The divisor used in the calculation is ``N - std_ddof``, where ``N`` represents the number of elements. The default is 0. axis : `None` or int or tuple of int, optional The axis or axes along which to sigma clip the data. If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. Returns ------- mean, median, stddev : float The mean, median, and standard deviation of the sigma-clipped data. 
See Also -------- SigmaClip, sigma_clip """ if mask is not None: data = np.ma.MaskedArray(data, mask) if mask_value is not None: data = np.ma.masked_values(data, mask_value) sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower, sigma_upper=sigma_upper, maxiters=maxiters, cenfunc=cenfunc, stdfunc=stdfunc) data_clipped = sigclip(data, axis=axis, masked=False, return_bounds=False, copy=False) if HAS_BOTTLENECK: mean = _nanmean(data_clipped, axis=axis) median = _nanmedian(data_clipped, axis=axis) std = _nanstd(data_clipped, ddof=std_ddof, axis=axis) else: # pragma: no cover mean = np.nanmean(data_clipped, axis=axis) median = np.nanmedian(data_clipped, axis=axis) std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis) return mean, median, std
python
def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0, sigma_lower=None, sigma_upper=None, maxiters=5, cenfunc='median', stdfunc='std', std_ddof=0, axis=None): """ Calculate sigma-clipped statistics on the provided data. Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` Data array or object that can be converted to an array. mask : `numpy.ndarray` (bool), optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are excluded when computing the statistics. mask_value : float, optional A data value (e.g., ``0.0``) that is ignored when computing the statistics. ``mask_value`` will be masked in addition to any input ``mask``. sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or `None`, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or `None`, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. maxiters : int or `None`, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If set to ``'median'`` or ``'mean'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanmean`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. .. _bottleneck: https://github.com/kwgoodman/bottleneck stdfunc : {'std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. If set to ``'std'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanstd`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'std'``. std_ddof : int, optional The delta degrees of freedom for the standard deviation calculation. The divisor used in the calculation is ``N - std_ddof``, where ``N`` represents the number of elements. The default is 0. axis : `None` or int or tuple of int, optional The axis or axes along which to sigma clip the data. If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. Returns ------- mean, median, stddev : float The mean, median, and standard deviation of the sigma-clipped data. 
See Also -------- SigmaClip, sigma_clip """ if mask is not None: data = np.ma.MaskedArray(data, mask) if mask_value is not None: data = np.ma.masked_values(data, mask_value) sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower, sigma_upper=sigma_upper, maxiters=maxiters, cenfunc=cenfunc, stdfunc=stdfunc) data_clipped = sigclip(data, axis=axis, masked=False, return_bounds=False, copy=False) if HAS_BOTTLENECK: mean = _nanmean(data_clipped, axis=axis) median = _nanmedian(data_clipped, axis=axis) std = _nanstd(data_clipped, ddof=std_ddof, axis=axis) else: # pragma: no cover mean = np.nanmean(data_clipped, axis=axis) median = np.nanmedian(data_clipped, axis=axis) std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis) return mean, median, std
[ "def", "sigma_clipped_stats", "(", "data", ",", "mask", "=", "None", ",", "mask_value", "=", "None", ",", "sigma", "=", "3.0", ",", "sigma_lower", "=", "None", ",", "sigma_upper", "=", "None", ",", "maxiters", "=", "5", ",", "cenfunc", "=", "'median'", ",", "stdfunc", "=", "'std'", ",", "std_ddof", "=", "0", ",", "axis", "=", "None", ")", ":", "if", "mask", "is", "not", "None", ":", "data", "=", "np", ".", "ma", ".", "MaskedArray", "(", "data", ",", "mask", ")", "if", "mask_value", "is", "not", "None", ":", "data", "=", "np", ".", "ma", ".", "masked_values", "(", "data", ",", "mask_value", ")", "sigclip", "=", "SigmaClip", "(", "sigma", "=", "sigma", ",", "sigma_lower", "=", "sigma_lower", ",", "sigma_upper", "=", "sigma_upper", ",", "maxiters", "=", "maxiters", ",", "cenfunc", "=", "cenfunc", ",", "stdfunc", "=", "stdfunc", ")", "data_clipped", "=", "sigclip", "(", "data", ",", "axis", "=", "axis", ",", "masked", "=", "False", ",", "return_bounds", "=", "False", ",", "copy", "=", "False", ")", "if", "HAS_BOTTLENECK", ":", "mean", "=", "_nanmean", "(", "data_clipped", ",", "axis", "=", "axis", ")", "median", "=", "_nanmedian", "(", "data_clipped", ",", "axis", "=", "axis", ")", "std", "=", "_nanstd", "(", "data_clipped", ",", "ddof", "=", "std_ddof", ",", "axis", "=", "axis", ")", "else", ":", "# pragma: no cover", "mean", "=", "np", ".", "nanmean", "(", "data_clipped", ",", "axis", "=", "axis", ")", "median", "=", "np", ".", "nanmedian", "(", "data_clipped", ",", "axis", "=", "axis", ")", "std", "=", "np", ".", "nanstd", "(", "data_clipped", ",", "ddof", "=", "std_ddof", ",", "axis", "=", "axis", ")", "return", "mean", ",", "median", ",", "std" ]
Calculate sigma-clipped statistics on the provided data. Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` Data array or object that can be converted to an array. mask : `numpy.ndarray` (bool), optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are excluded when computing the statistics. mask_value : float, optional A data value (e.g., ``0.0``) that is ignored when computing the statistics. ``mask_value`` will be masked in addition to any input ``mask``. sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or `None`, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or `None`, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. maxiters : int or `None`, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If set to ``'median'`` or ``'mean'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanmean`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. .. _bottleneck: https://github.com/kwgoodman/bottleneck stdfunc : {'std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. If set to ``'std'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanstd`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'std'``. std_ddof : int, optional The delta degrees of freedom for the standard deviation calculation. The divisor used in the calculation is ``N - std_ddof``, where ``N`` represents the number of elements. The default is 0. axis : `None` or int or tuple of int, optional The axis or axes along which to sigma clip the data. If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. Returns ------- mean, median, stddev : float The mean, median, and standard deviation of the sigma-clipped data. See Also -------- SigmaClip, sigma_clip
[ "Calculate", "sigma", "-", "clipped", "statistics", "on", "the", "provided", "data", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/extern/sigma_clipping.py#L644-L753
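A usage sketch for `sigma_clipped_stats`, combining the `mask` and `sigma` parameters documented in the row above. The image, mask, and random seed are made up for illustration.

```python
import numpy as np
from astropy.stats import sigma_clipped_stats

rng = np.random.default_rng(1)
image = rng.normal(loc=100.0, scale=5.0, size=(64, 64))
image[10:12, 10:12] = 1e4            # bright pixels that should be clipped
bad = np.zeros(image.shape, dtype=bool)
bad[0, :] = True                     # additionally mask the first row

mean, median, std = sigma_clipped_stats(image, mask=bad, sigma=3.0, maxiters=5)
print(mean, median, std)             # robust background statistics
```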
10,503
astropy/photutils
photutils/extern/sigma_clipping.py
SigmaClip._sigmaclip_noaxis
def _sigmaclip_noaxis(self, data, masked=True, return_bounds=False, copy=True): """ Sigma clip the data when ``axis`` is None. In this simple case, we remove clipped elements from the flattened array during each iteration. """ filtered_data = data.ravel() # remove masked values and convert to ndarray if isinstance(filtered_data, np.ma.MaskedArray): filtered_data = filtered_data.data[~filtered_data.mask] # remove invalid values good_mask = np.isfinite(filtered_data) if np.any(~good_mask): filtered_data = filtered_data[good_mask] warnings.warn('Input data contains invalid values (NaNs or ' 'infs), which were automatically clipped.', AstropyUserWarning) nchanged = 1 iteration = 0 while nchanged != 0 and (iteration < self.maxiters): iteration += 1 size = filtered_data.size self._compute_bounds(filtered_data, axis=None) filtered_data = filtered_data[(filtered_data >= self._min_value) & (filtered_data <= self._max_value)] nchanged = size - filtered_data.size self._niterations = iteration if masked: # return a masked array and optional bounds filtered_data = np.ma.masked_invalid(data, copy=copy) # update the mask in place, ignoring RuntimeWarnings for # comparisons with NaN data values with np.errstate(invalid='ignore'): filtered_data.mask |= np.logical_or(data < self._min_value, data > self._max_value) if return_bounds: return filtered_data, self._min_value, self._max_value else: return filtered_data
python
def _sigmaclip_noaxis(self, data, masked=True, return_bounds=False, copy=True): """ Sigma clip the data when ``axis`` is None. In this simple case, we remove clipped elements from the flattened array during each iteration. """ filtered_data = data.ravel() # remove masked values and convert to ndarray if isinstance(filtered_data, np.ma.MaskedArray): filtered_data = filtered_data.data[~filtered_data.mask] # remove invalid values good_mask = np.isfinite(filtered_data) if np.any(~good_mask): filtered_data = filtered_data[good_mask] warnings.warn('Input data contains invalid values (NaNs or ' 'infs), which were automatically clipped.', AstropyUserWarning) nchanged = 1 iteration = 0 while nchanged != 0 and (iteration < self.maxiters): iteration += 1 size = filtered_data.size self._compute_bounds(filtered_data, axis=None) filtered_data = filtered_data[(filtered_data >= self._min_value) & (filtered_data <= self._max_value)] nchanged = size - filtered_data.size self._niterations = iteration if masked: # return a masked array and optional bounds filtered_data = np.ma.masked_invalid(data, copy=copy) # update the mask in place, ignoring RuntimeWarnings for # comparisons with NaN data values with np.errstate(invalid='ignore'): filtered_data.mask |= np.logical_or(data < self._min_value, data > self._max_value) if return_bounds: return filtered_data, self._min_value, self._max_value else: return filtered_data
[ "def", "_sigmaclip_noaxis", "(", "self", ",", "data", ",", "masked", "=", "True", ",", "return_bounds", "=", "False", ",", "copy", "=", "True", ")", ":", "filtered_data", "=", "data", ".", "ravel", "(", ")", "# remove masked values and convert to ndarray", "if", "isinstance", "(", "filtered_data", ",", "np", ".", "ma", ".", "MaskedArray", ")", ":", "filtered_data", "=", "filtered_data", ".", "data", "[", "~", "filtered_data", ".", "mask", "]", "# remove invalid values", "good_mask", "=", "np", ".", "isfinite", "(", "filtered_data", ")", "if", "np", ".", "any", "(", "~", "good_mask", ")", ":", "filtered_data", "=", "filtered_data", "[", "good_mask", "]", "warnings", ".", "warn", "(", "'Input data contains invalid values (NaNs or '", "'infs), which were automatically clipped.'", ",", "AstropyUserWarning", ")", "nchanged", "=", "1", "iteration", "=", "0", "while", "nchanged", "!=", "0", "and", "(", "iteration", "<", "self", ".", "maxiters", ")", ":", "iteration", "+=", "1", "size", "=", "filtered_data", ".", "size", "self", ".", "_compute_bounds", "(", "filtered_data", ",", "axis", "=", "None", ")", "filtered_data", "=", "filtered_data", "[", "(", "filtered_data", ">=", "self", ".", "_min_value", ")", "&", "(", "filtered_data", "<=", "self", ".", "_max_value", ")", "]", "nchanged", "=", "size", "-", "filtered_data", ".", "size", "self", ".", "_niterations", "=", "iteration", "if", "masked", ":", "# return a masked array and optional bounds", "filtered_data", "=", "np", ".", "ma", ".", "masked_invalid", "(", "data", ",", "copy", "=", "copy", ")", "# update the mask in place, ignoring RuntimeWarnings for", "# comparisons with NaN data values", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ")", ":", "filtered_data", ".", "mask", "|=", "np", ".", "logical_or", "(", "data", "<", "self", ".", "_min_value", ",", "data", ">", "self", ".", "_max_value", ")", "if", "return_bounds", ":", "return", "filtered_data", ",", "self", ".", "_min_value", ",", "self", ".", "_max_value", "else", ":", "return", "filtered_data" ]
Sigma clip the data when ``axis`` is None. In this simple case, we remove clipped elements from the flattened array during each iteration.
[ "Sigma", "clip", "the", "data", "when", "axis", "is", "None", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/extern/sigma_clipping.py#L265-L313
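`_sigmaclip_noaxis` above is an internal method of `SigmaClip`. The following standalone sketch re-expresses its core loop (median center, std-based bounds, shrink until an iteration removes nothing) without the class; the name `clip_flat` and the fixed median/std choice are assumptions for illustration.

```python
import numpy as np

def clip_flat(data, sigma=3.0, maxiters=5):
    # Work on the flattened finite values and shrink the array each pass,
    # stopping when a pass removes nothing or maxiters is reached.
    values = np.asanyarray(data, dtype=float).ravel()
    values = values[np.isfinite(values)]
    lo = hi = None
    nchanged, iteration = 1, 0
    while nchanged != 0 and iteration < maxiters:
        iteration += 1
        size = values.size
        center = np.median(values)
        width = sigma * np.std(values)
        lo, hi = center - width, center + width
        values = values[(values >= lo) & (values <= hi)]
        nchanged = size - values.size
    return values, lo, hi

kept, lo, hi = clip_flat([1.0, 1.1, 0.9, 1.05, 50.0], sigma=2.0)
print(kept, lo, hi)   # the 50.0 outlier is rejected
```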
10,504
astropy/photutils
photutils/extern/sigma_clipping.py
SigmaClip._sigmaclip_withaxis
def _sigmaclip_withaxis(self, data, axis=None, masked=True, return_bounds=False, copy=True): """ Sigma clip the data when ``axis`` is specified. In this case, we replace clipped values with NaNs as placeholder values. """ # float array type is needed to insert nans into the array filtered_data = data.astype(float) # also makes a copy # remove invalid values bad_mask = ~np.isfinite(filtered_data) if np.any(bad_mask): filtered_data[bad_mask] = np.nan warnings.warn('Input data contains invalid values (NaNs or ' 'infs), which were automatically clipped.', AstropyUserWarning) # remove masked values and convert to plain ndarray if isinstance(filtered_data, np.ma.MaskedArray): filtered_data = np.ma.masked_invalid(filtered_data).astype(float) filtered_data = filtered_data.filled(np.nan) # convert negative axis/axes if not isiterable(axis): axis = (axis,) axis = tuple(filtered_data.ndim + n if n < 0 else n for n in axis) # define the shape of min/max arrays so that they can be broadcast # with the data mshape = tuple(1 if dim in axis else size for dim, size in enumerate(filtered_data.shape)) nchanged = 1 iteration = 0 while nchanged != 0 and (iteration < self.maxiters): iteration += 1 n_nan = np.count_nonzero(np.isnan(filtered_data)) self._compute_bounds(filtered_data, axis=axis) if not np.isscalar(self._min_value): self._min_value = self._min_value.reshape(mshape) self._max_value = self._max_value.reshape(mshape) with np.errstate(invalid='ignore'): filtered_data[(filtered_data < self._min_value) | (filtered_data > self._max_value)] = np.nan nchanged = n_nan - np.count_nonzero(np.isnan(filtered_data)) self._niterations = iteration if masked: # create an output masked array if copy: filtered_data = np.ma.masked_invalid(filtered_data) else: # ignore RuntimeWarnings for comparisons with NaN data values with np.errstate(invalid='ignore'): out = np.ma.masked_invalid(data, copy=False) filtered_data = np.ma.masked_where(np.logical_or( out < self._min_value, out > self._max_value), out, copy=False) if return_bounds: return filtered_data, self._min_value, self._max_value else: return filtered_data
python
def _sigmaclip_withaxis(self, data, axis=None, masked=True, return_bounds=False, copy=True): """ Sigma clip the data when ``axis`` is specified. In this case, we replace clipped values with NaNs as placeholder values. """ # float array type is needed to insert nans into the array filtered_data = data.astype(float) # also makes a copy # remove invalid values bad_mask = ~np.isfinite(filtered_data) if np.any(bad_mask): filtered_data[bad_mask] = np.nan warnings.warn('Input data contains invalid values (NaNs or ' 'infs), which were automatically clipped.', AstropyUserWarning) # remove masked values and convert to plain ndarray if isinstance(filtered_data, np.ma.MaskedArray): filtered_data = np.ma.masked_invalid(filtered_data).astype(float) filtered_data = filtered_data.filled(np.nan) # convert negative axis/axes if not isiterable(axis): axis = (axis,) axis = tuple(filtered_data.ndim + n if n < 0 else n for n in axis) # define the shape of min/max arrays so that they can be broadcast # with the data mshape = tuple(1 if dim in axis else size for dim, size in enumerate(filtered_data.shape)) nchanged = 1 iteration = 0 while nchanged != 0 and (iteration < self.maxiters): iteration += 1 n_nan = np.count_nonzero(np.isnan(filtered_data)) self._compute_bounds(filtered_data, axis=axis) if not np.isscalar(self._min_value): self._min_value = self._min_value.reshape(mshape) self._max_value = self._max_value.reshape(mshape) with np.errstate(invalid='ignore'): filtered_data[(filtered_data < self._min_value) | (filtered_data > self._max_value)] = np.nan nchanged = n_nan - np.count_nonzero(np.isnan(filtered_data)) self._niterations = iteration if masked: # create an output masked array if copy: filtered_data = np.ma.masked_invalid(filtered_data) else: # ignore RuntimeWarnings for comparisons with NaN data values with np.errstate(invalid='ignore'): out = np.ma.masked_invalid(data, copy=False) filtered_data = np.ma.masked_where(np.logical_or( out < self._min_value, out > self._max_value), out, copy=False) if return_bounds: return filtered_data, self._min_value, self._max_value else: return filtered_data
[ "def", "_sigmaclip_withaxis", "(", "self", ",", "data", ",", "axis", "=", "None", ",", "masked", "=", "True", ",", "return_bounds", "=", "False", ",", "copy", "=", "True", ")", ":", "# float array type is needed to insert nans into the array", "filtered_data", "=", "data", ".", "astype", "(", "float", ")", "# also makes a copy", "# remove invalid values", "bad_mask", "=", "~", "np", ".", "isfinite", "(", "filtered_data", ")", "if", "np", ".", "any", "(", "bad_mask", ")", ":", "filtered_data", "[", "bad_mask", "]", "=", "np", ".", "nan", "warnings", ".", "warn", "(", "'Input data contains invalid values (NaNs or '", "'infs), which were automatically clipped.'", ",", "AstropyUserWarning", ")", "# remove masked values and convert to plain ndarray", "if", "isinstance", "(", "filtered_data", ",", "np", ".", "ma", ".", "MaskedArray", ")", ":", "filtered_data", "=", "np", ".", "ma", ".", "masked_invalid", "(", "filtered_data", ")", ".", "astype", "(", "float", ")", "filtered_data", "=", "filtered_data", ".", "filled", "(", "np", ".", "nan", ")", "# convert negative axis/axes", "if", "not", "isiterable", "(", "axis", ")", ":", "axis", "=", "(", "axis", ",", ")", "axis", "=", "tuple", "(", "filtered_data", ".", "ndim", "+", "n", "if", "n", "<", "0", "else", "n", "for", "n", "in", "axis", ")", "# define the shape of min/max arrays so that they can be broadcast", "# with the data", "mshape", "=", "tuple", "(", "1", "if", "dim", "in", "axis", "else", "size", "for", "dim", ",", "size", "in", "enumerate", "(", "filtered_data", ".", "shape", ")", ")", "nchanged", "=", "1", "iteration", "=", "0", "while", "nchanged", "!=", "0", "and", "(", "iteration", "<", "self", ".", "maxiters", ")", ":", "iteration", "+=", "1", "n_nan", "=", "np", ".", "count_nonzero", "(", "np", ".", "isnan", "(", "filtered_data", ")", ")", "self", ".", "_compute_bounds", "(", "filtered_data", ",", "axis", "=", "axis", ")", "if", "not", "np", ".", "isscalar", "(", "self", ".", "_min_value", ")", ":", "self", ".", "_min_value", "=", "self", ".", "_min_value", ".", "reshape", "(", "mshape", ")", "self", ".", "_max_value", "=", "self", ".", "_max_value", ".", "reshape", "(", "mshape", ")", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ")", ":", "filtered_data", "[", "(", "filtered_data", "<", "self", ".", "_min_value", ")", "|", "(", "filtered_data", ">", "self", ".", "_max_value", ")", "]", "=", "np", ".", "nan", "nchanged", "=", "n_nan", "-", "np", ".", "count_nonzero", "(", "np", ".", "isnan", "(", "filtered_data", ")", ")", "self", ".", "_niterations", "=", "iteration", "if", "masked", ":", "# create an output masked array", "if", "copy", ":", "filtered_data", "=", "np", ".", "ma", ".", "masked_invalid", "(", "filtered_data", ")", "else", ":", "# ignore RuntimeWarnings for comparisons with NaN data values", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ")", ":", "out", "=", "np", ".", "ma", ".", "masked_invalid", "(", "data", ",", "copy", "=", "False", ")", "filtered_data", "=", "np", ".", "ma", ".", "masked_where", "(", "np", ".", "logical_or", "(", "out", "<", "self", ".", "_min_value", ",", "out", ">", "self", ".", "_max_value", ")", ",", "out", ",", "copy", "=", "False", ")", "if", "return_bounds", ":", "return", "filtered_data", ",", "self", ".", "_min_value", ",", "self", ".", "_max_value", "else", ":", "return", "filtered_data" ]
Sigma clip the data when ``axis`` is specified. In this case, we replace clipped values with NaNs as placeholder values.
[ "Sigma", "clip", "the", "data", "when", "axis", "is", "specified", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/extern/sigma_clipping.py#L315-L385
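Similarly, a standalone sketch of the NaN-placeholder strategy used by `_sigmaclip_withaxis`. It handles only a single integer `axis` and uses `keepdims=True` in place of the explicit reshape to `mshape` shown in the row, so it approximates the row's logic rather than reproducing it.

```python
import numpy as np

def clip_along_axis(data, sigma=3.0, maxiters=5, axis=0):
    # Clipped entries become NaN so the array keeps its original shape;
    # keepdims=True makes the per-slice bounds broadcast against the data.
    work = np.asanyarray(data, dtype=float).copy()
    work[~np.isfinite(work)] = np.nan
    nchanged, iteration = 1, 0
    while nchanged != 0 and iteration < maxiters:
        iteration += 1
        n_nan = np.count_nonzero(np.isnan(work))
        center = np.nanmedian(work, axis=axis, keepdims=True)
        width = sigma * np.nanstd(work, axis=axis, keepdims=True)
        with np.errstate(invalid='ignore'):
            work[(work < center - width) | (work > center + width)] = np.nan
        nchanged = np.count_nonzero(np.isnan(work)) - n_nan  # newly clipped
    return work
```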
10,505
astropy/photutils
photutils/aperture/core.py
PixelAperture.do_photometry
def do_photometry(self, data, error=None, mask=None, method='exact', subpixels=5, unit=None): """ Perform aperture photometry on the input data. Parameters ---------- data : array_like or `~astropy.units.Quantity` instance The 2D array on which to perform photometry. ``data`` should be background subtracted. error : array_like or `~astropy.units.Quantity`, optional The pixel-wise Gaussian 1-sigma errors of the input ``data``. ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources (see `~photutils.utils.calc_total_error`) . ``error`` must have the same shape as the input ``data``. mask : array_like (bool), optional A boolean mask with the same shape as ``data`` where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'`` A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. unit : `~astropy.units.UnitBase` object or str, optional An object that represents the unit associated with the input ``data`` and ``error`` arrays. Must be a `~astropy.units.UnitBase` object or a string parseable by the :mod:`~astropy.units` package. If ``data`` or ``error`` already have a different unit, the input ``unit`` will not be used and a warning will be raised. Returns ------- aperture_sums : `~numpy.ndarray` or `~astropy.units.Quantity` The sums within each aperture. aperture_sum_errs : `~numpy.ndarray` or `~astropy.units.Quantity` The errors on the sums within each aperture. """ data = np.asanyarray(data) if mask is not None: mask = np.asanyarray(mask) data = copy.deepcopy(data) # do not modify input data data[mask] = 0 if error is not None: # do not modify input data error = copy.deepcopy(np.asanyarray(error)) error[mask] = 0. aperture_sums = [] aperture_sum_errs = [] for mask in self.to_mask(method=method, subpixels=subpixels): data_cutout = mask.cutout(data) if data_cutout is None: aperture_sums.append(np.nan) else: aperture_sums.append(np.sum(data_cutout * mask.data)) if error is not None: error_cutout = mask.cutout(error) if error_cutout is None: aperture_sum_errs.append(np.nan) else: aperture_var = np.sum(error_cutout ** 2 * mask.data) aperture_sum_errs.append(np.sqrt(aperture_var)) # handle Quantity objects and input units aperture_sums = self._prepare_photometry_output(aperture_sums, unit=unit) aperture_sum_errs = self._prepare_photometry_output(aperture_sum_errs, unit=unit) return aperture_sums, aperture_sum_errs
python
def do_photometry(self, data, error=None, mask=None, method='exact', subpixels=5, unit=None): """ Perform aperture photometry on the input data. Parameters ---------- data : array_like or `~astropy.units.Quantity` instance The 2D array on which to perform photometry. ``data`` should be background subtracted. error : array_like or `~astropy.units.Quantity`, optional The pixel-wise Gaussian 1-sigma errors of the input ``data``. ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources (see `~photutils.utils.calc_total_error`) . ``error`` must have the same shape as the input ``data``. mask : array_like (bool), optional A boolean mask with the same shape as ``data`` where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'`` A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. unit : `~astropy.units.UnitBase` object or str, optional An object that represents the unit associated with the input ``data`` and ``error`` arrays. Must be a `~astropy.units.UnitBase` object or a string parseable by the :mod:`~astropy.units` package. If ``data`` or ``error`` already have a different unit, the input ``unit`` will not be used and a warning will be raised. Returns ------- aperture_sums : `~numpy.ndarray` or `~astropy.units.Quantity` The sums within each aperture. aperture_sum_errs : `~numpy.ndarray` or `~astropy.units.Quantity` The errors on the sums within each aperture. """ data = np.asanyarray(data) if mask is not None: mask = np.asanyarray(mask) data = copy.deepcopy(data) # do not modify input data data[mask] = 0 if error is not None: # do not modify input data error = copy.deepcopy(np.asanyarray(error)) error[mask] = 0. aperture_sums = [] aperture_sum_errs = [] for mask in self.to_mask(method=method, subpixels=subpixels): data_cutout = mask.cutout(data) if data_cutout is None: aperture_sums.append(np.nan) else: aperture_sums.append(np.sum(data_cutout * mask.data)) if error is not None: error_cutout = mask.cutout(error) if error_cutout is None: aperture_sum_errs.append(np.nan) else: aperture_var = np.sum(error_cutout ** 2 * mask.data) aperture_sum_errs.append(np.sqrt(aperture_var)) # handle Quantity objects and input units aperture_sums = self._prepare_photometry_output(aperture_sums, unit=unit) aperture_sum_errs = self._prepare_photometry_output(aperture_sum_errs, unit=unit) return aperture_sums, aperture_sum_errs
[ "def", "do_photometry", "(", "self", ",", "data", ",", "error", "=", "None", ",", "mask", "=", "None", ",", "method", "=", "'exact'", ",", "subpixels", "=", "5", ",", "unit", "=", "None", ")", ":", "data", "=", "np", ".", "asanyarray", "(", "data", ")", "if", "mask", "is", "not", "None", ":", "mask", "=", "np", ".", "asanyarray", "(", "mask", ")", "data", "=", "copy", ".", "deepcopy", "(", "data", ")", "# do not modify input data", "data", "[", "mask", "]", "=", "0", "if", "error", "is", "not", "None", ":", "# do not modify input data", "error", "=", "copy", ".", "deepcopy", "(", "np", ".", "asanyarray", "(", "error", ")", ")", "error", "[", "mask", "]", "=", "0.", "aperture_sums", "=", "[", "]", "aperture_sum_errs", "=", "[", "]", "for", "mask", "in", "self", ".", "to_mask", "(", "method", "=", "method", ",", "subpixels", "=", "subpixels", ")", ":", "data_cutout", "=", "mask", ".", "cutout", "(", "data", ")", "if", "data_cutout", "is", "None", ":", "aperture_sums", ".", "append", "(", "np", ".", "nan", ")", "else", ":", "aperture_sums", ".", "append", "(", "np", ".", "sum", "(", "data_cutout", "*", "mask", ".", "data", ")", ")", "if", "error", "is", "not", "None", ":", "error_cutout", "=", "mask", ".", "cutout", "(", "error", ")", "if", "error_cutout", "is", "None", ":", "aperture_sum_errs", ".", "append", "(", "np", ".", "nan", ")", "else", ":", "aperture_var", "=", "np", ".", "sum", "(", "error_cutout", "**", "2", "*", "mask", ".", "data", ")", "aperture_sum_errs", ".", "append", "(", "np", ".", "sqrt", "(", "aperture_var", ")", ")", "# handle Quantity objects and input units", "aperture_sums", "=", "self", ".", "_prepare_photometry_output", "(", "aperture_sums", ",", "unit", "=", "unit", ")", "aperture_sum_errs", "=", "self", ".", "_prepare_photometry_output", "(", "aperture_sum_errs", ",", "unit", "=", "unit", ")", "return", "aperture_sums", ",", "aperture_sum_errs" ]
Perform aperture photometry on the input data.

Parameters
----------
data : array_like or `~astropy.units.Quantity` instance
    The 2D array on which to perform photometry.  ``data`` should be
    background subtracted.
error : array_like or `~astropy.units.Quantity`, optional
    The pixel-wise Gaussian 1-sigma errors of the input ``data``.
    ``error`` is assumed to include *all* sources of error, including
    the Poisson error of the sources (see
    `~photutils.utils.calc_total_error`).  ``error`` must have the same
    shape as the input ``data``.
mask : array_like (bool), optional
    A boolean mask with the same shape as ``data`` where a `True` value
    indicates the corresponding element of ``data`` is masked.  Masked
    data are excluded from all calculations.
method : {'exact', 'center', 'subpixel'}, optional
    The method used to determine the overlap of the aperture on the
    pixel grid.  Not all options are available for all aperture types.
    Note that the more precise methods are generally slower.  The
    following methods are available:

        * ``'exact'`` (default):
          The exact fractional overlap of the aperture and each pixel
          is calculated.  The returned mask will contain values between
          0 and 1.

        * ``'center'``:
          A pixel is considered to be entirely in or out of the
          aperture depending on whether its center is in or out of the
          aperture.  The returned mask will contain values only of 0
          (out) and 1 (in).

        * ``'subpixel'``:
          A pixel is divided into subpixels (see the ``subpixels``
          keyword), each of which is considered to be entirely in or
          out of the aperture depending on whether its center is in or
          out of the aperture.  If ``subpixels=1``, this method is
          equivalent to ``'center'``.  The returned mask will contain
          values between 0 and 1.
subpixels : int, optional
    For the ``'subpixel'`` method, resample pixels by this factor in
    each dimension.  That is, each pixel is divided into
    ``subpixels ** 2`` subpixels.
unit : `~astropy.units.UnitBase` object or str, optional
    An object that represents the unit associated with the input
    ``data`` and ``error`` arrays.  Must be a `~astropy.units.UnitBase`
    object or a string parseable by the :mod:`~astropy.units` package.
    If ``data`` or ``error`` already have a different unit, the input
    ``unit`` will not be used and a warning will be raised.

Returns
-------
aperture_sums : `~numpy.ndarray` or `~astropy.units.Quantity`
    The sums within each aperture.
aperture_sum_errs : `~numpy.ndarray` or `~astropy.units.Quantity`
    The errors on the sums within each aperture.
[ "Perform", "aperture", "photometry", "on", "the", "input", "data", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/core.py#L301-L410
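A minimal usage sketch for the `do_photometry` method documented in the record above. It uses `CircularAperture` as a concrete pixel-aperture class (not shown in this record), and the image, error array, and aperture positions are invented for illustration; the data are assumed to be background subtracted.

import numpy as np
from photutils import CircularAperture

data = np.ones((100, 100))           # background-subtracted image (assumed)
error = 0.1 * np.ones_like(data)     # pixel-wise 1-sigma errors (assumed)

aper = CircularAperture([(30.0, 30.0), (70.0, 55.0)], r=5.0)
sums, sum_errs = aper.do_photometry(data, error=error, method='exact')
print(sums)       # one aperture sum per position
print(sum_errs)   # quadrature-summed errors, one per position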
10,506
astropy/photutils
photutils/aperture/core.py
PixelAperture._to_sky_params
def _to_sky_params(self, wcs, mode='all'): """ Convert the pixel aperture parameters to those for a sky aperture. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- sky_params : dict A dictionary of parameters for an equivalent sky aperture. """ sky_params = {} x, y = np.transpose(self.positions) sky_params['positions'] = pixel_to_skycoord(x, y, wcs, mode=mode) # The aperture object must have a single value for each shape # parameter so we must use a single pixel scale for all positions. # Here, we define the scale at the WCS CRVAL position. crval = SkyCoord([wcs.wcs.crval], frame=wcs_to_celestial_frame(wcs), unit=wcs.wcs.cunit) scale, angle = pixel_scale_angle_at_skycoord(crval, wcs) params = self._params[:] theta_key = 'theta' if theta_key in self._params: sky_params[theta_key] = (self.theta * u.rad) - angle.to(u.rad) params.remove(theta_key) param_vals = [getattr(self, param) for param in params] for param, param_val in zip(params, param_vals): sky_params[param] = (param_val * u.pix * scale).to(u.arcsec) return sky_params
python
def _to_sky_params(self, wcs, mode='all'): """ Convert the pixel aperture parameters to those for a sky aperture. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- sky_params : dict A dictionary of parameters for an equivalent sky aperture. """ sky_params = {} x, y = np.transpose(self.positions) sky_params['positions'] = pixel_to_skycoord(x, y, wcs, mode=mode) # The aperture object must have a single value for each shape # parameter so we must use a single pixel scale for all positions. # Here, we define the scale at the WCS CRVAL position. crval = SkyCoord([wcs.wcs.crval], frame=wcs_to_celestial_frame(wcs), unit=wcs.wcs.cunit) scale, angle = pixel_scale_angle_at_skycoord(crval, wcs) params = self._params[:] theta_key = 'theta' if theta_key in self._params: sky_params[theta_key] = (self.theta * u.rad) - angle.to(u.rad) params.remove(theta_key) param_vals = [getattr(self, param) for param in params] for param, param_val in zip(params, param_vals): sky_params[param] = (param_val * u.pix * scale).to(u.arcsec) return sky_params
[ "def", "_to_sky_params", "(", "self", ",", "wcs", ",", "mode", "=", "'all'", ")", ":", "sky_params", "=", "{", "}", "x", ",", "y", "=", "np", ".", "transpose", "(", "self", ".", "positions", ")", "sky_params", "[", "'positions'", "]", "=", "pixel_to_skycoord", "(", "x", ",", "y", ",", "wcs", ",", "mode", "=", "mode", ")", "# The aperture object must have a single value for each shape", "# parameter so we must use a single pixel scale for all positions.", "# Here, we define the scale at the WCS CRVAL position.", "crval", "=", "SkyCoord", "(", "[", "wcs", ".", "wcs", ".", "crval", "]", ",", "frame", "=", "wcs_to_celestial_frame", "(", "wcs", ")", ",", "unit", "=", "wcs", ".", "wcs", ".", "cunit", ")", "scale", ",", "angle", "=", "pixel_scale_angle_at_skycoord", "(", "crval", ",", "wcs", ")", "params", "=", "self", ".", "_params", "[", ":", "]", "theta_key", "=", "'theta'", "if", "theta_key", "in", "self", ".", "_params", ":", "sky_params", "[", "theta_key", "]", "=", "(", "self", ".", "theta", "*", "u", ".", "rad", ")", "-", "angle", ".", "to", "(", "u", ".", "rad", ")", "params", ".", "remove", "(", "theta_key", ")", "param_vals", "=", "[", "getattr", "(", "self", ",", "param", ")", "for", "param", "in", "params", "]", "for", "param", ",", "param_val", "in", "zip", "(", "params", ",", "param_vals", ")", ":", "sky_params", "[", "param", "]", "=", "(", "param_val", "*", "u", ".", "pix", "*", "scale", ")", ".", "to", "(", "u", ".", "arcsec", ")", "return", "sky_params" ]
Convert the pixel aperture parameters to those for a sky aperture.

Parameters
----------
wcs : `~astropy.wcs.WCS`
    The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
    Whether to do the transformation including distortions
    (``'all'``; default) or including only the core WCS
    transformation (``'wcs'``).

Returns
-------
sky_params : dict
    A dictionary of parameters for an equivalent sky aperture.
[ "Convert", "the", "pixel", "aperture", "parameters", "to", "those", "for", "a", "sky", "aperture", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/core.py#L526-L568
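`_to_sky_params` is a private helper; the usual entry point is a pixel aperture's `to_sky` method, which is assumed here to exist on `CircularAperture` and to call this helper internally. A sketch under that assumption, with an invented TAN WCS:

from astropy.wcs import WCS
from photutils import CircularAperture

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [50.0, 50.0]
wcs.wcs.crval = [197.89, -1.37]      # made-up reference coordinate (deg)
wcs.wcs.cdelt = [-2.8e-4, 2.8e-4]    # roughly 1 arcsec pixels

pix_aper = CircularAperture([(30.0, 30.0)], r=5.0)
sky_aper = pix_aper.to_sky(wcs)      # shape parameters become angular Quantities
print(sky_aper.positions)            # SkyCoord of the aperture center
print(sky_aper.r)                    # radius on the sky (~5 arcsec here)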
10,507
astropy/photutils
photutils/aperture/core.py
SkyAperture._to_pixel_params
def _to_pixel_params(self, wcs, mode='all'): """ Convert the sky aperture parameters to those for a pixel aperture. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- pixel_params : dict A dictionary of parameters for an equivalent pixel aperture. """ pixel_params = {} x, y = skycoord_to_pixel(self.positions, wcs, mode=mode) pixel_params['positions'] = np.array([x, y]).transpose() # The aperture object must have a single value for each shape # parameter so we must use a single pixel scale for all positions. # Here, we define the scale at the WCS CRVAL position. crval = SkyCoord([wcs.wcs.crval], frame=wcs_to_celestial_frame(wcs), unit=wcs.wcs.cunit) scale, angle = pixel_scale_angle_at_skycoord(crval, wcs) params = self._params[:] theta_key = 'theta' if theta_key in self._params: pixel_params[theta_key] = (self.theta + angle).to(u.radian).value params.remove(theta_key) param_vals = [getattr(self, param) for param in params] if param_vals[0].unit.physical_type == 'angle': for param, param_val in zip(params, param_vals): pixel_params[param] = (param_val / scale).to(u.pixel).value else: # pixels for param, param_val in zip(params, param_vals): pixel_params[param] = param_val.value return pixel_params
python
def _to_pixel_params(self, wcs, mode='all'): """ Convert the sky aperture parameters to those for a pixel aperture. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- pixel_params : dict A dictionary of parameters for an equivalent pixel aperture. """ pixel_params = {} x, y = skycoord_to_pixel(self.positions, wcs, mode=mode) pixel_params['positions'] = np.array([x, y]).transpose() # The aperture object must have a single value for each shape # parameter so we must use a single pixel scale for all positions. # Here, we define the scale at the WCS CRVAL position. crval = SkyCoord([wcs.wcs.crval], frame=wcs_to_celestial_frame(wcs), unit=wcs.wcs.cunit) scale, angle = pixel_scale_angle_at_skycoord(crval, wcs) params = self._params[:] theta_key = 'theta' if theta_key in self._params: pixel_params[theta_key] = (self.theta + angle).to(u.radian).value params.remove(theta_key) param_vals = [getattr(self, param) for param in params] if param_vals[0].unit.physical_type == 'angle': for param, param_val in zip(params, param_vals): pixel_params[param] = (param_val / scale).to(u.pixel).value else: # pixels for param, param_val in zip(params, param_vals): pixel_params[param] = param_val.value return pixel_params
[ "def", "_to_pixel_params", "(", "self", ",", "wcs", ",", "mode", "=", "'all'", ")", ":", "pixel_params", "=", "{", "}", "x", ",", "y", "=", "skycoord_to_pixel", "(", "self", ".", "positions", ",", "wcs", ",", "mode", "=", "mode", ")", "pixel_params", "[", "'positions'", "]", "=", "np", ".", "array", "(", "[", "x", ",", "y", "]", ")", ".", "transpose", "(", ")", "# The aperture object must have a single value for each shape", "# parameter so we must use a single pixel scale for all positions.", "# Here, we define the scale at the WCS CRVAL position.", "crval", "=", "SkyCoord", "(", "[", "wcs", ".", "wcs", ".", "crval", "]", ",", "frame", "=", "wcs_to_celestial_frame", "(", "wcs", ")", ",", "unit", "=", "wcs", ".", "wcs", ".", "cunit", ")", "scale", ",", "angle", "=", "pixel_scale_angle_at_skycoord", "(", "crval", ",", "wcs", ")", "params", "=", "self", ".", "_params", "[", ":", "]", "theta_key", "=", "'theta'", "if", "theta_key", "in", "self", ".", "_params", ":", "pixel_params", "[", "theta_key", "]", "=", "(", "self", ".", "theta", "+", "angle", ")", ".", "to", "(", "u", ".", "radian", ")", ".", "value", "params", ".", "remove", "(", "theta_key", ")", "param_vals", "=", "[", "getattr", "(", "self", ",", "param", ")", "for", "param", "in", "params", "]", "if", "param_vals", "[", "0", "]", ".", "unit", ".", "physical_type", "==", "'angle'", ":", "for", "param", ",", "param_val", "in", "zip", "(", "params", ",", "param_vals", ")", ":", "pixel_params", "[", "param", "]", "=", "(", "param_val", "/", "scale", ")", ".", "to", "(", "u", ".", "pixel", ")", ".", "value", "else", ":", "# pixels", "for", "param", ",", "param_val", "in", "zip", "(", "params", ",", "param_vals", ")", ":", "pixel_params", "[", "param", "]", "=", "param_val", ".", "value", "return", "pixel_params" ]
Convert the sky aperture parameters to those for a pixel aperture.

Parameters
----------
wcs : `~astropy.wcs.WCS`
    The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
    Whether to do the transformation including distortions
    (``'all'``; default) or including only the core WCS
    transformation (``'wcs'``).

Returns
-------
pixel_params : dict
    A dictionary of parameters for an equivalent pixel aperture.
[ "Convert", "the", "sky", "aperture", "parameters", "to", "those", "for", "a", "pixel", "aperture", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/core.py#L601-L647
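The reverse direction is assumed to go through a sky aperture's `to_pixel` method, which uses `_to_pixel_params` as documented above. `SkyCircularAperture`, the coordinates, and the WCS below are illustration values, not taken from this record.

import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from photutils import SkyCircularAperture

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [50.0, 50.0]
wcs.wcs.crval = [197.89, -1.37]
wcs.wcs.cdelt = [-2.8e-4, 2.8e-4]

coord = SkyCoord(ra=197.8925, dec=-1.3653, unit='deg')
sky_aper = SkyCircularAperture(coord, r=4.0 * u.arcsec)
pix_aper = sky_aper.to_pixel(wcs)    # angular radius converted at the CRVAL pixel scale
print(pix_aper.positions)            # (x, y) pixel position
print(pix_aper.r)                    # radius in pixels (~4 here)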
10,508
astropy/photutils
photutils/segmentation/properties.py
source_properties
def source_properties(data, segment_img, error=None, mask=None, background=None, filter_kernel=None, wcs=None, labels=None): """ Calculate photometry and morphological properties of sources defined by a labeled segmentation image. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array from which to calculate the source photometry and properties. ``data`` should be background-subtracted. Non-finite ``data`` values (e.g. NaN or inf) are automatically masked. segment_img : `SegmentationImage` or array_like (int) A 2D segmentation image, either as a `SegmentationImage` object or an `~numpy.ndarray`, with the same shape as ``data`` where sources are labeled by different positive integer values. A value of zero is reserved for the background. error : array_like or `~astropy.units.Quantity`, optional The total error array corresponding to the input ``data`` array. ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources (see `~photutils.utils.calc_total_error`) . ``error`` must have the same shape as the input ``data``. Non-finite ``error`` values (e.g. NaN or inf) are not automatically masked, unless they are at the same position of non-finite values in the input ``data`` array. Such pixels can be masked using the ``mask`` keyword. See the Notes section below for details on the error propagation. mask : array_like (bool), optional A boolean mask with the same shape as ``data`` where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. Non-finite values (e.g. NaN or inf) in the input ``data`` are automatically masked. background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was *previously* present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. Non-finite ``background`` values (e.g. NaN or inf) are not automatically masked, unless they are at the same position of non-finite values in the input ``data`` array. Such pixels can be masked using the ``mask`` keyword. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the data prior to calculating the source centroid and morphological parameters. The kernel should be the same one used in defining the source segments, i.e. the detection image (e.g., see :func:`~photutils.detect_sources`). If `None`, then the unfiltered ``data`` will be used instead. wcs : `~astropy.wcs.WCS` The WCS transformation to use. If `None`, then any sky-based properties will be set to `None`. labels : int, array-like (1D, int) The segmentation labels for which to calculate source properties. If `None` (default), then the properties will be calculated for all labeled sources. Returns ------- output : `SourceCatalog` instance A `SourceCatalog` instance containing the properties of each source. Notes ----- `SExtractor`_'s centroid and morphological parameters are always calculated from a filtered "detection" image, i.e. the image used to define the segmentation image. The usual downside of the filtering is the sources will be made more circular than they actually are. 
If you wish to reproduce `SExtractor`_ centroid and morphology results, then input a filtered and background-subtracted "detection" image into the ``filtered_data`` keyword. If ``filtered_data`` is `None`, then the unfiltered ``data`` will be used for the source centroid and morphological parameters. Negative data values (``filtered_data`` or ``data``) within the source segment are set to zero when calculating morphological properties based on image moments. Negative values could occur, for example, if the segmentation image was defined from a different image (e.g., different bandpass) or if the background was oversubtracted. Note that `~photutils.SourceProperties.source_sum` always includes the contribution of negative ``data`` values. The input ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources. `~photutils.SourceProperties.source_sum_err` is simply the quadrature sum of the pixel-wise total errors over the non-masked pixels within the source segment: .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S} \\sigma_{\\mathrm{tot}, i}^2} where :math:`\\Delta F` is `~photutils.SourceProperties.source_sum_err`, :math:`S` are the non-masked pixels in the source segment, and :math:`\\sigma_{\\mathrm{tot}, i}` is the input ``error`` array. .. _SExtractor: http://www.astromatic.net/software/sextractor See Also -------- SegmentationImage, SourceProperties, detect_sources Examples -------- >>> import numpy as np >>> from photutils import SegmentationImage, source_properties >>> image = np.arange(16.).reshape(4, 4) >>> print(image) # doctest: +SKIP [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.] [12. 13. 14. 15.]] >>> segm = SegmentationImage([[1, 1, 0, 0], ... [1, 0, 0, 2], ... [0, 0, 2, 2], ... [0, 2, 2, 0]]) >>> props = source_properties(image, segm) Print some properties of the first object (labeled with ``1`` in the segmentation image): >>> props[0].id # id corresponds to segment label number 1 >>> props[0].centroid # doctest: +FLOAT_CMP <Quantity [0.8, 0.2] pix> >>> props[0].source_sum # doctest: +FLOAT_CMP 5.0 >>> props[0].area # doctest: +FLOAT_CMP <Quantity 3. pix2> >>> props[0].max_value # doctest: +FLOAT_CMP 4.0 Print some properties of the second object (labeled with ``2`` in the segmentation image): >>> props[1].id # id corresponds to segment label number 2 >>> props[1].centroid # doctest: +FLOAT_CMP <Quantity [2.36363636, 2.09090909] pix> >>> props[1].perimeter # doctest: +FLOAT_CMP <Quantity 5.41421356 pix> >>> props[1].orientation # doctest: +FLOAT_CMP <Quantity -0.74175931 rad> """ if not isinstance(segment_img, SegmentationImage): segment_img = SegmentationImage(segment_img) if segment_img.shape != data.shape: raise ValueError('segment_img and data must have the same shape.') # filter the data once, instead of repeating for each source if filter_kernel is not None: filtered_data = filter_data(data, filter_kernel, mode='constant', fill_value=0.0, check_normalization=True) else: filtered_data = None if labels is None: labels = segment_img.labels labels = np.atleast_1d(labels) sources_props = [] for label in labels: if label not in segment_img.labels: warnings.warn('label {} is not in the segmentation image.' .format(label), AstropyUserWarning) continue # skip invalid labels sources_props.append(SourceProperties( data, segment_img, label, filtered_data=filtered_data, error=error, mask=mask, background=background, wcs=wcs)) if len(sources_props) == 0: raise ValueError('No sources are defined.') return SourceCatalog(sources_props, wcs=wcs)
python
def source_properties(data, segment_img, error=None, mask=None, background=None, filter_kernel=None, wcs=None, labels=None): """ Calculate photometry and morphological properties of sources defined by a labeled segmentation image. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array from which to calculate the source photometry and properties. ``data`` should be background-subtracted. Non-finite ``data`` values (e.g. NaN or inf) are automatically masked. segment_img : `SegmentationImage` or array_like (int) A 2D segmentation image, either as a `SegmentationImage` object or an `~numpy.ndarray`, with the same shape as ``data`` where sources are labeled by different positive integer values. A value of zero is reserved for the background. error : array_like or `~astropy.units.Quantity`, optional The total error array corresponding to the input ``data`` array. ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources (see `~photutils.utils.calc_total_error`) . ``error`` must have the same shape as the input ``data``. Non-finite ``error`` values (e.g. NaN or inf) are not automatically masked, unless they are at the same position of non-finite values in the input ``data`` array. Such pixels can be masked using the ``mask`` keyword. See the Notes section below for details on the error propagation. mask : array_like (bool), optional A boolean mask with the same shape as ``data`` where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. Non-finite values (e.g. NaN or inf) in the input ``data`` are automatically masked. background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was *previously* present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. Non-finite ``background`` values (e.g. NaN or inf) are not automatically masked, unless they are at the same position of non-finite values in the input ``data`` array. Such pixels can be masked using the ``mask`` keyword. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the data prior to calculating the source centroid and morphological parameters. The kernel should be the same one used in defining the source segments, i.e. the detection image (e.g., see :func:`~photutils.detect_sources`). If `None`, then the unfiltered ``data`` will be used instead. wcs : `~astropy.wcs.WCS` The WCS transformation to use. If `None`, then any sky-based properties will be set to `None`. labels : int, array-like (1D, int) The segmentation labels for which to calculate source properties. If `None` (default), then the properties will be calculated for all labeled sources. Returns ------- output : `SourceCatalog` instance A `SourceCatalog` instance containing the properties of each source. Notes ----- `SExtractor`_'s centroid and morphological parameters are always calculated from a filtered "detection" image, i.e. the image used to define the segmentation image. The usual downside of the filtering is the sources will be made more circular than they actually are. 
If you wish to reproduce `SExtractor`_ centroid and morphology results, then input a filtered and background-subtracted "detection" image into the ``filtered_data`` keyword. If ``filtered_data`` is `None`, then the unfiltered ``data`` will be used for the source centroid and morphological parameters. Negative data values (``filtered_data`` or ``data``) within the source segment are set to zero when calculating morphological properties based on image moments. Negative values could occur, for example, if the segmentation image was defined from a different image (e.g., different bandpass) or if the background was oversubtracted. Note that `~photutils.SourceProperties.source_sum` always includes the contribution of negative ``data`` values. The input ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources. `~photutils.SourceProperties.source_sum_err` is simply the quadrature sum of the pixel-wise total errors over the non-masked pixels within the source segment: .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S} \\sigma_{\\mathrm{tot}, i}^2} where :math:`\\Delta F` is `~photutils.SourceProperties.source_sum_err`, :math:`S` are the non-masked pixels in the source segment, and :math:`\\sigma_{\\mathrm{tot}, i}` is the input ``error`` array. .. _SExtractor: http://www.astromatic.net/software/sextractor See Also -------- SegmentationImage, SourceProperties, detect_sources Examples -------- >>> import numpy as np >>> from photutils import SegmentationImage, source_properties >>> image = np.arange(16.).reshape(4, 4) >>> print(image) # doctest: +SKIP [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.] [12. 13. 14. 15.]] >>> segm = SegmentationImage([[1, 1, 0, 0], ... [1, 0, 0, 2], ... [0, 0, 2, 2], ... [0, 2, 2, 0]]) >>> props = source_properties(image, segm) Print some properties of the first object (labeled with ``1`` in the segmentation image): >>> props[0].id # id corresponds to segment label number 1 >>> props[0].centroid # doctest: +FLOAT_CMP <Quantity [0.8, 0.2] pix> >>> props[0].source_sum # doctest: +FLOAT_CMP 5.0 >>> props[0].area # doctest: +FLOAT_CMP <Quantity 3. pix2> >>> props[0].max_value # doctest: +FLOAT_CMP 4.0 Print some properties of the second object (labeled with ``2`` in the segmentation image): >>> props[1].id # id corresponds to segment label number 2 >>> props[1].centroid # doctest: +FLOAT_CMP <Quantity [2.36363636, 2.09090909] pix> >>> props[1].perimeter # doctest: +FLOAT_CMP <Quantity 5.41421356 pix> >>> props[1].orientation # doctest: +FLOAT_CMP <Quantity -0.74175931 rad> """ if not isinstance(segment_img, SegmentationImage): segment_img = SegmentationImage(segment_img) if segment_img.shape != data.shape: raise ValueError('segment_img and data must have the same shape.') # filter the data once, instead of repeating for each source if filter_kernel is not None: filtered_data = filter_data(data, filter_kernel, mode='constant', fill_value=0.0, check_normalization=True) else: filtered_data = None if labels is None: labels = segment_img.labels labels = np.atleast_1d(labels) sources_props = [] for label in labels: if label not in segment_img.labels: warnings.warn('label {} is not in the segmentation image.' .format(label), AstropyUserWarning) continue # skip invalid labels sources_props.append(SourceProperties( data, segment_img, label, filtered_data=filtered_data, error=error, mask=mask, background=background, wcs=wcs)) if len(sources_props) == 0: raise ValueError('No sources are defined.') return SourceCatalog(sources_props, wcs=wcs)
[ "def", "source_properties", "(", "data", ",", "segment_img", ",", "error", "=", "None", ",", "mask", "=", "None", ",", "background", "=", "None", ",", "filter_kernel", "=", "None", ",", "wcs", "=", "None", ",", "labels", "=", "None", ")", ":", "if", "not", "isinstance", "(", "segment_img", ",", "SegmentationImage", ")", ":", "segment_img", "=", "SegmentationImage", "(", "segment_img", ")", "if", "segment_img", ".", "shape", "!=", "data", ".", "shape", ":", "raise", "ValueError", "(", "'segment_img and data must have the same shape.'", ")", "# filter the data once, instead of repeating for each source", "if", "filter_kernel", "is", "not", "None", ":", "filtered_data", "=", "filter_data", "(", "data", ",", "filter_kernel", ",", "mode", "=", "'constant'", ",", "fill_value", "=", "0.0", ",", "check_normalization", "=", "True", ")", "else", ":", "filtered_data", "=", "None", "if", "labels", "is", "None", ":", "labels", "=", "segment_img", ".", "labels", "labels", "=", "np", ".", "atleast_1d", "(", "labels", ")", "sources_props", "=", "[", "]", "for", "label", "in", "labels", ":", "if", "label", "not", "in", "segment_img", ".", "labels", ":", "warnings", ".", "warn", "(", "'label {} is not in the segmentation image.'", ".", "format", "(", "label", ")", ",", "AstropyUserWarning", ")", "continue", "# skip invalid labels", "sources_props", ".", "append", "(", "SourceProperties", "(", "data", ",", "segment_img", ",", "label", ",", "filtered_data", "=", "filtered_data", ",", "error", "=", "error", ",", "mask", "=", "mask", ",", "background", "=", "background", ",", "wcs", "=", "wcs", ")", ")", "if", "len", "(", "sources_props", ")", "==", "0", ":", "raise", "ValueError", "(", "'No sources are defined.'", ")", "return", "SourceCatalog", "(", "sources_props", ",", "wcs", "=", "wcs", ")" ]
Calculate photometry and morphological properties of sources defined by a labeled segmentation image. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array from which to calculate the source photometry and properties. ``data`` should be background-subtracted. Non-finite ``data`` values (e.g. NaN or inf) are automatically masked. segment_img : `SegmentationImage` or array_like (int) A 2D segmentation image, either as a `SegmentationImage` object or an `~numpy.ndarray`, with the same shape as ``data`` where sources are labeled by different positive integer values. A value of zero is reserved for the background. error : array_like or `~astropy.units.Quantity`, optional The total error array corresponding to the input ``data`` array. ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources (see `~photutils.utils.calc_total_error`) . ``error`` must have the same shape as the input ``data``. Non-finite ``error`` values (e.g. NaN or inf) are not automatically masked, unless they are at the same position of non-finite values in the input ``data`` array. Such pixels can be masked using the ``mask`` keyword. See the Notes section below for details on the error propagation. mask : array_like (bool), optional A boolean mask with the same shape as ``data`` where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. Non-finite values (e.g. NaN or inf) in the input ``data`` are automatically masked. background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was *previously* present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. Non-finite ``background`` values (e.g. NaN or inf) are not automatically masked, unless they are at the same position of non-finite values in the input ``data`` array. Such pixels can be masked using the ``mask`` keyword. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the data prior to calculating the source centroid and morphological parameters. The kernel should be the same one used in defining the source segments, i.e. the detection image (e.g., see :func:`~photutils.detect_sources`). If `None`, then the unfiltered ``data`` will be used instead. wcs : `~astropy.wcs.WCS` The WCS transformation to use. If `None`, then any sky-based properties will be set to `None`. labels : int, array-like (1D, int) The segmentation labels for which to calculate source properties. If `None` (default), then the properties will be calculated for all labeled sources. Returns ------- output : `SourceCatalog` instance A `SourceCatalog` instance containing the properties of each source. Notes ----- `SExtractor`_'s centroid and morphological parameters are always calculated from a filtered "detection" image, i.e. the image used to define the segmentation image. The usual downside of the filtering is the sources will be made more circular than they actually are. If you wish to reproduce `SExtractor`_ centroid and morphology results, then input a filtered and background-subtracted "detection" image into the ``filtered_data`` keyword. 
If ``filtered_data`` is `None`, then the unfiltered ``data`` will be used for the source centroid and morphological parameters. Negative data values (``filtered_data`` or ``data``) within the source segment are set to zero when calculating morphological properties based on image moments. Negative values could occur, for example, if the segmentation image was defined from a different image (e.g., different bandpass) or if the background was oversubtracted. Note that `~photutils.SourceProperties.source_sum` always includes the contribution of negative ``data`` values. The input ``error`` is assumed to include *all* sources of error, including the Poisson error of the sources. `~photutils.SourceProperties.source_sum_err` is simply the quadrature sum of the pixel-wise total errors over the non-masked pixels within the source segment: .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S} \\sigma_{\\mathrm{tot}, i}^2} where :math:`\\Delta F` is `~photutils.SourceProperties.source_sum_err`, :math:`S` are the non-masked pixels in the source segment, and :math:`\\sigma_{\\mathrm{tot}, i}` is the input ``error`` array. .. _SExtractor: http://www.astromatic.net/software/sextractor See Also -------- SegmentationImage, SourceProperties, detect_sources Examples -------- >>> import numpy as np >>> from photutils import SegmentationImage, source_properties >>> image = np.arange(16.).reshape(4, 4) >>> print(image) # doctest: +SKIP [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.] [12. 13. 14. 15.]] >>> segm = SegmentationImage([[1, 1, 0, 0], ... [1, 0, 0, 2], ... [0, 0, 2, 2], ... [0, 2, 2, 0]]) >>> props = source_properties(image, segm) Print some properties of the first object (labeled with ``1`` in the segmentation image): >>> props[0].id # id corresponds to segment label number 1 >>> props[0].centroid # doctest: +FLOAT_CMP <Quantity [0.8, 0.2] pix> >>> props[0].source_sum # doctest: +FLOAT_CMP 5.0 >>> props[0].area # doctest: +FLOAT_CMP <Quantity 3. pix2> >>> props[0].max_value # doctest: +FLOAT_CMP 4.0 Print some properties of the second object (labeled with ``2`` in the segmentation image): >>> props[1].id # id corresponds to segment label number 2 >>> props[1].centroid # doctest: +FLOAT_CMP <Quantity [2.36363636, 2.09090909] pix> >>> props[1].perimeter # doctest: +FLOAT_CMP <Quantity 5.41421356 pix> >>> props[1].orientation # doctest: +FLOAT_CMP <Quantity -0.74175931 rad>
[ "Calculate", "photometry", "and", "morphological", "properties", "of", "sources", "defined", "by", "a", "labeled", "segmentation", "image", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L1223-L1412
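A short end-to-end sketch of the workflow the record above fits into: detect sources, measure them with `source_properties`, and tabulate the catalog. `detect_sources` and `Gaussian2DKernel` are not part of this record, and the threshold, kernel width, and fake source are arbitrary illustration values.

import numpy as np
from astropy.convolution import Gaussian2DKernel
from photutils import detect_sources, source_properties

data = np.zeros((51, 51))
data[20:25, 20:25] = 5.0             # a single fake source on a zero background

kernel = Gaussian2DKernel(1.5)
kernel.normalize()

segm = detect_sources(data, threshold=1.0, npixels=5, filter_kernel=kernel)
cat = source_properties(data, segm, filter_kernel=kernel)
tbl = cat.to_table()                 # QTable with one row per labeled source
print(tbl['id', 'xcentroid', 'ycentroid', 'source_sum'])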
10,509
astropy/photutils
photutils/segmentation/properties.py
_properties_table
def _properties_table(obj, columns=None, exclude_columns=None): """ Construct a `~astropy.table.QTable` of source properties from a `SourceProperties` or `SourceCatalog` object. Parameters ---------- obj : `SourceProperties` or `SourceCatalog` instance The object containing the source properties. columns : str or list of str, optional Names of columns, in order, to include in the output `~astropy.table.QTable`. The allowed column names are any of the attributes of `SourceProperties`. exclude_columns : str or list of str, optional Names of columns to exclude from the default properties list in the output `~astropy.table.QTable`. Returns ------- table : `~astropy.table.QTable` A table of source properties with one row per source. """ # default properties columns_all = ['id', 'xcentroid', 'ycentroid', 'sky_centroid', 'sky_centroid_icrs', 'source_sum', 'source_sum_err', 'background_sum', 'background_mean', 'background_at_centroid', 'xmin', 'xmax', 'ymin', 'ymax', 'min_value', 'max_value', 'minval_xpos', 'minval_ypos', 'maxval_xpos', 'maxval_ypos', 'area', 'equivalent_radius', 'perimeter', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'eccentricity', 'orientation', 'ellipticity', 'elongation', 'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'cxx', 'cxy', 'cyy'] table_columns = None if exclude_columns is not None: table_columns = [s for s in columns_all if s not in exclude_columns] if columns is not None: table_columns = np.atleast_1d(columns) if table_columns is None: table_columns = columns_all tbl = QTable() for column in table_columns: values = getattr(obj, column) if isinstance(obj, SourceProperties): # turn scalar values into length-1 arrays because QTable # column assignment requires an object with a length values = np.atleast_1d(values) # Unfortunately np.atleast_1d creates an array of SkyCoord # instead of a SkyCoord array (Quantity does work correctly # with np.atleast_1d). Here we make a SkyCoord array for # the output table column. if isinstance(values[0], SkyCoord): values = SkyCoord(values) # length-1 SkyCoord array tbl[column] = values return tbl
python
def _properties_table(obj, columns=None, exclude_columns=None): """ Construct a `~astropy.table.QTable` of source properties from a `SourceProperties` or `SourceCatalog` object. Parameters ---------- obj : `SourceProperties` or `SourceCatalog` instance The object containing the source properties. columns : str or list of str, optional Names of columns, in order, to include in the output `~astropy.table.QTable`. The allowed column names are any of the attributes of `SourceProperties`. exclude_columns : str or list of str, optional Names of columns to exclude from the default properties list in the output `~astropy.table.QTable`. Returns ------- table : `~astropy.table.QTable` A table of source properties with one row per source. """ # default properties columns_all = ['id', 'xcentroid', 'ycentroid', 'sky_centroid', 'sky_centroid_icrs', 'source_sum', 'source_sum_err', 'background_sum', 'background_mean', 'background_at_centroid', 'xmin', 'xmax', 'ymin', 'ymax', 'min_value', 'max_value', 'minval_xpos', 'minval_ypos', 'maxval_xpos', 'maxval_ypos', 'area', 'equivalent_radius', 'perimeter', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'eccentricity', 'orientation', 'ellipticity', 'elongation', 'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'cxx', 'cxy', 'cyy'] table_columns = None if exclude_columns is not None: table_columns = [s for s in columns_all if s not in exclude_columns] if columns is not None: table_columns = np.atleast_1d(columns) if table_columns is None: table_columns = columns_all tbl = QTable() for column in table_columns: values = getattr(obj, column) if isinstance(obj, SourceProperties): # turn scalar values into length-1 arrays because QTable # column assignment requires an object with a length values = np.atleast_1d(values) # Unfortunately np.atleast_1d creates an array of SkyCoord # instead of a SkyCoord array (Quantity does work correctly # with np.atleast_1d). Here we make a SkyCoord array for # the output table column. if isinstance(values[0], SkyCoord): values = SkyCoord(values) # length-1 SkyCoord array tbl[column] = values return tbl
[ "def", "_properties_table", "(", "obj", ",", "columns", "=", "None", ",", "exclude_columns", "=", "None", ")", ":", "# default properties", "columns_all", "=", "[", "'id'", ",", "'xcentroid'", ",", "'ycentroid'", ",", "'sky_centroid'", ",", "'sky_centroid_icrs'", ",", "'source_sum'", ",", "'source_sum_err'", ",", "'background_sum'", ",", "'background_mean'", ",", "'background_at_centroid'", ",", "'xmin'", ",", "'xmax'", ",", "'ymin'", ",", "'ymax'", ",", "'min_value'", ",", "'max_value'", ",", "'minval_xpos'", ",", "'minval_ypos'", ",", "'maxval_xpos'", ",", "'maxval_ypos'", ",", "'area'", ",", "'equivalent_radius'", ",", "'perimeter'", ",", "'semimajor_axis_sigma'", ",", "'semiminor_axis_sigma'", ",", "'eccentricity'", ",", "'orientation'", ",", "'ellipticity'", ",", "'elongation'", ",", "'covar_sigx2'", ",", "'covar_sigxy'", ",", "'covar_sigy2'", ",", "'cxx'", ",", "'cxy'", ",", "'cyy'", "]", "table_columns", "=", "None", "if", "exclude_columns", "is", "not", "None", ":", "table_columns", "=", "[", "s", "for", "s", "in", "columns_all", "if", "s", "not", "in", "exclude_columns", "]", "if", "columns", "is", "not", "None", ":", "table_columns", "=", "np", ".", "atleast_1d", "(", "columns", ")", "if", "table_columns", "is", "None", ":", "table_columns", "=", "columns_all", "tbl", "=", "QTable", "(", ")", "for", "column", "in", "table_columns", ":", "values", "=", "getattr", "(", "obj", ",", "column", ")", "if", "isinstance", "(", "obj", ",", "SourceProperties", ")", ":", "# turn scalar values into length-1 arrays because QTable", "# column assignment requires an object with a length", "values", "=", "np", ".", "atleast_1d", "(", "values", ")", "# Unfortunately np.atleast_1d creates an array of SkyCoord", "# instead of a SkyCoord array (Quantity does work correctly", "# with np.atleast_1d). Here we make a SkyCoord array for", "# the output table column.", "if", "isinstance", "(", "values", "[", "0", "]", ",", "SkyCoord", ")", ":", "values", "=", "SkyCoord", "(", "values", ")", "# length-1 SkyCoord array", "tbl", "[", "column", "]", "=", "values", "return", "tbl" ]
Construct a `~astropy.table.QTable` of source properties from a `SourceProperties` or `SourceCatalog` object. Parameters ---------- obj : `SourceProperties` or `SourceCatalog` instance The object containing the source properties. columns : str or list of str, optional Names of columns, in order, to include in the output `~astropy.table.QTable`. The allowed column names are any of the attributes of `SourceProperties`. exclude_columns : str or list of str, optional Names of columns to exclude from the default properties list in the output `~astropy.table.QTable`. Returns ------- table : `~astropy.table.QTable` A table of source properties with one row per source.
[ "Construct", "a", "~astropy", ".", "table", ".", "QTable", "of", "source", "properties", "from", "a", "SourceProperties", "or", "SourceCatalog", "object", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L1609-L1673
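A plain-Python sketch of the column-selection precedence implemented in `_properties_table` above: an explicit ``columns`` list overrides ``exclude_columns``, which otherwise filters the default column list. The short `columns_all` list is a stand-in for the full default list; this is not the photutils code itself.

columns_all = ['id', 'xcentroid', 'ycentroid', 'source_sum', 'area']

def select_columns(columns=None, exclude_columns=None):
    table_columns = None
    if exclude_columns is not None:
        table_columns = [c for c in columns_all if c not in exclude_columns]
    if columns is not None:
        table_columns = list(columns)          # an explicit list wins
    if table_columns is None:
        table_columns = columns_all            # fall back to the defaults
    return table_columns

print(select_columns(exclude_columns=['area']))           # defaults minus 'area'
print(select_columns(columns=['id', 'source_sum'],
                     exclude_columns=['id']))             # ['id', 'source_sum']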
10,510
astropy/photutils
photutils/segmentation/properties.py
SourceProperties._total_mask
def _total_mask(self): """ Combination of the _segment_mask, _input_mask, and _data_mask. This mask is applied to ``data``, ``error``, and ``background`` inputs when calculating properties. """ mask = self._segment_mask | self._data_mask if self._input_mask is not None: mask |= self._input_mask return mask
python
def _total_mask(self): """ Combination of the _segment_mask, _input_mask, and _data_mask. This mask is applied to ``data``, ``error``, and ``background`` inputs when calculating properties. """ mask = self._segment_mask | self._data_mask if self._input_mask is not None: mask |= self._input_mask return mask
[ "def", "_total_mask", "(", "self", ")", ":", "mask", "=", "self", ".", "_segment_mask", "|", "self", ".", "_data_mask", "if", "self", ".", "_input_mask", "is", "not", "None", ":", "mask", "|=", "self", ".", "_input_mask", "return", "mask" ]
Combination of the _segment_mask, _input_mask, and _data_mask. This mask is applied to ``data``, ``error``, and ``background`` inputs when calculating properties.
[ "Combination", "of", "the", "_segment_mask", "_input_mask", "and", "_data_mask", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L228-L241
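A tiny numpy sketch of how the total mask above is assembled: the segment mask, the non-finite-data mask, and any user-supplied mask are OR-ed together. The 2x2 arrays are invented.

import numpy as np

segment_mask = np.array([[True, False], [False, False]])   # outside the segment
data_mask = np.array([[False, False], [True, False]])      # non-finite data values
input_mask = np.array([[False, True], [False, False]])     # user-supplied mask

total_mask = segment_mask | data_mask
if input_mask is not None:
    total_mask |= input_mask
print(total_mask)    # True wherever any contributing mask is True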
10,511
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.to_table
def to_table(self, columns=None, exclude_columns=None): """ Create a `~astropy.table.QTable` of properties. If ``columns`` or ``exclude_columns`` are not input, then the `~astropy.table.QTable` will include a default list of scalar-valued properties. Parameters ---------- columns : str or list of str, optional Names of columns, in order, to include in the output `~astropy.table.QTable`. The allowed column names are any of the attributes of `SourceProperties`. exclude_columns : str or list of str, optional Names of columns to exclude from the default properties list in the output `~astropy.table.QTable`. Returns ------- table : `~astropy.table.QTable` A single-row table of properties of the source. """ return _properties_table(self, columns=columns, exclude_columns=exclude_columns)
python
def to_table(self, columns=None, exclude_columns=None): """ Create a `~astropy.table.QTable` of properties. If ``columns`` or ``exclude_columns`` are not input, then the `~astropy.table.QTable` will include a default list of scalar-valued properties. Parameters ---------- columns : str or list of str, optional Names of columns, in order, to include in the output `~astropy.table.QTable`. The allowed column names are any of the attributes of `SourceProperties`. exclude_columns : str or list of str, optional Names of columns to exclude from the default properties list in the output `~astropy.table.QTable`. Returns ------- table : `~astropy.table.QTable` A single-row table of properties of the source. """ return _properties_table(self, columns=columns, exclude_columns=exclude_columns)
[ "def", "to_table", "(", "self", ",", "columns", "=", "None", ",", "exclude_columns", "=", "None", ")", ":", "return", "_properties_table", "(", "self", ",", "columns", "=", "columns", ",", "exclude_columns", "=", "exclude_columns", ")" ]
Create a `~astropy.table.QTable` of properties. If ``columns`` or ``exclude_columns`` are not input, then the `~astropy.table.QTable` will include a default list of scalar-valued properties. Parameters ---------- columns : str or list of str, optional Names of columns, in order, to include in the output `~astropy.table.QTable`. The allowed column names are any of the attributes of `SourceProperties`. exclude_columns : str or list of str, optional Names of columns to exclude from the default properties list in the output `~astropy.table.QTable`. Returns ------- table : `~astropy.table.QTable` A single-row table of properties of the source.
[ "Create", "a", "~astropy", ".", "table", ".", "QTable", "of", "properties", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L330-L356
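A usage sketch for `SourceProperties.to_table`, reusing the small 4x4 example from the `source_properties` docstring earlier in this section; the column list is an arbitrary choice.

import numpy as np
from photutils import SegmentationImage, source_properties

image = np.arange(16.).reshape(4, 4)
segm = SegmentationImage([[1, 1, 0, 0], [1, 0, 0, 2], [0, 0, 2, 2], [0, 2, 2, 0]])
props = source_properties(image, segm)

tbl = props[0].to_table(columns=['id', 'xcentroid', 'ycentroid', 'source_sum'])
print(tbl)           # a single-row QTable for the source labeled 1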
10,512
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.data_cutout_ma
def data_cutout_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout from the data. The mask is `True` for pixels outside of the source segment (labeled region of interest), masked pixels from the ``mask`` input, or any non-finite ``data`` values (e.g. NaN or inf). """ return np.ma.masked_array(self._data[self._slice], mask=self._total_mask)
python
def data_cutout_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout from the data. The mask is `True` for pixels outside of the source segment (labeled region of interest), masked pixels from the ``mask`` input, or any non-finite ``data`` values (e.g. NaN or inf). """ return np.ma.masked_array(self._data[self._slice], mask=self._total_mask)
[ "def", "data_cutout_ma", "(", "self", ")", ":", "return", "np", ".", "ma", ".", "masked_array", "(", "self", ".", "_data", "[", "self", ".", "_slice", "]", ",", "mask", "=", "self", ".", "_total_mask", ")" ]
A 2D `~numpy.ma.MaskedArray` cutout from the data. The mask is `True` for pixels outside of the source segment (labeled region of interest), masked pixels from the ``mask`` input, or any non-finite ``data`` values (e.g. NaN or inf).
[ "A", "2D", "~numpy", ".", "ma", ".", "MaskedArray", "cutout", "from", "the", "data", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L368-L378
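A sketch showing that ordinary numpy reductions over `data_cutout_ma` agree with the catalog values, using the same 4x4 example; the expected numbers follow from the docstring example above.

import numpy as np
from photutils import SegmentationImage, source_properties

image = np.arange(16.).reshape(4, 4)
segm = SegmentationImage([[1, 1, 0, 0], [1, 0, 0, 2], [0, 0, 2, 2], [0, 2, 2, 0]])
props = source_properties(image, segm)

cutout = props[0].data_cutout_ma     # 2D numpy masked array
print(cutout.count())                # number of unmasked pixels (3 here)
print(cutout.sum())                  # 5.0, matching props[0].source_sum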
10,513
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.error_cutout_ma
def error_cutout_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout from the input ``error`` image. The mask is `True` for pixels outside of the source segment (labeled region of interest), masked pixels from the ``mask`` input, or any non-finite ``data`` values (e.g. NaN or inf). If ``error`` is `None`, then ``error_cutout_ma`` is also `None`. """ if self._error is None: return None else: return np.ma.masked_array(self._error[self._slice], mask=self._total_mask)
python
def error_cutout_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout from the input ``error`` image. The mask is `True` for pixels outside of the source segment (labeled region of interest), masked pixels from the ``mask`` input, or any non-finite ``data`` values (e.g. NaN or inf). If ``error`` is `None`, then ``error_cutout_ma`` is also `None`. """ if self._error is None: return None else: return np.ma.masked_array(self._error[self._slice], mask=self._total_mask)
[ "def", "error_cutout_ma", "(", "self", ")", ":", "if", "self", ".", "_error", "is", "None", ":", "return", "None", "else", ":", "return", "np", ".", "ma", ".", "masked_array", "(", "self", ".", "_error", "[", "self", ".", "_slice", "]", ",", "mask", "=", "self", ".", "_total_mask", ")" ]
A 2D `~numpy.ma.MaskedArray` cutout from the input ``error`` image. The mask is `True` for pixels outside of the source segment (labeled region of interest), masked pixels from the ``mask`` input, or any non-finite ``data`` values (e.g. NaN or inf). If ``error`` is `None`, then ``error_cutout_ma`` is also `None`.
[ "A", "2D", "~numpy", ".", "ma", ".", "MaskedArray", "cutout", "from", "the", "input", "error", "image", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L381-L397
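A sketch of the two behaviours documented above: `error_cutout_ma` is `None` when no error array was supplied, and when one is supplied the quadrature sum over its unmasked pixels is expected to reproduce `source_sum_err` (per the Notes in the `source_properties` record). The constant error value is invented.

import numpy as np
from photutils import SegmentationImage, source_properties

image = np.arange(16.).reshape(4, 4)
error = 0.5 * np.ones_like(image)
segm = SegmentationImage([[1, 1, 0, 0], [1, 0, 0, 2], [0, 0, 2, 2], [0, 2, 2, 0]])

props_noerr = source_properties(image, segm)
print(props_noerr[0].error_cutout_ma)            # None

props = source_properties(image, segm, error=error)
err_ma = props[0].error_cutout_ma
print(np.sqrt(np.ma.sum(err_ma ** 2)))           # ~0.866, compare props[0].source_sum_err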
10,514
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.background_cutout_ma
def background_cutout_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout from the input ``background``. The mask is `True` for pixels outside of the source segment (labeled region of interest), masked pixels from the ``mask`` input, or any non-finite ``data`` values (e.g. NaN or inf). If ``background`` is `None`, then ``background_cutout_ma`` is also `None`. """ if self._background is None: return None else: return np.ma.masked_array(self._background[self._slice], mask=self._total_mask)
python
def background_cutout_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout from the input ``background``. The mask is `True` for pixels outside of the source segment (labeled region of interest), masked pixels from the ``mask`` input, or any non-finite ``data`` values (e.g. NaN or inf). If ``background`` is `None`, then ``background_cutout_ma`` is also `None`. """ if self._background is None: return None else: return np.ma.masked_array(self._background[self._slice], mask=self._total_mask)
[ "def", "background_cutout_ma", "(", "self", ")", ":", "if", "self", ".", "_background", "is", "None", ":", "return", "None", "else", ":", "return", "np", ".", "ma", ".", "masked_array", "(", "self", ".", "_background", "[", "self", ".", "_slice", "]", ",", "mask", "=", "self", ".", "_total_mask", ")" ]
A 2D `~numpy.ma.MaskedArray` cutout from the input ``background``. The mask is `True` for pixels outside of the source segment (labeled region of interest), masked pixels from the ``mask`` input, or any non-finite ``data`` values (e.g. NaN or inf). If ``background`` is `None`, then ``background_cutout_ma`` is also `None`.
[ "A", "2D", "~numpy", ".", "ma", ".", "MaskedArray", "cutout", "from", "the", "input", "background", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L400-L417
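A matching sketch for the background cutout; as above, it is `None` unless a ``background`` was passed to `source_properties`. A constant 2D background image is used so the expected mean is obvious; comparing it against `background_mean` is a suggestion, not something shown in this record.

import numpy as np
from photutils import SegmentationImage, source_properties

image = np.arange(16.).reshape(4, 4)
background = np.full_like(image, 1.0)
segm = SegmentationImage([[1, 1, 0, 0], [1, 0, 0, 2], [0, 0, 2, 2], [0, 2, 2, 0]])

props = source_properties(image, segm, background=background)
bkg_ma = props[0].background_cutout_ma   # masked 2D cutout of the background
print(bkg_ma.mean())                     # 1.0; compare props[0].background_mean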
10,515
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.coords
def coords(self): """ A tuple of two `~numpy.ndarray` containing the ``y`` and ``x`` pixel coordinates of unmasked pixels within the source segment. Non-finite pixel values (e.g. NaN, infs) are excluded (automatically masked). If all pixels are masked, ``coords`` will be a tuple of two empty arrays. """ yy, xx = np.nonzero(self.data_cutout_ma) return (yy + self._slice[0].start, xx + self._slice[1].start)
python
def coords(self): """ A tuple of two `~numpy.ndarray` containing the ``y`` and ``x`` pixel coordinates of unmasked pixels within the source segment. Non-finite pixel values (e.g. NaN, infs) are excluded (automatically masked). If all pixels are masked, ``coords`` will be a tuple of two empty arrays. """ yy, xx = np.nonzero(self.data_cutout_ma) return (yy + self._slice[0].start, xx + self._slice[1].start)
[ "def", "coords", "(", "self", ")", ":", "yy", ",", "xx", "=", "np", ".", "nonzero", "(", "self", ".", "data_cutout_ma", ")", "return", "(", "yy", "+", "self", ".", "_slice", "[", "0", "]", ".", "start", ",", "xx", "+", "self", ".", "_slice", "[", "1", "]", ".", "start", ")" ]
A tuple of two `~numpy.ndarray` containing the ``y`` and ``x`` pixel coordinates of unmasked pixels within the source segment. Non-finite pixel values (e.g. NaN, infs) are excluded (automatically masked). If all pixels are masked, ``coords`` will be a tuple of two empty arrays.
[ "A", "tuple", "of", "two", "~numpy", ".", "ndarray", "containing", "the", "y", "and", "x", "pixel", "coordinates", "of", "unmasked", "pixels", "within", "the", "source", "segment", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L442-L455
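A sketch showing that `coords` returns (y, x) arrays in the frame of the full image, so they can index the original data directly. The source labeled 2 from the 4x4 example is used because all of its pixel values are non-zero.

import numpy as np
from photutils import SegmentationImage, source_properties

image = np.arange(16.).reshape(4, 4)
segm = SegmentationImage([[1, 1, 0, 0], [1, 0, 0, 2], [0, 0, 2, 2], [0, 2, 2, 0]])
props = source_properties(image, segm)

yy, xx = props[1].coords             # pixel coordinates of the source labeled 2
print(list(zip(yy, xx)))             # (y, x) positions in the full-image frame
print(image[yy, xx].sum())           # 55.0, the same pixels summed directly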
10,516
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.sky_centroid
def sky_centroid(self): """ The sky coordinates of the centroid within the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The output coordinate frame is the same as the input WCS. """ if self._wcs is not None: return pixel_to_skycoord(self.xcentroid.value, self.ycentroid.value, self._wcs, origin=0) else: return None
python
def sky_centroid(self): """ The sky coordinates of the centroid within the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The output coordinate frame is the same as the input WCS. """ if self._wcs is not None: return pixel_to_skycoord(self.xcentroid.value, self.ycentroid.value, self._wcs, origin=0) else: return None
[ "def", "sky_centroid", "(", "self", ")", ":", "if", "self", ".", "_wcs", "is", "not", "None", ":", "return", "pixel_to_skycoord", "(", "self", ".", "xcentroid", ".", "value", ",", "self", ".", "ycentroid", ".", "value", ",", "self", ".", "_wcs", ",", "origin", "=", "0", ")", "else", ":", "return", "None" ]
The sky coordinates of the centroid within the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The output coordinate frame is the same as the input WCS.
[ "The", "sky", "coordinates", "of", "the", "centroid", "within", "the", "source", "segment", "returned", "as", "a", "~astropy", ".", "coordinates", ".", "SkyCoord", "object", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L526-L539
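A sketch of the WCS dependence documented above: sky-based properties are `SkyCoord` objects when a WCS is passed to `source_properties` and `None` otherwise. The TAN WCS values are invented for illustration.

import numpy as np
from astropy.wcs import WCS
from photutils import SegmentationImage, source_properties

image = np.arange(16.).reshape(4, 4)
segm = SegmentationImage([[1, 1, 0, 0], [1, 0, 0, 2], [0, 0, 2, 2], [0, 2, 2, 0]])

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [2.0, 2.0]
wcs.wcs.crval = [197.89, -1.37]
wcs.wcs.cdelt = [-2.8e-4, 2.8e-4]

props = source_properties(image, segm, wcs=wcs)
print(props[0].sky_centroid)             # SkyCoord in the frame of the input WCS

props_nowcs = source_properties(image, segm)
print(props_nowcs[0].sky_centroid)       # None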
10,517
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.sky_bbox_ll
def sky_bbox_ll(self): """ The sky coordinates of the lower-left vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmin.value - 0.5, self.ymin.value - 0.5, self._wcs, origin=0) else: return None
python
def sky_bbox_ll(self): """ The sky coordinates of the lower-left vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmin.value - 0.5, self.ymin.value - 0.5, self._wcs, origin=0) else: return None
[ "def", "sky_bbox_ll", "(", "self", ")", ":", "if", "self", ".", "_wcs", "is", "not", "None", ":", "return", "pixel_to_skycoord", "(", "self", ".", "xmin", ".", "value", "-", "0.5", ",", "self", ".", "ymin", ".", "value", "-", "0.5", ",", "self", ".", "_wcs", ",", "origin", "=", "0", ")", "else", ":", "return", "None" ]
The sky coordinates of the lower-left vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*.
[ "The", "sky", "coordinates", "of", "the", "lower", "-", "left", "vertex", "of", "the", "minimal", "bounding", "box", "of", "the", "source", "segment", "returned", "as", "a", "~astropy", ".", "coordinates", ".", "SkyCoord", "object", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L602-L617
10,518
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.sky_bbox_ul
def sky_bbox_ul(self): """ The sky coordinates of the upper-left vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmin.value - 0.5, self.ymax.value + 0.5, self._wcs, origin=0) else: return None
python
def sky_bbox_ul(self): """ The sky coordinates of the upper-left vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmin.value - 0.5, self.ymax.value + 0.5, self._wcs, origin=0) else: return None
[ "def", "sky_bbox_ul", "(", "self", ")", ":", "if", "self", ".", "_wcs", "is", "not", "None", ":", "return", "pixel_to_skycoord", "(", "self", ".", "xmin", ".", "value", "-", "0.5", ",", "self", ".", "ymax", ".", "value", "+", "0.5", ",", "self", ".", "_wcs", ",", "origin", "=", "0", ")", "else", ":", "return", "None" ]
The sky coordinates of the upper-left vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*.
[ "The", "sky", "coordinates", "of", "the", "upper", "-", "left", "vertex", "of", "the", "minimal", "bounding", "box", "of", "the", "source", "segment", "returned", "as", "a", "~astropy", ".", "coordinates", ".", "SkyCoord", "object", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L620-L635
10,519
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.sky_bbox_lr
def sky_bbox_lr(self): """ The sky coordinates of the lower-right vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmax.value + 0.5, self.ymin.value - 0.5, self._wcs, origin=0) else: return None
python
def sky_bbox_lr(self): """ The sky coordinates of the lower-right vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmax.value + 0.5, self.ymin.value - 0.5, self._wcs, origin=0) else: return None
[ "def", "sky_bbox_lr", "(", "self", ")", ":", "if", "self", ".", "_wcs", "is", "not", "None", ":", "return", "pixel_to_skycoord", "(", "self", ".", "xmax", ".", "value", "+", "0.5", ",", "self", ".", "ymin", ".", "value", "-", "0.5", ",", "self", ".", "_wcs", ",", "origin", "=", "0", ")", "else", ":", "return", "None" ]
The sky coordinates of the lower-right vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*.
[ "The", "sky", "coordinates", "of", "the", "lower", "-", "right", "vertex", "of", "the", "minimal", "bounding", "box", "of", "the", "source", "segment", "returned", "as", "a", "~astropy", ".", "coordinates", ".", "SkyCoord", "object", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L638-L653
10,520
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.sky_bbox_ur
def sky_bbox_ur(self): """ The sky coordinates of the upper-right vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmax.value + 0.5, self.ymax.value + 0.5, self._wcs, origin=0) else: return None
python
def sky_bbox_ur(self): """ The sky coordinates of the upper-right vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*. """ if self._wcs is not None: return pixel_to_skycoord(self.xmax.value + 0.5, self.ymax.value + 0.5, self._wcs, origin=0) else: return None
[ "def", "sky_bbox_ur", "(", "self", ")", ":", "if", "self", ".", "_wcs", "is", "not", "None", ":", "return", "pixel_to_skycoord", "(", "self", ".", "xmax", ".", "value", "+", "0.5", ",", "self", ".", "ymax", ".", "value", "+", "0.5", ",", "self", ".", "_wcs", ",", "origin", "=", "0", ")", "else", ":", "return", "None" ]
The sky coordinates of the upper-right vertex of the minimal bounding box of the source segment, returned as a `~astropy.coordinates.SkyCoord` object. The bounding box encloses all of the source segment pixels in their entirety, thus the vertices are at the pixel *corners*.
[ "The", "sky", "coordinates", "of", "the", "upper", "-", "right", "vertex", "of", "the", "minimal", "bounding", "box", "of", "the", "source", "segment", "returned", "as", "a", "~astropy", ".", "coordinates", ".", "SkyCoord", "object", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L656-L671
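The four sky_bbox_* properties differ only in which bounding-box corner is passed to pixel_to_skycoord. Because pixel centers sit on integer coordinates, the box vertices are the pixel limits offset by half a pixel. A small sketch of just that arithmetic, with assumed integer limits:

# Assumed bounding-box limits (pixel-center coordinates) for illustration
xmin, xmax, ymin, ymax = 10, 20, 5, 12

# Vertices at the pixel *corners*, as the docstrings above describe
corners = {
    'll': (xmin - 0.5, ymin - 0.5),
    'ul': (xmin - 0.5, ymax + 0.5),
    'lr': (xmax + 0.5, ymin - 0.5),
    'ur': (xmax + 0.5, ymax + 0.5),
}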
10,521
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.min_value
def min_value(self): """ The minimum pixel value of the ``data`` within the source segment. """ if self._is_completely_masked: return np.nan * self._data_unit else: return np.min(self.values)
python
def min_value(self): """ The minimum pixel value of the ``data`` within the source segment. """ if self._is_completely_masked: return np.nan * self._data_unit else: return np.min(self.values)
[ "def", "min_value", "(", "self", ")", ":", "if", "self", ".", "_is_completely_masked", ":", "return", "np", ".", "nan", "*", "self", ".", "_data_unit", "else", ":", "return", "np", ".", "min", "(", "self", ".", "values", ")" ]
The minimum pixel value of the ``data`` within the source segment.
[ "The", "minimum", "pixel", "value", "of", "the", "data", "within", "the", "source", "segment", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L674-L683
10,522
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.max_value
def max_value(self): """ The maximum pixel value of the ``data`` within the source segment. """ if self._is_completely_masked: return np.nan * self._data_unit else: return np.max(self.values)
python
def max_value(self): """ The maximum pixel value of the ``data`` within the source segment. """ if self._is_completely_masked: return np.nan * self._data_unit else: return np.max(self.values)
[ "def", "max_value", "(", "self", ")", ":", "if", "self", ".", "_is_completely_masked", ":", "return", "np", ".", "nan", "*", "self", ".", "_data_unit", "else", ":", "return", "np", ".", "max", "(", "self", ".", "values", ")" ]
The maximum pixel value of the ``data`` within the source segment.
[ "The", "maximum", "pixel", "value", "of", "the", "data", "within", "the", "source", "segment", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L686-L695
10,523
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.source_sum
def source_sum(self): """ The sum of the unmasked ``data`` values within the source segment. .. math:: F = \\sum_{i \\in S} (I_i - B_i) where :math:`F` is ``source_sum``, :math:`(I_i - B_i)` is the ``data``, and :math:`S` are the unmasked pixels in the source segment. Non-finite pixel values (e.g. NaN, infs) are excluded (automatically masked). """ if self._is_completely_masked: return np.nan * self._data_unit # table output needs unit else: return np.sum(self.values)
python
def source_sum(self): """ The sum of the unmasked ``data`` values within the source segment. .. math:: F = \\sum_{i \\in S} (I_i - B_i) where :math:`F` is ``source_sum``, :math:`(I_i - B_i)` is the ``data``, and :math:`S` are the unmasked pixels in the source segment. Non-finite pixel values (e.g. NaN, infs) are excluded (automatically masked). """ if self._is_completely_masked: return np.nan * self._data_unit # table output needs unit else: return np.sum(self.values)
[ "def", "source_sum", "(", "self", ")", ":", "if", "self", ".", "_is_completely_masked", ":", "return", "np", ".", "nan", "*", "self", ".", "_data_unit", "# table output needs unit", "else", ":", "return", "np", ".", "sum", "(", "self", ".", "values", ")" ]
The sum of the unmasked ``data`` values within the source segment. .. math:: F = \\sum_{i \\in S} (I_i - B_i) where :math:`F` is ``source_sum``, :math:`(I_i - B_i)` is the ``data``, and :math:`S` are the unmasked pixels in the source segment. Non-finite pixel values (e.g. NaN, infs) are excluded (automatically masked).
[ "The", "sum", "of", "the", "unmasked", "data", "values", "within", "the", "source", "segment", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L818-L835
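A worked numpy sketch of the source_sum formula F = sum over S of (I_i - B_i), using a hypothetical background-subtracted cutout; the NaN pixel is dropped, mirroring the automatic masking described above:

import numpy as np

# Hypothetical background-subtracted data and segment membership mask
data = np.array([[1.0, 2.0], [np.nan, 4.0]])
in_segment = np.array([[True, True], [True, False]])

values = data[in_segment & np.isfinite(data)]   # excludes the NaN pixel
source_sum = values.sum()                       # 1.0 + 2.0 = 3.0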
10,524
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.source_sum_err
def source_sum_err(self): """ The uncertainty of `~photutils.SourceProperties.source_sum`, propagated from the input ``error`` array. ``source_sum_err`` is the quadrature sum of the total errors over the non-masked pixels within the source segment: .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S} \\sigma_{\\mathrm{tot}, i}^2} where :math:`\\Delta F` is ``source_sum_err``, :math:`\\sigma_{\\mathrm{tot, i}}` are the pixel-wise total errors, and :math:`S` are the non-masked pixels in the source segment. Pixel values that are masked in the input ``data``, including any non-finite pixel values (i.e. NaN, infs) that are automatically masked, are also masked in the error array. """ if self._error is not None: if self._is_completely_masked: return np.nan * self._error_unit # table output needs unit else: return np.sqrt(np.sum(self._error_values ** 2)) else: return None
python
def source_sum_err(self): """ The uncertainty of `~photutils.SourceProperties.source_sum`, propagated from the input ``error`` array. ``source_sum_err`` is the quadrature sum of the total errors over the non-masked pixels within the source segment: .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S} \\sigma_{\\mathrm{tot}, i}^2} where :math:`\\Delta F` is ``source_sum_err``, :math:`\\sigma_{\\mathrm{tot, i}}` are the pixel-wise total errors, and :math:`S` are the non-masked pixels in the source segment. Pixel values that are masked in the input ``data``, including any non-finite pixel values (i.e. NaN, infs) that are automatically masked, are also masked in the error array. """ if self._error is not None: if self._is_completely_masked: return np.nan * self._error_unit # table output needs unit else: return np.sqrt(np.sum(self._error_values ** 2)) else: return None
[ "def", "source_sum_err", "(", "self", ")", ":", "if", "self", ".", "_error", "is", "not", "None", ":", "if", "self", ".", "_is_completely_masked", ":", "return", "np", ".", "nan", "*", "self", ".", "_error_unit", "# table output needs unit", "else", ":", "return", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "self", ".", "_error_values", "**", "2", ")", ")", "else", ":", "return", "None" ]
The uncertainty of `~photutils.SourceProperties.source_sum`, propagated from the input ``error`` array. ``source_sum_err`` is the quadrature sum of the total errors over the non-masked pixels within the source segment: .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S} \\sigma_{\\mathrm{tot}, i}^2} where :math:`\\Delta F` is ``source_sum_err``, :math:`\\sigma_{\\mathrm{tot, i}}` are the pixel-wise total errors, and :math:`S` are the non-masked pixels in the source segment. Pixel values that are masked in the input ``data``, including any non-finite pixel values (i.e. NaN, infs) that are automatically masked, are also masked in the error array.
[ "The", "uncertainty", "of", "~photutils", ".", "SourceProperties", ".", "source_sum", "propagated", "from", "the", "input", "error", "array", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L838-L865
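The quadrature sum in source_sum_err reduces to a one-liner; a sketch with made-up per-pixel total errors:

import numpy as np

sigma_tot = np.array([0.5, 0.5, 1.0])            # hypothetical per-pixel total errors
source_sum_err = np.sqrt(np.sum(sigma_tot**2))   # sqrt(0.25 + 0.25 + 1.0) ~= 1.225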
10,525
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.background_sum
def background_sum(self): """ The sum of ``background`` values within the source segment. Pixel values that are masked in the input ``data``, including any non-finite pixel values (i.e. NaN, infs) that are automatically masked, are also masked in the background array. """ if self._background is not None: if self._is_completely_masked: return np.nan * self._background_unit # unit for table else: return np.sum(self._background_values) else: return None
python
def background_sum(self): """ The sum of ``background`` values within the source segment. Pixel values that are masked in the input ``data``, including any non-finite pixel values (i.e. NaN, infs) that are automatically masked, are also masked in the background array. """ if self._background is not None: if self._is_completely_masked: return np.nan * self._background_unit # unit for table else: return np.sum(self._background_values) else: return None
[ "def", "background_sum", "(", "self", ")", ":", "if", "self", ".", "_background", "is", "not", "None", ":", "if", "self", ".", "_is_completely_masked", ":", "return", "np", ".", "nan", "*", "self", ".", "_background_unit", "# unit for table", "else", ":", "return", "np", ".", "sum", "(", "self", ".", "_background_values", ")", "else", ":", "return", "None" ]
The sum of ``background`` values within the source segment. Pixel values that are masked in the input ``data``, including any non-finite pixel values (i.e. NaN, infs) that are automatically masked, are also masked in the background array.
[ "The", "sum", "of", "background", "values", "within", "the", "source", "segment", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L868-L883
10,526
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.background_mean
def background_mean(self): """ The mean of ``background`` values within the source segment. Pixel values that are masked in the input ``data``, including any non-finite pixel values (i.e. NaN, infs) that are automatically masked, are also masked in the background array. """ if self._background is not None: if self._is_completely_masked: return np.nan * self._background_unit # unit for table else: return np.mean(self._background_values) else: return None
python
def background_mean(self): """ The mean of ``background`` values within the source segment. Pixel values that are masked in the input ``data``, including any non-finite pixel values (i.e. NaN, infs) that are automatically masked, are also masked in the background array. """ if self._background is not None: if self._is_completely_masked: return np.nan * self._background_unit # unit for table else: return np.mean(self._background_values) else: return None
[ "def", "background_mean", "(", "self", ")", ":", "if", "self", ".", "_background", "is", "not", "None", ":", "if", "self", ".", "_is_completely_masked", ":", "return", "np", ".", "nan", "*", "self", ".", "_background_unit", "# unit for table", "else", ":", "return", "np", ".", "mean", "(", "self", ".", "_background_values", ")", "else", ":", "return", "None" ]
The mean of ``background`` values within the source segment. Pixel values that are masked in the input ``data``, including any non-finite pixel values (i.e. NaN, infs) that are automatically masked, are also masked in the background array.
[ "The", "mean", "of", "background", "values", "within", "the", "source", "segment", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L886-L901
10,527
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.background_at_centroid
def background_at_centroid(self): """ The value of the ``background`` at the position of the source centroid. The background value at fractional position values are determined using bilinear interpolation. """ from scipy.ndimage import map_coordinates if self._background is not None: # centroid can still be NaN if all data values are <= 0 if (self._is_completely_masked or np.any(~np.isfinite(self.centroid))): return np.nan * self._background_unit # unit for table else: value = map_coordinates(self._background, [[self.ycentroid.value], [self.xcentroid.value]], order=1, mode='nearest')[0] return value * self._background_unit else: return None
python
def background_at_centroid(self): """ The value of the ``background`` at the position of the source centroid. The background value at fractional position values are determined using bilinear interpolation. """ from scipy.ndimage import map_coordinates if self._background is not None: # centroid can still be NaN if all data values are <= 0 if (self._is_completely_masked or np.any(~np.isfinite(self.centroid))): return np.nan * self._background_unit # unit for table else: value = map_coordinates(self._background, [[self.ycentroid.value], [self.xcentroid.value]], order=1, mode='nearest')[0] return value * self._background_unit else: return None
[ "def", "background_at_centroid", "(", "self", ")", ":", "from", "scipy", ".", "ndimage", "import", "map_coordinates", "if", "self", ".", "_background", "is", "not", "None", ":", "# centroid can still be NaN if all data values are <= 0", "if", "(", "self", ".", "_is_completely_masked", "or", "np", ".", "any", "(", "~", "np", ".", "isfinite", "(", "self", ".", "centroid", ")", ")", ")", ":", "return", "np", ".", "nan", "*", "self", ".", "_background_unit", "# unit for table", "else", ":", "value", "=", "map_coordinates", "(", "self", ".", "_background", ",", "[", "[", "self", ".", "ycentroid", ".", "value", "]", ",", "[", "self", ".", "xcentroid", ".", "value", "]", "]", ",", "order", "=", "1", ",", "mode", "=", "'nearest'", ")", "[", "0", "]", "return", "value", "*", "self", ".", "_background_unit", "else", ":", "return", "None" ]
The value of the ``background`` at the position of the source centroid. The background value at fractional positions is determined using bilinear interpolation.
[ "The", "value", "of", "the", "background", "at", "the", "position", "of", "the", "source", "centroid", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L904-L928
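The bilinear interpolation in background_at_centroid comes from scipy's map_coordinates with order=1. A minimal sketch with a hypothetical background mesh and a fractional centroid position:

import numpy as np
from scipy.ndimage import map_coordinates

background = np.arange(16, dtype=float).reshape(4, 4)   # hypothetical background image
ycen, xcen = 1.5, 2.25                                   # hypothetical fractional centroid

# order=1 -> bilinear; coordinates are given as [[y], [x]], matching the property body
value = map_coordinates(background, [[ycen], [xcen]], order=1, mode='nearest')[0]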
10,528
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.perimeter
def perimeter(self): """ The total perimeter of the source segment, approximated lines through the centers of the border pixels using a 4-connectivity. If any masked pixels make holes within the source segment, then the perimeter around the inner hole (e.g. an annulus) will also contribute to the total perimeter. """ if self._is_completely_masked: return np.nan * u.pix # unit for table else: from skimage.measure import perimeter return perimeter(~self._total_mask, neighbourhood=4) * u.pix
python
def perimeter(self): """ The total perimeter of the source segment, approximated lines through the centers of the border pixels using a 4-connectivity. If any masked pixels make holes within the source segment, then the perimeter around the inner hole (e.g. an annulus) will also contribute to the total perimeter. """ if self._is_completely_masked: return np.nan * u.pix # unit for table else: from skimage.measure import perimeter return perimeter(~self._total_mask, neighbourhood=4) * u.pix
[ "def", "perimeter", "(", "self", ")", ":", "if", "self", ".", "_is_completely_masked", ":", "return", "np", ".", "nan", "*", "u", ".", "pix", "# unit for table", "else", ":", "from", "skimage", ".", "measure", "import", "perimeter", "return", "perimeter", "(", "~", "self", ".", "_total_mask", ",", "neighbourhood", "=", "4", ")", "*", "u", ".", "pix" ]
The total perimeter of the source segment, approximated as lines through the centers of the border pixels using 4-connectivity. If any masked pixels make holes within the source segment, then the perimeter around the inner hole (e.g. an annulus) will also contribute to the total perimeter.
[ "The", "total", "perimeter", "of", "the", "source", "segment", "approximated", "lines", "through", "the", "centers", "of", "the", "border", "pixels", "using", "a", "4", "-", "connectivity", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L957-L971
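A small sketch of the underlying scikit-image call, using a made-up boolean segment mask; the connectivity argument is passed positionally here rather than by keyword, to avoid depending on a particular spelling of the keyword name:

import numpy as np
from skimage.measure import perimeter

# Hypothetical mask that is True for unmasked pixels inside the source segment
segment_mask = np.zeros((7, 7), dtype=bool)
segment_mask[2:5, 2:5] = True

perim_pix = perimeter(segment_mask, 4)   # 4-connectivity, as in the property above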
10,529
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.inertia_tensor
def inertia_tensor(self): """ The inertia tensor of the source for the rotation around its center of mass. """ mu = self.moments_central a = mu[0, 2] b = -mu[1, 1] c = mu[2, 0] return np.array([[a, b], [b, c]]) * u.pix**2
python
def inertia_tensor(self): """ The inertia tensor of the source for the rotation around its center of mass. """ mu = self.moments_central a = mu[0, 2] b = -mu[1, 1] c = mu[2, 0] return np.array([[a, b], [b, c]]) * u.pix**2
[ "def", "inertia_tensor", "(", "self", ")", ":", "mu", "=", "self", ".", "moments_central", "a", "=", "mu", "[", "0", ",", "2", "]", "b", "=", "-", "mu", "[", "1", ",", "1", "]", "c", "=", "mu", "[", "2", ",", "0", "]", "return", "np", ".", "array", "(", "[", "[", "a", ",", "b", "]", ",", "[", "b", ",", "c", "]", "]", ")", "*", "u", ".", "pix", "**", "2" ]
The inertia tensor of the source for the rotation around its center of mass.
[ "The", "inertia", "tensor", "of", "the", "source", "for", "the", "rotation", "around", "its", "center", "of", "mass", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L974-L984
10,530
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.covariance
def covariance(self): """ The covariance matrix of the 2D Gaussian function that has the same second-order moments as the source. """ mu = self.moments_central if mu[0, 0] != 0: m = mu / mu[0, 0] covariance = self._check_covariance( np.array([[m[0, 2], m[1, 1]], [m[1, 1], m[2, 0]]])) return covariance * u.pix**2 else: return np.empty((2, 2)) * np.nan * u.pix**2
python
def covariance(self): """ The covariance matrix of the 2D Gaussian function that has the same second-order moments as the source. """ mu = self.moments_central if mu[0, 0] != 0: m = mu / mu[0, 0] covariance = self._check_covariance( np.array([[m[0, 2], m[1, 1]], [m[1, 1], m[2, 0]]])) return covariance * u.pix**2 else: return np.empty((2, 2)) * np.nan * u.pix**2
[ "def", "covariance", "(", "self", ")", ":", "mu", "=", "self", ".", "moments_central", "if", "mu", "[", "0", ",", "0", "]", "!=", "0", ":", "m", "=", "mu", "/", "mu", "[", "0", ",", "0", "]", "covariance", "=", "self", ".", "_check_covariance", "(", "np", ".", "array", "(", "[", "[", "m", "[", "0", ",", "2", "]", ",", "m", "[", "1", ",", "1", "]", "]", ",", "[", "m", "[", "1", ",", "1", "]", ",", "m", "[", "2", ",", "0", "]", "]", "]", ")", ")", "return", "covariance", "*", "u", ".", "pix", "**", "2", "else", ":", "return", "np", ".", "empty", "(", "(", "2", ",", "2", ")", ")", "*", "np", ".", "nan", "*", "u", ".", "pix", "**", "2" ]
The covariance matrix of the 2D Gaussian function that has the same second-order moments as the source.
[ "The", "covariance", "matrix", "of", "the", "2D", "Gaussian", "function", "that", "has", "the", "same", "second", "-", "order", "moments", "as", "the", "source", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L987-L1000
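The covariance matrix is built from normalized second-order central moments. A from-scratch numpy sketch of those moments for a hypothetical cutout, assuming the skimage-style ordering in which m[0, 2] is the x variance and m[2, 0] the y variance:

import numpy as np

img = np.zeros((9, 9))
img[3:6, 2:8] = 1.0                      # hypothetical source weights

y, x = np.mgrid[0:img.shape[0], 0:img.shape[1]]
total = img.sum()
xbar = (x * img).sum() / total
ybar = (y * img).sum() / total

# Normalized second-order central moments: var_x, var_y, and the xy cross term
var_x = ((x - xbar)**2 * img).sum() / total
var_y = ((y - ybar)**2 * img).sum() / total
cov_xy = ((x - xbar) * (y - ybar) * img).sum() / total

covariance = np.array([[var_x, cov_xy], [cov_xy, var_y]])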
10,531
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.covariance_eigvals
def covariance_eigvals(self): """ The two eigenvalues of the `covariance` matrix in decreasing order. """ if not np.isnan(np.sum(self.covariance)): eigvals = np.linalg.eigvals(self.covariance) if np.any(eigvals < 0): # negative variance return (np.nan, np.nan) * u.pix**2 # pragma: no cover return (np.max(eigvals), np.min(eigvals)) * u.pix**2 else: return (np.nan, np.nan) * u.pix**2
python
def covariance_eigvals(self): """ The two eigenvalues of the `covariance` matrix in decreasing order. """ if not np.isnan(np.sum(self.covariance)): eigvals = np.linalg.eigvals(self.covariance) if np.any(eigvals < 0): # negative variance return (np.nan, np.nan) * u.pix**2 # pragma: no cover return (np.max(eigvals), np.min(eigvals)) * u.pix**2 else: return (np.nan, np.nan) * u.pix**2
[ "def", "covariance_eigvals", "(", "self", ")", ":", "if", "not", "np", ".", "isnan", "(", "np", ".", "sum", "(", "self", ".", "covariance", ")", ")", ":", "eigvals", "=", "np", ".", "linalg", ".", "eigvals", "(", "self", ".", "covariance", ")", "if", "np", ".", "any", "(", "eigvals", "<", "0", ")", ":", "# negative variance", "return", "(", "np", ".", "nan", ",", "np", ".", "nan", ")", "*", "u", ".", "pix", "**", "2", "# pragma: no cover", "return", "(", "np", ".", "max", "(", "eigvals", ")", ",", "np", ".", "min", "(", "eigvals", ")", ")", "*", "u", ".", "pix", "**", "2", "else", ":", "return", "(", "np", ".", "nan", ",", "np", ".", "nan", ")", "*", "u", ".", "pix", "**", "2" ]
The two eigenvalues of the `covariance` matrix in decreasing order.
[ "The", "two", "eigenvalues", "of", "the", "covariance", "matrix", "in", "decreasing", "order", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L1024-L1036
10,532
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.eccentricity
def eccentricity(self): """ The eccentricity of the 2D Gaussian function that has the same second-order moments as the source. The eccentricity is the fraction of the distance along the semimajor axis at which the focus lies. .. math:: e = \\sqrt{1 - \\frac{b^2}{a^2}} where :math:`a` and :math:`b` are the lengths of the semimajor and semiminor axes, respectively. """ l1, l2 = self.covariance_eigvals if l1 == 0: return 0. # pragma: no cover return np.sqrt(1. - (l2 / l1))
python
def eccentricity(self): """ The eccentricity of the 2D Gaussian function that has the same second-order moments as the source. The eccentricity is the fraction of the distance along the semimajor axis at which the focus lies. .. math:: e = \\sqrt{1 - \\frac{b^2}{a^2}} where :math:`a` and :math:`b` are the lengths of the semimajor and semiminor axes, respectively. """ l1, l2 = self.covariance_eigvals if l1 == 0: return 0. # pragma: no cover return np.sqrt(1. - (l2 / l1))
[ "def", "eccentricity", "(", "self", ")", ":", "l1", ",", "l2", "=", "self", ".", "covariance_eigvals", "if", "l1", "==", "0", ":", "return", "0.", "# pragma: no cover", "return", "np", ".", "sqrt", "(", "1.", "-", "(", "l2", "/", "l1", ")", ")" ]
The eccentricity of the 2D Gaussian function that has the same second-order moments as the source. The eccentricity is the fraction of the distance along the semimajor axis at which the focus lies. .. math:: e = \\sqrt{1 - \\frac{b^2}{a^2}} where :math:`a` and :math:`b` are the lengths of the semimajor and semiminor axes, respectively.
[ "The", "eccentricity", "of", "the", "2D", "Gaussian", "function", "that", "has", "the", "same", "second", "-", "order", "moments", "as", "the", "source", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L1061-L1078
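In the property above the covariance eigenvalues play the role of a^2 and b^2, so the formula reduces to e = sqrt(1 - l2/l1). A one-line numeric check with assumed eigenvalues:

import numpy as np

l1, l2 = 4.0, 1.0                        # hypothetical eigenvalues (semimajor^2, semiminor^2 scale)
eccentricity = np.sqrt(1.0 - l2 / l1)    # ~0.866, i.e. an axis ratio b/a of 0.5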
10,533
astropy/photutils
photutils/segmentation/properties.py
SourceProperties.orientation
def orientation(self): """ The angle in radians between the ``x`` axis and the major axis of the 2D Gaussian function that has the same second-order moments as the source. The angle increases in the counter-clockwise direction. """ a, b, b, c = self.covariance.flat if a < 0 or c < 0: # negative variance return np.nan * u.rad # pragma: no cover return 0.5 * np.arctan2(2. * b, (a - c))
python
def orientation(self): """ The angle in radians between the ``x`` axis and the major axis of the 2D Gaussian function that has the same second-order moments as the source. The angle increases in the counter-clockwise direction. """ a, b, b, c = self.covariance.flat if a < 0 or c < 0: # negative variance return np.nan * u.rad # pragma: no cover return 0.5 * np.arctan2(2. * b, (a - c))
[ "def", "orientation", "(", "self", ")", ":", "a", ",", "b", ",", "b", ",", "c", "=", "self", ".", "covariance", ".", "flat", "if", "a", "<", "0", "or", "c", "<", "0", ":", "# negative variance", "return", "np", ".", "nan", "*", "u", ".", "rad", "# pragma: no cover", "return", "0.5", "*", "np", ".", "arctan2", "(", "2.", "*", "b", ",", "(", "a", "-", "c", ")", ")" ]
The angle in radians between the ``x`` axis and the major axis of the 2D Gaussian function that has the same second-order moments as the source. The angle increases in the counter-clockwise direction.
[ "The", "angle", "in", "radians", "between", "the", "x", "axis", "and", "the", "major", "axis", "of", "the", "2D", "Gaussian", "function", "that", "has", "the", "same", "second", "-", "order", "moments", "as", "the", "source", ".", "The", "angle", "increases", "in", "the", "counter", "-", "clockwise", "direction", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L1081-L1092
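The orientation follows the standard two-theta relation for a 2D Gaussian. A numeric sketch with assumed covariance entries (a = var_x, b = cov_xy, c = var_y):

import numpy as np

a, b, c = 4.0, 1.5, 2.0
theta = 0.5 * np.arctan2(2.0 * b, a - c)   # radians, counter-clockwise from the +x axis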
10,534
astropy/photutils
photutils/utils/stats.py
_mesh_values
def _mesh_values(data, box_size): """ Extract all the data values in boxes of size ``box_size``. Values from incomplete boxes, either because of the image edges or masked pixels, are not returned. Parameters ---------- data : 2D `~numpy.ma.MaskedArray` The input masked array. box_size : int The box size. Returns ------- result : 2D `~numpy.ndarray` A 2D array containing the data values in the boxes (along the x axis). """ data = np.ma.asanyarray(data) ny, nx = data.shape nyboxes = ny // box_size nxboxes = nx // box_size # include only complete boxes ny_crop = nyboxes * box_size nx_crop = nxboxes * box_size data = data[0:ny_crop, 0:nx_crop] # a reshaped 2D masked array with mesh data along the x axis data = np.ma.swapaxes(data.reshape( nyboxes, box_size, nxboxes, box_size), 1, 2).reshape( nyboxes * nxboxes, box_size * box_size) # include only boxes without any masked pixels idx = np.where(np.ma.count_masked(data, axis=1) == 0) return data[idx]
python
def _mesh_values(data, box_size): """ Extract all the data values in boxes of size ``box_size``. Values from incomplete boxes, either because of the image edges or masked pixels, are not returned. Parameters ---------- data : 2D `~numpy.ma.MaskedArray` The input masked array. box_size : int The box size. Returns ------- result : 2D `~numpy.ndarray` A 2D array containing the data values in the boxes (along the x axis). """ data = np.ma.asanyarray(data) ny, nx = data.shape nyboxes = ny // box_size nxboxes = nx // box_size # include only complete boxes ny_crop = nyboxes * box_size nx_crop = nxboxes * box_size data = data[0:ny_crop, 0:nx_crop] # a reshaped 2D masked array with mesh data along the x axis data = np.ma.swapaxes(data.reshape( nyboxes, box_size, nxboxes, box_size), 1, 2).reshape( nyboxes * nxboxes, box_size * box_size) # include only boxes without any masked pixels idx = np.where(np.ma.count_masked(data, axis=1) == 0) return data[idx]
[ "def", "_mesh_values", "(", "data", ",", "box_size", ")", ":", "data", "=", "np", ".", "ma", ".", "asanyarray", "(", "data", ")", "ny", ",", "nx", "=", "data", ".", "shape", "nyboxes", "=", "ny", "//", "box_size", "nxboxes", "=", "nx", "//", "box_size", "# include only complete boxes", "ny_crop", "=", "nyboxes", "*", "box_size", "nx_crop", "=", "nxboxes", "*", "box_size", "data", "=", "data", "[", "0", ":", "ny_crop", ",", "0", ":", "nx_crop", "]", "# a reshaped 2D masked array with mesh data along the x axis", "data", "=", "np", ".", "ma", ".", "swapaxes", "(", "data", ".", "reshape", "(", "nyboxes", ",", "box_size", ",", "nxboxes", ",", "box_size", ")", ",", "1", ",", "2", ")", ".", "reshape", "(", "nyboxes", "*", "nxboxes", ",", "box_size", "*", "box_size", ")", "# include only boxes without any masked pixels", "idx", "=", "np", ".", "where", "(", "np", ".", "ma", ".", "count_masked", "(", "data", ",", "axis", "=", "1", ")", "==", "0", ")", "return", "data", "[", "idx", "]" ]
Extract all the data values in boxes of size ``box_size``. Values from incomplete boxes, either because of the image edges or masked pixels, are not returned. Parameters ---------- data : 2D `~numpy.ma.MaskedArray` The input masked array. box_size : int The box size. Returns ------- result : 2D `~numpy.ndarray` A 2D array containing the data values in the boxes (along the x axis).
[ "Extract", "all", "the", "data", "values", "in", "boxes", "of", "size", "box_size", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/stats.py#L9-L50
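The reshape/swapaxes trick in _mesh_values turns a 2D image into one row per box. A plain-numpy sketch on a made-up 6x6 array with 3x3 boxes (without the masked-pixel filtering):

import numpy as np

data = np.arange(36.0).reshape(6, 6)
box_size = 3
nyboxes, nxboxes = data.shape[0] // box_size, data.shape[1] // box_size

boxes = np.swapaxes(
    data[:nyboxes * box_size, :nxboxes * box_size].reshape(
        nyboxes, box_size, nxboxes, box_size),
    1, 2).reshape(nyboxes * nxboxes, box_size * box_size)
# boxes.shape == (4, 9); each row holds the 9 values of one 3x3 box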
10,535
astropy/photutils
photutils/utils/stats.py
std_blocksum
def std_blocksum(data, block_sizes, mask=None): """ Calculate the standard deviation of block-summed data values at sizes of ``block_sizes``. Values from incomplete blocks, either because of the image edges or masked pixels, are not included. Parameters ---------- data : array-like The 2D array to block sum. block_sizes : int, array-like of int An array of integer (square) block sizes. mask : array-like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Blocks that contain *any* masked data are excluded from calculations. Returns ------- result : `~numpy.ndarray` An array of the standard deviations of the block-summed array for the input ``block_sizes``. """ data = np.ma.asanyarray(data) if mask is not None and mask is not np.ma.nomask: mask = np.asanyarray(mask) if data.shape != mask.shape: raise ValueError('data and mask must have the same shape.') data.mask |= mask stds = [] block_sizes = np.atleast_1d(block_sizes) for block_size in block_sizes: mesh_values = _mesh_values(data, block_size) block_sums = np.sum(mesh_values, axis=1) stds.append(np.std(block_sums)) return np.array(stds)
python
def std_blocksum(data, block_sizes, mask=None): """ Calculate the standard deviation of block-summed data values at sizes of ``block_sizes``. Values from incomplete blocks, either because of the image edges or masked pixels, are not included. Parameters ---------- data : array-like The 2D array to block sum. block_sizes : int, array-like of int An array of integer (square) block sizes. mask : array-like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Blocks that contain *any* masked data are excluded from calculations. Returns ------- result : `~numpy.ndarray` An array of the standard deviations of the block-summed array for the input ``block_sizes``. """ data = np.ma.asanyarray(data) if mask is not None and mask is not np.ma.nomask: mask = np.asanyarray(mask) if data.shape != mask.shape: raise ValueError('data and mask must have the same shape.') data.mask |= mask stds = [] block_sizes = np.atleast_1d(block_sizes) for block_size in block_sizes: mesh_values = _mesh_values(data, block_size) block_sums = np.sum(mesh_values, axis=1) stds.append(np.std(block_sums)) return np.array(stds)
[ "def", "std_blocksum", "(", "data", ",", "block_sizes", ",", "mask", "=", "None", ")", ":", "data", "=", "np", ".", "ma", ".", "asanyarray", "(", "data", ")", "if", "mask", "is", "not", "None", "and", "mask", "is", "not", "np", ".", "ma", ".", "nomask", ":", "mask", "=", "np", ".", "asanyarray", "(", "mask", ")", "if", "data", ".", "shape", "!=", "mask", ".", "shape", ":", "raise", "ValueError", "(", "'data and mask must have the same shape.'", ")", "data", ".", "mask", "|=", "mask", "stds", "=", "[", "]", "block_sizes", "=", "np", ".", "atleast_1d", "(", "block_sizes", ")", "for", "block_size", "in", "block_sizes", ":", "mesh_values", "=", "_mesh_values", "(", "data", ",", "block_size", ")", "block_sums", "=", "np", ".", "sum", "(", "mesh_values", ",", "axis", "=", "1", ")", "stds", ".", "append", "(", "np", ".", "std", "(", "block_sums", ")", ")", "return", "np", ".", "array", "(", "stds", ")" ]
Calculate the standard deviation of block-summed data values at sizes of ``block_sizes``. Values from incomplete blocks, either because of the image edges or masked pixels, are not included. Parameters ---------- data : array-like The 2D array to block sum. block_sizes : int, array-like of int An array of integer (square) block sizes. mask : array-like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Blocks that contain *any* masked data are excluded from calculations. Returns ------- result : `~numpy.ndarray` An array of the standard deviations of the block-summed array for the input ``block_sizes``.
[ "Calculate", "the", "standard", "deviation", "of", "block", "-", "summed", "data", "values", "at", "sizes", "of", "block_sizes", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/stats.py#L53-L97
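A usage sketch of std_blocksum (the function defined in the record above, assumed to be in scope) on synthetic uncorrelated noise; for unit-variance white noise the block-sum standard deviation should grow roughly linearly with the block side length:

import numpy as np

rng = np.random.default_rng(0)
noise = rng.normal(0.0, 1.0, (128, 128))     # hypothetical noise-only image
mask = np.zeros(noise.shape, dtype=bool)     # nothing masked in this toy case

stds = std_blocksum(noise, [2, 4, 8], mask=mask)
# expected to be roughly [2, 4, 8] for unit-variance white noise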
10,536
astropy/photutils
photutils/psf/photometry.py
BasicPSFPhotometry.nstar
def nstar(self, image, star_groups): """ Fit, as appropriate, a compound or single model to the given ``star_groups``. Groups are fitted sequentially from the smallest to the biggest. In each iteration, ``image`` is subtracted by the previous fitted group. Parameters ---------- image : numpy.ndarray Background-subtracted image. star_groups : `~astropy.table.Table` This table must contain the following columns: ``id``, ``group_id``, ``x_0``, ``y_0``, ``flux_0``. ``x_0`` and ``y_0`` are initial estimates of the centroids and ``flux_0`` is an initial estimate of the flux. Additionally, columns named as ``<param_name>_0`` are required if any other parameter in the psf model is free (i.e., the ``fixed`` attribute of that parameter is ``False``). Returns ------- result_tab : `~astropy.table.Table` Astropy table that contains photometry results. image : numpy.ndarray Residual image. """ result_tab = Table() for param_tab_name in self._pars_to_output.keys(): result_tab.add_column(Column(name=param_tab_name)) unc_tab = Table() for param, isfixed in self.psf_model.fixed.items(): if not isfixed: unc_tab.add_column(Column(name=param + "_unc")) y, x = np.indices(image.shape) star_groups = star_groups.group_by('group_id') for n in range(len(star_groups.groups)): group_psf = get_grouped_psf_model(self.psf_model, star_groups.groups[n], self._pars_to_set) usepixel = np.zeros_like(image, dtype=np.bool) for row in star_groups.groups[n]: usepixel[overlap_slices(large_array_shape=image.shape, small_array_shape=self.fitshape, position=(row['y_0'], row['x_0']), mode='trim')[0]] = True fit_model = self.fitter(group_psf, x[usepixel], y[usepixel], image[usepixel]) param_table = self._model_params2table(fit_model, len(star_groups.groups[n])) result_tab = vstack([result_tab, param_table]) if 'param_cov' in self.fitter.fit_info.keys(): unc_tab = vstack([unc_tab, self._get_uncertainties( len(star_groups.groups[n]))]) try: from astropy.nddata.utils import NoOverlapError except ImportError: raise ImportError("astropy 1.1 or greater is required in " "order to use this class.") # do not subtract if the fitting did not go well try: image = subtract_psf(image, self.psf_model, param_table, subshape=self.fitshape) except NoOverlapError: pass if 'param_cov' in self.fitter.fit_info.keys(): result_tab = hstack([result_tab, unc_tab]) return result_tab, image
python
def nstar(self, image, star_groups): """ Fit, as appropriate, a compound or single model to the given ``star_groups``. Groups are fitted sequentially from the smallest to the biggest. In each iteration, ``image`` is subtracted by the previous fitted group. Parameters ---------- image : numpy.ndarray Background-subtracted image. star_groups : `~astropy.table.Table` This table must contain the following columns: ``id``, ``group_id``, ``x_0``, ``y_0``, ``flux_0``. ``x_0`` and ``y_0`` are initial estimates of the centroids and ``flux_0`` is an initial estimate of the flux. Additionally, columns named as ``<param_name>_0`` are required if any other parameter in the psf model is free (i.e., the ``fixed`` attribute of that parameter is ``False``). Returns ------- result_tab : `~astropy.table.Table` Astropy table that contains photometry results. image : numpy.ndarray Residual image. """ result_tab = Table() for param_tab_name in self._pars_to_output.keys(): result_tab.add_column(Column(name=param_tab_name)) unc_tab = Table() for param, isfixed in self.psf_model.fixed.items(): if not isfixed: unc_tab.add_column(Column(name=param + "_unc")) y, x = np.indices(image.shape) star_groups = star_groups.group_by('group_id') for n in range(len(star_groups.groups)): group_psf = get_grouped_psf_model(self.psf_model, star_groups.groups[n], self._pars_to_set) usepixel = np.zeros_like(image, dtype=np.bool) for row in star_groups.groups[n]: usepixel[overlap_slices(large_array_shape=image.shape, small_array_shape=self.fitshape, position=(row['y_0'], row['x_0']), mode='trim')[0]] = True fit_model = self.fitter(group_psf, x[usepixel], y[usepixel], image[usepixel]) param_table = self._model_params2table(fit_model, len(star_groups.groups[n])) result_tab = vstack([result_tab, param_table]) if 'param_cov' in self.fitter.fit_info.keys(): unc_tab = vstack([unc_tab, self._get_uncertainties( len(star_groups.groups[n]))]) try: from astropy.nddata.utils import NoOverlapError except ImportError: raise ImportError("astropy 1.1 or greater is required in " "order to use this class.") # do not subtract if the fitting did not go well try: image = subtract_psf(image, self.psf_model, param_table, subshape=self.fitshape) except NoOverlapError: pass if 'param_cov' in self.fitter.fit_info.keys(): result_tab = hstack([result_tab, unc_tab]) return result_tab, image
[ "def", "nstar", "(", "self", ",", "image", ",", "star_groups", ")", ":", "result_tab", "=", "Table", "(", ")", "for", "param_tab_name", "in", "self", ".", "_pars_to_output", ".", "keys", "(", ")", ":", "result_tab", ".", "add_column", "(", "Column", "(", "name", "=", "param_tab_name", ")", ")", "unc_tab", "=", "Table", "(", ")", "for", "param", ",", "isfixed", "in", "self", ".", "psf_model", ".", "fixed", ".", "items", "(", ")", ":", "if", "not", "isfixed", ":", "unc_tab", ".", "add_column", "(", "Column", "(", "name", "=", "param", "+", "\"_unc\"", ")", ")", "y", ",", "x", "=", "np", ".", "indices", "(", "image", ".", "shape", ")", "star_groups", "=", "star_groups", ".", "group_by", "(", "'group_id'", ")", "for", "n", "in", "range", "(", "len", "(", "star_groups", ".", "groups", ")", ")", ":", "group_psf", "=", "get_grouped_psf_model", "(", "self", ".", "psf_model", ",", "star_groups", ".", "groups", "[", "n", "]", ",", "self", ".", "_pars_to_set", ")", "usepixel", "=", "np", ".", "zeros_like", "(", "image", ",", "dtype", "=", "np", ".", "bool", ")", "for", "row", "in", "star_groups", ".", "groups", "[", "n", "]", ":", "usepixel", "[", "overlap_slices", "(", "large_array_shape", "=", "image", ".", "shape", ",", "small_array_shape", "=", "self", ".", "fitshape", ",", "position", "=", "(", "row", "[", "'y_0'", "]", ",", "row", "[", "'x_0'", "]", ")", ",", "mode", "=", "'trim'", ")", "[", "0", "]", "]", "=", "True", "fit_model", "=", "self", ".", "fitter", "(", "group_psf", ",", "x", "[", "usepixel", "]", ",", "y", "[", "usepixel", "]", ",", "image", "[", "usepixel", "]", ")", "param_table", "=", "self", ".", "_model_params2table", "(", "fit_model", ",", "len", "(", "star_groups", ".", "groups", "[", "n", "]", ")", ")", "result_tab", "=", "vstack", "(", "[", "result_tab", ",", "param_table", "]", ")", "if", "'param_cov'", "in", "self", ".", "fitter", ".", "fit_info", ".", "keys", "(", ")", ":", "unc_tab", "=", "vstack", "(", "[", "unc_tab", ",", "self", ".", "_get_uncertainties", "(", "len", "(", "star_groups", ".", "groups", "[", "n", "]", ")", ")", "]", ")", "try", ":", "from", "astropy", ".", "nddata", ".", "utils", "import", "NoOverlapError", "except", "ImportError", ":", "raise", "ImportError", "(", "\"astropy 1.1 or greater is required in \"", "\"order to use this class.\"", ")", "# do not subtract if the fitting did not go well", "try", ":", "image", "=", "subtract_psf", "(", "image", ",", "self", ".", "psf_model", ",", "param_table", ",", "subshape", "=", "self", ".", "fitshape", ")", "except", "NoOverlapError", ":", "pass", "if", "'param_cov'", "in", "self", ".", "fitter", ".", "fit_info", ".", "keys", "(", ")", ":", "result_tab", "=", "hstack", "(", "[", "result_tab", ",", "unc_tab", "]", ")", "return", "result_tab", ",", "image" ]
Fit, as appropriate, a compound or single model to the given ``star_groups``. Groups are fitted sequentially from the smallest to the biggest. In each iteration, the previously fitted group is subtracted from ``image``. Parameters ---------- image : numpy.ndarray Background-subtracted image. star_groups : `~astropy.table.Table` This table must contain the following columns: ``id``, ``group_id``, ``x_0``, ``y_0``, ``flux_0``. ``x_0`` and ``y_0`` are initial estimates of the centroids and ``flux_0`` is an initial estimate of the flux. Additionally, columns named as ``<param_name>_0`` are required if any other parameter in the psf model is free (i.e., the ``fixed`` attribute of that parameter is ``False``). Returns ------- result_tab : `~astropy.table.Table` Astropy table that contains photometry results. image : numpy.ndarray Residual image.
[ "Fit", "as", "appropriate", "a", "compound", "or", "single", "model", "to", "the", "given", "star_groups", ".", "Groups", "are", "fitted", "sequentially", "from", "the", "smallest", "to", "the", "biggest", ".", "In", "each", "iteration", "image", "is", "subtracted", "by", "the", "previous", "fitted", "group", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/photometry.py#L298-L375
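The usepixel bookkeeping in nstar relies on astropy's overlap_slices to trim fit regions at the image edge. A standalone sketch of that call with assumed shapes and a position near the border:

import numpy as np
from astropy.nddata.utils import overlap_slices

image_shape = (100, 100)          # hypothetical image size
fitshape = (11, 11)               # hypothetical fitting box

# position is (y, x); mode='trim' clips the box at the array edge
slc_large, slc_small = overlap_slices(image_shape, fitshape, position=(3.0, 97.0), mode='trim')

usepixel = np.zeros(image_shape, dtype=bool)
usepixel[slc_large] = True        # pixels that would enter the fit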
10,537
astropy/photutils
photutils/psf/photometry.py
BasicPSFPhotometry._get_uncertainties
def _get_uncertainties(self, star_group_size): """ Retrieve uncertainties on fitted parameters from the fitter object. Parameters ---------- star_group_size : int Number of stars in the given group. Returns ------- unc_tab : `~astropy.table.Table` Table which contains uncertainties on the fitted parameters. The uncertainties are reported as one standard deviation. """ unc_tab = Table() for param_name in self.psf_model.param_names: if not self.psf_model.fixed[param_name]: unc_tab.add_column(Column(name=param_name + "_unc", data=np.empty(star_group_size))) if 'param_cov' in self.fitter.fit_info.keys(): if self.fitter.fit_info['param_cov'] is not None: k = 0 n_fit_params = len(unc_tab.colnames) for i in range(star_group_size): unc_tab[i] = np.sqrt(np.diag( self.fitter.fit_info['param_cov']) )[k: k + n_fit_params] k = k + n_fit_params return unc_tab
python
def _get_uncertainties(self, star_group_size): """ Retrieve uncertainties on fitted parameters from the fitter object. Parameters ---------- star_group_size : int Number of stars in the given group. Returns ------- unc_tab : `~astropy.table.Table` Table which contains uncertainties on the fitted parameters. The uncertainties are reported as one standard deviation. """ unc_tab = Table() for param_name in self.psf_model.param_names: if not self.psf_model.fixed[param_name]: unc_tab.add_column(Column(name=param_name + "_unc", data=np.empty(star_group_size))) if 'param_cov' in self.fitter.fit_info.keys(): if self.fitter.fit_info['param_cov'] is not None: k = 0 n_fit_params = len(unc_tab.colnames) for i in range(star_group_size): unc_tab[i] = np.sqrt(np.diag( self.fitter.fit_info['param_cov']) )[k: k + n_fit_params] k = k + n_fit_params return unc_tab
[ "def", "_get_uncertainties", "(", "self", ",", "star_group_size", ")", ":", "unc_tab", "=", "Table", "(", ")", "for", "param_name", "in", "self", ".", "psf_model", ".", "param_names", ":", "if", "not", "self", ".", "psf_model", ".", "fixed", "[", "param_name", "]", ":", "unc_tab", ".", "add_column", "(", "Column", "(", "name", "=", "param_name", "+", "\"_unc\"", ",", "data", "=", "np", ".", "empty", "(", "star_group_size", ")", ")", ")", "if", "'param_cov'", "in", "self", ".", "fitter", ".", "fit_info", ".", "keys", "(", ")", ":", "if", "self", ".", "fitter", ".", "fit_info", "[", "'param_cov'", "]", "is", "not", "None", ":", "k", "=", "0", "n_fit_params", "=", "len", "(", "unc_tab", ".", "colnames", ")", "for", "i", "in", "range", "(", "star_group_size", ")", ":", "unc_tab", "[", "i", "]", "=", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "self", ".", "fitter", ".", "fit_info", "[", "'param_cov'", "]", ")", ")", "[", "k", ":", "k", "+", "n_fit_params", "]", "k", "=", "k", "+", "n_fit_params", "return", "unc_tab" ]
Retrieve uncertainties on fitted parameters from the fitter object. Parameters ---------- star_group_size : int Number of stars in the given group. Returns ------- unc_tab : `~astropy.table.Table` Table which contains uncertainties on the fitted parameters. The uncertainties are reported as one standard deviation.
[ "Retrieve", "uncertainties", "on", "fitted", "parameters", "from", "the", "fitter", "object", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/photometry.py#L403-L435
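The per-parameter 1-sigma uncertainties come from the diagonal of the fitter's parameter covariance matrix. A minimal sketch with a made-up 3x3 covariance (e.g. for x_0, y_0, flux):

import numpy as np

param_cov = np.diag([0.04, 0.04, 25.0])     # hypothetical covariance from a least-squares fit
one_sigma = np.sqrt(np.diag(param_cov))     # array([0.2, 0.2, 5.0])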
10,538
astropy/photutils
photutils/psf/photometry.py
BasicPSFPhotometry._model_params2table
def _model_params2table(self, fit_model, star_group_size): """ Place fitted parameters into an astropy table. Parameters ---------- fit_model : `astropy.modeling.Fittable2DModel` instance PSF or PRF model to fit the data. Could be one of the models in this package like `~photutils.psf.sandbox.DiscretePRF`, `~photutils.psf.IntegratedGaussianPRF`, or any other suitable 2D model. star_group_size : int Number of stars in the given group. Returns ------- param_tab : `~astropy.table.Table` Table that contains the fitted parameters. """ param_tab = Table() for param_tab_name in self._pars_to_output.keys(): param_tab.add_column(Column(name=param_tab_name, data=np.empty(star_group_size))) if star_group_size > 1: for i in range(star_group_size): for param_tab_name, param_name in self._pars_to_output.items(): param_tab[param_tab_name][i] = getattr(fit_model, param_name + '_' + str(i)).value else: for param_tab_name, param_name in self._pars_to_output.items(): param_tab[param_tab_name] = getattr(fit_model, param_name).value return param_tab
python
def _model_params2table(self, fit_model, star_group_size): """ Place fitted parameters into an astropy table. Parameters ---------- fit_model : `astropy.modeling.Fittable2DModel` instance PSF or PRF model to fit the data. Could be one of the models in this package like `~photutils.psf.sandbox.DiscretePRF`, `~photutils.psf.IntegratedGaussianPRF`, or any other suitable 2D model. star_group_size : int Number of stars in the given group. Returns ------- param_tab : `~astropy.table.Table` Table that contains the fitted parameters. """ param_tab = Table() for param_tab_name in self._pars_to_output.keys(): param_tab.add_column(Column(name=param_tab_name, data=np.empty(star_group_size))) if star_group_size > 1: for i in range(star_group_size): for param_tab_name, param_name in self._pars_to_output.items(): param_tab[param_tab_name][i] = getattr(fit_model, param_name + '_' + str(i)).value else: for param_tab_name, param_name in self._pars_to_output.items(): param_tab[param_tab_name] = getattr(fit_model, param_name).value return param_tab
[ "def", "_model_params2table", "(", "self", ",", "fit_model", ",", "star_group_size", ")", ":", "param_tab", "=", "Table", "(", ")", "for", "param_tab_name", "in", "self", ".", "_pars_to_output", ".", "keys", "(", ")", ":", "param_tab", ".", "add_column", "(", "Column", "(", "name", "=", "param_tab_name", ",", "data", "=", "np", ".", "empty", "(", "star_group_size", ")", ")", ")", "if", "star_group_size", ">", "1", ":", "for", "i", "in", "range", "(", "star_group_size", ")", ":", "for", "param_tab_name", ",", "param_name", "in", "self", ".", "_pars_to_output", ".", "items", "(", ")", ":", "param_tab", "[", "param_tab_name", "]", "[", "i", "]", "=", "getattr", "(", "fit_model", ",", "param_name", "+", "'_'", "+", "str", "(", "i", ")", ")", ".", "value", "else", ":", "for", "param_tab_name", ",", "param_name", "in", "self", ".", "_pars_to_output", ".", "items", "(", ")", ":", "param_tab", "[", "param_tab_name", "]", "=", "getattr", "(", "fit_model", ",", "param_name", ")", ".", "value", "return", "param_tab" ]
Place fitted parameters into an astropy table. Parameters ---------- fit_model : `astropy.modeling.Fittable2DModel` instance PSF or PRF model to fit the data. Could be one of the models in this package like `~photutils.psf.sandbox.DiscretePRF`, `~photutils.psf.IntegratedGaussianPRF`, or any other suitable 2D model. star_group_size : int Number of stars in the given group. Returns ------- param_tab : `~astropy.table.Table` Table that contains the fitted parameters.
[ "Place", "fitted", "parameters", "into", "an", "astropy", "table", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/photometry.py#L437-L473
10,539
astropy/photutils
photutils/psf/photometry.py
IterativelySubtractedPSFPhotometry._do_photometry
def _do_photometry(self, param_tab, n_start=1): """ Helper function which performs the iterations of the photometry process. Parameters ---------- param_names : list Names of the columns which represent the initial guesses. For example, ['x_0', 'y_0', 'flux_0'], for intial guesses on the center positions and the flux. n_start : int Integer representing the start index of the iteration. It is 1 if init_guesses are None, and 2 otherwise. Returns ------- output_table : `~astropy.table.Table` or None Table with the photometry results, i.e., centroids and fluxes estimations and the initial estimates used to start the fitting process. """ output_table = Table() self._define_fit_param_names() for (init_parname, fit_parname) in zip(self._pars_to_set.keys(), self._pars_to_output.keys()): output_table.add_column(Column(name=init_parname)) output_table.add_column(Column(name=fit_parname)) sources = self.finder(self._residual_image) n = n_start while(sources is not None and (self.niters is None or n <= self.niters)): apertures = CircularAperture((sources['xcentroid'], sources['ycentroid']), r=self.aperture_radius) sources['aperture_flux'] = aperture_photometry( self._residual_image, apertures)['aperture_sum'] init_guess_tab = Table(names=['id', 'x_0', 'y_0', 'flux_0'], data=[sources['id'], sources['xcentroid'], sources['ycentroid'], sources['aperture_flux']]) for param_tab_name, param_name in self._pars_to_set.items(): if param_tab_name not in (['x_0', 'y_0', 'flux_0']): init_guess_tab.add_column( Column(name=param_tab_name, data=(getattr(self.psf_model, param_name) * np.ones(len(sources))))) star_groups = self.group_maker(init_guess_tab) table, self._residual_image = super().nstar( self._residual_image, star_groups) star_groups = star_groups.group_by('group_id') table = hstack([star_groups, table]) table['iter_detected'] = n*np.ones(table['x_fit'].shape, dtype=np.int32) output_table = vstack([output_table, table]) # do not warn if no sources are found beyond the first iteration with warnings.catch_warnings(): warnings.simplefilter('ignore', NoDetectionsWarning) sources = self.finder(self._residual_image) n += 1 return output_table
python
def _do_photometry(self, param_tab, n_start=1): """ Helper function which performs the iterations of the photometry process. Parameters ---------- param_names : list Names of the columns which represent the initial guesses. For example, ['x_0', 'y_0', 'flux_0'], for intial guesses on the center positions and the flux. n_start : int Integer representing the start index of the iteration. It is 1 if init_guesses are None, and 2 otherwise. Returns ------- output_table : `~astropy.table.Table` or None Table with the photometry results, i.e., centroids and fluxes estimations and the initial estimates used to start the fitting process. """ output_table = Table() self._define_fit_param_names() for (init_parname, fit_parname) in zip(self._pars_to_set.keys(), self._pars_to_output.keys()): output_table.add_column(Column(name=init_parname)) output_table.add_column(Column(name=fit_parname)) sources = self.finder(self._residual_image) n = n_start while(sources is not None and (self.niters is None or n <= self.niters)): apertures = CircularAperture((sources['xcentroid'], sources['ycentroid']), r=self.aperture_radius) sources['aperture_flux'] = aperture_photometry( self._residual_image, apertures)['aperture_sum'] init_guess_tab = Table(names=['id', 'x_0', 'y_0', 'flux_0'], data=[sources['id'], sources['xcentroid'], sources['ycentroid'], sources['aperture_flux']]) for param_tab_name, param_name in self._pars_to_set.items(): if param_tab_name not in (['x_0', 'y_0', 'flux_0']): init_guess_tab.add_column( Column(name=param_tab_name, data=(getattr(self.psf_model, param_name) * np.ones(len(sources))))) star_groups = self.group_maker(init_guess_tab) table, self._residual_image = super().nstar( self._residual_image, star_groups) star_groups = star_groups.group_by('group_id') table = hstack([star_groups, table]) table['iter_detected'] = n*np.ones(table['x_fit'].shape, dtype=np.int32) output_table = vstack([output_table, table]) # do not warn if no sources are found beyond the first iteration with warnings.catch_warnings(): warnings.simplefilter('ignore', NoDetectionsWarning) sources = self.finder(self._residual_image) n += 1 return output_table
[ "def", "_do_photometry", "(", "self", ",", "param_tab", ",", "n_start", "=", "1", ")", ":", "output_table", "=", "Table", "(", ")", "self", ".", "_define_fit_param_names", "(", ")", "for", "(", "init_parname", ",", "fit_parname", ")", "in", "zip", "(", "self", ".", "_pars_to_set", ".", "keys", "(", ")", ",", "self", ".", "_pars_to_output", ".", "keys", "(", ")", ")", ":", "output_table", ".", "add_column", "(", "Column", "(", "name", "=", "init_parname", ")", ")", "output_table", ".", "add_column", "(", "Column", "(", "name", "=", "fit_parname", ")", ")", "sources", "=", "self", ".", "finder", "(", "self", ".", "_residual_image", ")", "n", "=", "n_start", "while", "(", "sources", "is", "not", "None", "and", "(", "self", ".", "niters", "is", "None", "or", "n", "<=", "self", ".", "niters", ")", ")", ":", "apertures", "=", "CircularAperture", "(", "(", "sources", "[", "'xcentroid'", "]", ",", "sources", "[", "'ycentroid'", "]", ")", ",", "r", "=", "self", ".", "aperture_radius", ")", "sources", "[", "'aperture_flux'", "]", "=", "aperture_photometry", "(", "self", ".", "_residual_image", ",", "apertures", ")", "[", "'aperture_sum'", "]", "init_guess_tab", "=", "Table", "(", "names", "=", "[", "'id'", ",", "'x_0'", ",", "'y_0'", ",", "'flux_0'", "]", ",", "data", "=", "[", "sources", "[", "'id'", "]", ",", "sources", "[", "'xcentroid'", "]", ",", "sources", "[", "'ycentroid'", "]", ",", "sources", "[", "'aperture_flux'", "]", "]", ")", "for", "param_tab_name", ",", "param_name", "in", "self", ".", "_pars_to_set", ".", "items", "(", ")", ":", "if", "param_tab_name", "not", "in", "(", "[", "'x_0'", ",", "'y_0'", ",", "'flux_0'", "]", ")", ":", "init_guess_tab", ".", "add_column", "(", "Column", "(", "name", "=", "param_tab_name", ",", "data", "=", "(", "getattr", "(", "self", ".", "psf_model", ",", "param_name", ")", "*", "np", ".", "ones", "(", "len", "(", "sources", ")", ")", ")", ")", ")", "star_groups", "=", "self", ".", "group_maker", "(", "init_guess_tab", ")", "table", ",", "self", ".", "_residual_image", "=", "super", "(", ")", ".", "nstar", "(", "self", ".", "_residual_image", ",", "star_groups", ")", "star_groups", "=", "star_groups", ".", "group_by", "(", "'group_id'", ")", "table", "=", "hstack", "(", "[", "star_groups", ",", "table", "]", ")", "table", "[", "'iter_detected'", "]", "=", "n", "*", "np", ".", "ones", "(", "table", "[", "'x_fit'", "]", ".", "shape", ",", "dtype", "=", "np", ".", "int32", ")", "output_table", "=", "vstack", "(", "[", "output_table", ",", "table", "]", ")", "# do not warn if no sources are found beyond the first iteration", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "'ignore'", ",", "NoDetectionsWarning", ")", "sources", "=", "self", ".", "finder", "(", "self", ".", "_residual_image", ")", "n", "+=", "1", "return", "output_table" ]
Helper function which performs the iterations of the photometry process. Parameters ---------- param_names : list Names of the columns which represent the initial guesses. For example, ['x_0', 'y_0', 'flux_0'], for intial guesses on the center positions and the flux. n_start : int Integer representing the start index of the iteration. It is 1 if init_guesses are None, and 2 otherwise. Returns ------- output_table : `~astropy.table.Table` or None Table with the photometry results, i.e., centroids and fluxes estimations and the initial estimates used to start the fitting process.
[ "Helper", "function", "which", "performs", "the", "iterations", "of", "the", "photometry", "process", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/photometry.py#L666-L740
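Usage sketch for the class this private helper drives. This is not part of the record above; it assumes the constructor keywords (finder, group_maker, bkg_estimator, psf_model, fitter, niters, fitshape, aperture_radius) documented for photutils at this commit, and all numeric values are illustrative only.

import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from photutils.background import MMMBackground
from photutils.detection import DAOStarFinder
from photutils.psf import (DAOGroup, IntegratedGaussianPRF,
                           IterativelySubtractedPSFPhotometry)

# Small synthetic image: one point source on top of Gaussian noise.
y, x = np.mgrid[0:64, 0:64]
star = IntegratedGaussianPRF(sigma=1.5, flux=500., x_0=31.5, y_0=31.5)
rng = np.random.RandomState(42)
image = star(x, y) + rng.normal(0., 0.5, size=(64, 64))

photometry = IterativelySubtractedPSFPhotometry(
    finder=DAOStarFinder(threshold=3., fwhm=3.),   # illustrative settings
    group_maker=DAOGroup(crit_separation=5.),
    bkg_estimator=MMMBackground(),
    psf_model=IntegratedGaussianPRF(sigma=1.5),
    fitter=LevMarLSQFitter(),
    niters=2, fitshape=(7, 7), aperture_radius=3.)

result_tab = photometry(image=image)        # iterations run by _do_photometry
residual = photometry.get_residual_image()  # image after subtracting the fits
print(result_tab['x_fit', 'y_fit', 'flux_fit', 'iter_detected'])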
10,540
astropy/photutils
photutils/utils/wcs_helpers.py
pixel_scale_angle_at_skycoord
def pixel_scale_angle_at_skycoord(skycoord, wcs, offset=1. * u.arcsec): """ Calculate the pixel scale and WCS rotation angle at the position of a SkyCoord coordinate. Parameters ---------- skycoord : `~astropy.coordinates.SkyCoord` The SkyCoord coordinate. wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. offset : `~astropy.units.Quantity` A small angular offset to use to compute the pixel scale and position angle. Returns ------- scale : `~astropy.units.Quantity` The pixel scale in arcsec/pixel. angle : `~astropy.units.Quantity` The angle (in degrees) measured counterclockwise from the positive x axis to the "North" axis of the celestial coordinate system. Notes ----- If distortions are present in the image, the x and y pixel scales likely differ. This function computes a single pixel scale along the North/South axis. """ # We take a point directly "above" (in latitude) the input position # and convert it to pixel coordinates, then we use the pixel deltas # between the input and offset point to calculate the pixel scale and # angle. # Find the coordinates as a representation object coord = skycoord.represent_as('unitspherical') # Add a a small perturbation in the latitude direction (since longitude # is more difficult because it is not directly an angle) coord_new = UnitSphericalRepresentation(coord.lon, coord.lat + offset) coord_offset = skycoord.realize_frame(coord_new) # Find pixel coordinates of offset coordinates and pixel deltas x_offset, y_offset = skycoord_to_pixel(coord_offset, wcs, mode='all') x, y = skycoord_to_pixel(skycoord, wcs, mode='all') dx = x_offset - x dy = y_offset - y scale = offset.to(u.arcsec) / (np.hypot(dx, dy) * u.pixel) angle = (np.arctan2(dy, dx) * u.radian).to(u.deg) return scale, angle
python
def pixel_scale_angle_at_skycoord(skycoord, wcs, offset=1. * u.arcsec): """ Calculate the pixel scale and WCS rotation angle at the position of a SkyCoord coordinate. Parameters ---------- skycoord : `~astropy.coordinates.SkyCoord` The SkyCoord coordinate. wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. offset : `~astropy.units.Quantity` A small angular offset to use to compute the pixel scale and position angle. Returns ------- scale : `~astropy.units.Quantity` The pixel scale in arcsec/pixel. angle : `~astropy.units.Quantity` The angle (in degrees) measured counterclockwise from the positive x axis to the "North" axis of the celestial coordinate system. Notes ----- If distortions are present in the image, the x and y pixel scales likely differ. This function computes a single pixel scale along the North/South axis. """ # We take a point directly "above" (in latitude) the input position # and convert it to pixel coordinates, then we use the pixel deltas # between the input and offset point to calculate the pixel scale and # angle. # Find the coordinates as a representation object coord = skycoord.represent_as('unitspherical') # Add a a small perturbation in the latitude direction (since longitude # is more difficult because it is not directly an angle) coord_new = UnitSphericalRepresentation(coord.lon, coord.lat + offset) coord_offset = skycoord.realize_frame(coord_new) # Find pixel coordinates of offset coordinates and pixel deltas x_offset, y_offset = skycoord_to_pixel(coord_offset, wcs, mode='all') x, y = skycoord_to_pixel(skycoord, wcs, mode='all') dx = x_offset - x dy = y_offset - y scale = offset.to(u.arcsec) / (np.hypot(dx, dy) * u.pixel) angle = (np.arctan2(dy, dx) * u.radian).to(u.deg) return scale, angle
[ "def", "pixel_scale_angle_at_skycoord", "(", "skycoord", ",", "wcs", ",", "offset", "=", "1.", "*", "u", ".", "arcsec", ")", ":", "# We take a point directly \"above\" (in latitude) the input position", "# and convert it to pixel coordinates, then we use the pixel deltas", "# between the input and offset point to calculate the pixel scale and", "# angle.", "# Find the coordinates as a representation object", "coord", "=", "skycoord", ".", "represent_as", "(", "'unitspherical'", ")", "# Add a a small perturbation in the latitude direction (since longitude", "# is more difficult because it is not directly an angle)", "coord_new", "=", "UnitSphericalRepresentation", "(", "coord", ".", "lon", ",", "coord", ".", "lat", "+", "offset", ")", "coord_offset", "=", "skycoord", ".", "realize_frame", "(", "coord_new", ")", "# Find pixel coordinates of offset coordinates and pixel deltas", "x_offset", ",", "y_offset", "=", "skycoord_to_pixel", "(", "coord_offset", ",", "wcs", ",", "mode", "=", "'all'", ")", "x", ",", "y", "=", "skycoord_to_pixel", "(", "skycoord", ",", "wcs", ",", "mode", "=", "'all'", ")", "dx", "=", "x_offset", "-", "x", "dy", "=", "y_offset", "-", "y", "scale", "=", "offset", ".", "to", "(", "u", ".", "arcsec", ")", "/", "(", "np", ".", "hypot", "(", "dx", ",", "dy", ")", "*", "u", ".", "pixel", ")", "angle", "=", "(", "np", ".", "arctan2", "(", "dy", ",", "dx", ")", "*", "u", ".", "radian", ")", ".", "to", "(", "u", ".", "deg", ")", "return", "scale", ",", "angle" ]
Calculate the pixel scale and WCS rotation angle at the position of a SkyCoord coordinate. Parameters ---------- skycoord : `~astropy.coordinates.SkyCoord` The SkyCoord coordinate. wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. offset : `~astropy.units.Quantity` A small angular offset to use to compute the pixel scale and position angle. Returns ------- scale : `~astropy.units.Quantity` The pixel scale in arcsec/pixel. angle : `~astropy.units.Quantity` The angle (in degrees) measured counterclockwise from the positive x axis to the "North" axis of the celestial coordinate system. Notes ----- If distortions are present in the image, the x and y pixel scales likely differ. This function computes a single pixel scale along the North/South axis.
[ "Calculate", "the", "pixel", "scale", "and", "WCS", "rotation", "angle", "at", "the", "position", "of", "a", "SkyCoord", "coordinate", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/wcs_helpers.py#L9-L62
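A minimal usage sketch for this helper, assuming it can be imported from the module listed in the path field (photutils.utils.wcs_helpers); it is not necessarily public API. The WCS below is a simple TAN projection with a ~1 arcsec/pixel scale.

from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from photutils.utils.wcs_helpers import pixel_scale_angle_at_skycoord  # assumed path

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [50., 50.]
wcs.wcs.crval = [197.89, -1.37]
wcs.wcs.cdelt = [-1. / 3600., 1. / 3600.]   # ~1 arcsec per pixel

coord = SkyCoord(197.89, -1.37, unit='deg')
scale, angle = pixel_scale_angle_at_skycoord(coord, wcs)
print(scale)   # ~1 arcsec / pix
print(angle)   # ~90 deg: North lies along +y for this simple header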
10,541
astropy/photutils
photutils/utils/wcs_helpers.py
pixel_to_icrs_coords
def pixel_to_icrs_coords(x, y, wcs): """ Convert pixel coordinates to ICRS Right Ascension and Declination. This is merely a convenience function to extract RA and Dec. from a `~astropy.coordinates.SkyCoord` instance so they can be put in separate columns in a `~astropy.table.Table`. Parameters ---------- x : float or array-like The x pixel coordinate. y : float or array-like The y pixel coordinate. wcs : `~astropy.wcs.WCS` The WCS transformation to use to convert from pixel coordinates to ICRS world coordinates. `~astropy.table.Table`. Returns ------- ra : `~astropy.units.Quantity` The ICRS Right Ascension in degrees. dec : `~astropy.units.Quantity` The ICRS Declination in degrees. """ icrs_coords = pixel_to_skycoord(x, y, wcs).icrs icrs_ra = icrs_coords.ra.degree * u.deg icrs_dec = icrs_coords.dec.degree * u.deg return icrs_ra, icrs_dec
python
def pixel_to_icrs_coords(x, y, wcs): """ Convert pixel coordinates to ICRS Right Ascension and Declination. This is merely a convenience function to extract RA and Dec. from a `~astropy.coordinates.SkyCoord` instance so they can be put in separate columns in a `~astropy.table.Table`. Parameters ---------- x : float or array-like The x pixel coordinate. y : float or array-like The y pixel coordinate. wcs : `~astropy.wcs.WCS` The WCS transformation to use to convert from pixel coordinates to ICRS world coordinates. `~astropy.table.Table`. Returns ------- ra : `~astropy.units.Quantity` The ICRS Right Ascension in degrees. dec : `~astropy.units.Quantity` The ICRS Declination in degrees. """ icrs_coords = pixel_to_skycoord(x, y, wcs).icrs icrs_ra = icrs_coords.ra.degree * u.deg icrs_dec = icrs_coords.dec.degree * u.deg return icrs_ra, icrs_dec
[ "def", "pixel_to_icrs_coords", "(", "x", ",", "y", ",", "wcs", ")", ":", "icrs_coords", "=", "pixel_to_skycoord", "(", "x", ",", "y", ",", "wcs", ")", ".", "icrs", "icrs_ra", "=", "icrs_coords", ".", "ra", ".", "degree", "*", "u", ".", "deg", "icrs_dec", "=", "icrs_coords", ".", "dec", ".", "degree", "*", "u", ".", "deg", "return", "icrs_ra", ",", "icrs_dec" ]
Convert pixel coordinates to ICRS Right Ascension and Declination. This is merely a convenience function to extract RA and Dec. from a `~astropy.coordinates.SkyCoord` instance so they can be put in separate columns in a `~astropy.table.Table`. Parameters ---------- x : float or array-like The x pixel coordinate. y : float or array-like The y pixel coordinate. wcs : `~astropy.wcs.WCS` The WCS transformation to use to convert from pixel coordinates to ICRS world coordinates. `~astropy.table.Table`. Returns ------- ra : `~astropy.units.Quantity` The ICRS Right Ascension in degrees. dec : `~astropy.units.Quantity` The ICRS Declination in degrees.
[ "Convert", "pixel", "coordinates", "to", "ICRS", "Right", "Ascension", "and", "Declination", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/wcs_helpers.py#L95-L129
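Hypothetical usage sketch, again assuming the import path from this record's path field; it simply shows the two Quantity outputs that are meant to become Table columns.

from astropy.wcs import WCS
from photutils.utils.wcs_helpers import pixel_to_icrs_coords  # assumed path

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [1., 1.]
wcs.wcs.crval = [10., 20.]
wcs.wcs.cdelt = [-0.001, 0.001]

ra, dec = pixel_to_icrs_coords([0., 10.], [0., 10.], wcs)
print(ra)    # Quantity in deg, one value per input pixel position
print(dec)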
10,542
astropy/photutils
photutils/utils/convolution.py
filter_data
def filter_data(data, kernel, mode='constant', fill_value=0.0, check_normalization=False): """ Convolve a 2D image with a 2D kernel. The kernel may either be a 2D `~numpy.ndarray` or a `~astropy.convolution.Kernel2D` object. Parameters ---------- data : array_like The 2D array of the image. kernel : array-like (2D) or `~astropy.convolution.Kernel2D` The 2D kernel used to filter the input ``data``. Filtering the ``data`` will smooth the noise and maximize detectability of objects with a shape similar to the kernel. mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional The ``mode`` determines how the array borders are handled. For the ``'constant'`` mode, values outside the array borders are set to ``fill_value``. The default is ``'constant'``. fill_value : scalar, optional Value to fill data values beyond the array borders if ``mode`` is ``'constant'``. The default is ``0.0``. check_normalization : bool, optional If `True` then a warning will be issued if the kernel is not normalized to 1. """ from scipy import ndimage if kernel is not None: if isinstance(kernel, Kernel2D): kernel_array = kernel.array else: kernel_array = kernel if check_normalization: if not np.allclose(np.sum(kernel_array), 1.0): warnings.warn('The kernel is not normalized.', AstropyUserWarning) # NOTE: astropy.convolution.convolve fails with zero-sum # kernels (used in findstars) (cf. astropy #1647) # NOTE: if data is int and kernel is float, ndimage.convolve # will return an int image - here we make the data float so # that a float image is always returned return ndimage.convolve(data.astype(float), kernel_array, mode=mode, cval=fill_value) else: return data
python
def filter_data(data, kernel, mode='constant', fill_value=0.0, check_normalization=False): """ Convolve a 2D image with a 2D kernel. The kernel may either be a 2D `~numpy.ndarray` or a `~astropy.convolution.Kernel2D` object. Parameters ---------- data : array_like The 2D array of the image. kernel : array-like (2D) or `~astropy.convolution.Kernel2D` The 2D kernel used to filter the input ``data``. Filtering the ``data`` will smooth the noise and maximize detectability of objects with a shape similar to the kernel. mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional The ``mode`` determines how the array borders are handled. For the ``'constant'`` mode, values outside the array borders are set to ``fill_value``. The default is ``'constant'``. fill_value : scalar, optional Value to fill data values beyond the array borders if ``mode`` is ``'constant'``. The default is ``0.0``. check_normalization : bool, optional If `True` then a warning will be issued if the kernel is not normalized to 1. """ from scipy import ndimage if kernel is not None: if isinstance(kernel, Kernel2D): kernel_array = kernel.array else: kernel_array = kernel if check_normalization: if not np.allclose(np.sum(kernel_array), 1.0): warnings.warn('The kernel is not normalized.', AstropyUserWarning) # NOTE: astropy.convolution.convolve fails with zero-sum # kernels (used in findstars) (cf. astropy #1647) # NOTE: if data is int and kernel is float, ndimage.convolve # will return an int image - here we make the data float so # that a float image is always returned return ndimage.convolve(data.astype(float), kernel_array, mode=mode, cval=fill_value) else: return data
[ "def", "filter_data", "(", "data", ",", "kernel", ",", "mode", "=", "'constant'", ",", "fill_value", "=", "0.0", ",", "check_normalization", "=", "False", ")", ":", "from", "scipy", "import", "ndimage", "if", "kernel", "is", "not", "None", ":", "if", "isinstance", "(", "kernel", ",", "Kernel2D", ")", ":", "kernel_array", "=", "kernel", ".", "array", "else", ":", "kernel_array", "=", "kernel", "if", "check_normalization", ":", "if", "not", "np", ".", "allclose", "(", "np", ".", "sum", "(", "kernel_array", ")", ",", "1.0", ")", ":", "warnings", ".", "warn", "(", "'The kernel is not normalized.'", ",", "AstropyUserWarning", ")", "# NOTE: astropy.convolution.convolve fails with zero-sum", "# kernels (used in findstars) (cf. astropy #1647)", "# NOTE: if data is int and kernel is float, ndimage.convolve", "# will return an int image - here we make the data float so", "# that a float image is always returned", "return", "ndimage", ".", "convolve", "(", "data", ".", "astype", "(", "float", ")", ",", "kernel_array", ",", "mode", "=", "mode", ",", "cval", "=", "fill_value", ")", "else", ":", "return", "data" ]
Convolve a 2D image with a 2D kernel. The kernel may either be a 2D `~numpy.ndarray` or a `~astropy.convolution.Kernel2D` object. Parameters ---------- data : array_like The 2D array of the image. kernel : array-like (2D) or `~astropy.convolution.Kernel2D` The 2D kernel used to filter the input ``data``. Filtering the ``data`` will smooth the noise and maximize detectability of objects with a shape similar to the kernel. mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional The ``mode`` determines how the array borders are handled. For the ``'constant'`` mode, values outside the array borders are set to ``fill_value``. The default is ``'constant'``. fill_value : scalar, optional Value to fill data values beyond the array borders if ``mode`` is ``'constant'``. The default is ``0.0``. check_normalization : bool, optional If `True` then a warning will be issued if the kernel is not normalized to 1.
[ "Convolve", "a", "2D", "image", "with", "a", "2D", "kernel", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/convolution.py#L13-L66
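A short sketch of convolving an image with a normalized kernel, assuming the import path from this record's path field; scipy is required, as noted in the function body.

import numpy as np
from astropy.convolution import Gaussian2DKernel
from photutils.utils.convolution import filter_data  # assumed path

data = np.zeros((25, 25))
data[12, 12] = 100.                   # single bright pixel
kernel = Gaussian2DKernel(x_stddev=2.)
kernel.normalize()                    # make the kernel sum exactly 1

smoothed = filter_data(data, kernel, mode='constant', fill_value=0.0,
                       check_normalization=True)   # no warning expected
print(smoothed.sum())                 # total flux is preserved (~100)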
10,543
astropy/photutils
photutils/psf/utils.py
prepare_psf_model
def prepare_psf_model(psfmodel, xname=None, yname=None, fluxname=None, renormalize_psf=True): """ Convert a 2D PSF model to one suitable for use with `BasicPSFPhotometry` or its subclasses. The resulting model may be a composite model, but should have only the x, y, and flux related parameters un-fixed. Parameters ---------- psfmodel : a 2D model The model to assume as representative of the PSF. xname : str or None The name of the ``psfmodel`` parameter that corresponds to the x-axis center of the PSF. If None, the model will be assumed to be centered at x=0, and a new parameter will be added for the offset. yname : str or None The name of the ``psfmodel`` parameter that corresponds to the y-axis center of the PSF. If None, the model will be assumed to be centered at y=0, and a new parameter will be added for the offset. fluxname : str or None The name of the ``psfmodel`` parameter that corresponds to the total flux of the star. If None, a scaling factor will be added to the model. renormalize_psf : bool If True, the model will be integrated from -inf to inf and re-scaled so that the total integrates to 1. Note that this renormalization only occurs *once*, so if the total flux of ``psfmodel`` depends on position, this will *not* be correct. Returns ------- outmod : a model A new model ready to be passed into `BasicPSFPhotometry` or its subclasses. """ if xname is None: xinmod = models.Shift(0, name='x_offset') xname = 'offset_0' else: xinmod = models.Identity(1) xname = xname + '_2' xinmod.fittable = True if yname is None: yinmod = models.Shift(0, name='y_offset') yname = 'offset_1' else: yinmod = models.Identity(1) yname = yname + '_2' yinmod.fittable = True outmod = (xinmod & yinmod) | psfmodel if fluxname is None: outmod = outmod * models.Const2D(1, name='flux_scaling') fluxname = 'amplitude_3' else: fluxname = fluxname + '_2' if renormalize_psf: # we do the import here because other machinery works w/o scipy from scipy import integrate integrand = integrate.dblquad(psfmodel, -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)[0] normmod = models.Const2D(1./integrand, name='renormalize_scaling') outmod = outmod * normmod # final setup of the output model - fix all the non-offset/scale # parameters for pnm in outmod.param_names: outmod.fixed[pnm] = pnm not in (xname, yname, fluxname) # and set the names so that BasicPSFPhotometry knows what to do outmod.xname = xname outmod.yname = yname outmod.fluxname = fluxname # now some convenience aliases if reasonable outmod.psfmodel = outmod[2] if 'x_0' not in outmod.param_names and 'y_0' not in outmod.param_names: outmod.x_0 = getattr(outmod, xname) outmod.y_0 = getattr(outmod, yname) if 'flux' not in outmod.param_names: outmod.flux = getattr(outmod, fluxname) return outmod
python
def prepare_psf_model(psfmodel, xname=None, yname=None, fluxname=None, renormalize_psf=True): """ Convert a 2D PSF model to one suitable for use with `BasicPSFPhotometry` or its subclasses. The resulting model may be a composite model, but should have only the x, y, and flux related parameters un-fixed. Parameters ---------- psfmodel : a 2D model The model to assume as representative of the PSF. xname : str or None The name of the ``psfmodel`` parameter that corresponds to the x-axis center of the PSF. If None, the model will be assumed to be centered at x=0, and a new parameter will be added for the offset. yname : str or None The name of the ``psfmodel`` parameter that corresponds to the y-axis center of the PSF. If None, the model will be assumed to be centered at y=0, and a new parameter will be added for the offset. fluxname : str or None The name of the ``psfmodel`` parameter that corresponds to the total flux of the star. If None, a scaling factor will be added to the model. renormalize_psf : bool If True, the model will be integrated from -inf to inf and re-scaled so that the total integrates to 1. Note that this renormalization only occurs *once*, so if the total flux of ``psfmodel`` depends on position, this will *not* be correct. Returns ------- outmod : a model A new model ready to be passed into `BasicPSFPhotometry` or its subclasses. """ if xname is None: xinmod = models.Shift(0, name='x_offset') xname = 'offset_0' else: xinmod = models.Identity(1) xname = xname + '_2' xinmod.fittable = True if yname is None: yinmod = models.Shift(0, name='y_offset') yname = 'offset_1' else: yinmod = models.Identity(1) yname = yname + '_2' yinmod.fittable = True outmod = (xinmod & yinmod) | psfmodel if fluxname is None: outmod = outmod * models.Const2D(1, name='flux_scaling') fluxname = 'amplitude_3' else: fluxname = fluxname + '_2' if renormalize_psf: # we do the import here because other machinery works w/o scipy from scipy import integrate integrand = integrate.dblquad(psfmodel, -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)[0] normmod = models.Const2D(1./integrand, name='renormalize_scaling') outmod = outmod * normmod # final setup of the output model - fix all the non-offset/scale # parameters for pnm in outmod.param_names: outmod.fixed[pnm] = pnm not in (xname, yname, fluxname) # and set the names so that BasicPSFPhotometry knows what to do outmod.xname = xname outmod.yname = yname outmod.fluxname = fluxname # now some convenience aliases if reasonable outmod.psfmodel = outmod[2] if 'x_0' not in outmod.param_names and 'y_0' not in outmod.param_names: outmod.x_0 = getattr(outmod, xname) outmod.y_0 = getattr(outmod, yname) if 'flux' not in outmod.param_names: outmod.flux = getattr(outmod, fluxname) return outmod
[ "def", "prepare_psf_model", "(", "psfmodel", ",", "xname", "=", "None", ",", "yname", "=", "None", ",", "fluxname", "=", "None", ",", "renormalize_psf", "=", "True", ")", ":", "if", "xname", "is", "None", ":", "xinmod", "=", "models", ".", "Shift", "(", "0", ",", "name", "=", "'x_offset'", ")", "xname", "=", "'offset_0'", "else", ":", "xinmod", "=", "models", ".", "Identity", "(", "1", ")", "xname", "=", "xname", "+", "'_2'", "xinmod", ".", "fittable", "=", "True", "if", "yname", "is", "None", ":", "yinmod", "=", "models", ".", "Shift", "(", "0", ",", "name", "=", "'y_offset'", ")", "yname", "=", "'offset_1'", "else", ":", "yinmod", "=", "models", ".", "Identity", "(", "1", ")", "yname", "=", "yname", "+", "'_2'", "yinmod", ".", "fittable", "=", "True", "outmod", "=", "(", "xinmod", "&", "yinmod", ")", "|", "psfmodel", "if", "fluxname", "is", "None", ":", "outmod", "=", "outmod", "*", "models", ".", "Const2D", "(", "1", ",", "name", "=", "'flux_scaling'", ")", "fluxname", "=", "'amplitude_3'", "else", ":", "fluxname", "=", "fluxname", "+", "'_2'", "if", "renormalize_psf", ":", "# we do the import here because other machinery works w/o scipy", "from", "scipy", "import", "integrate", "integrand", "=", "integrate", ".", "dblquad", "(", "psfmodel", ",", "-", "np", ".", "inf", ",", "np", ".", "inf", ",", "lambda", "x", ":", "-", "np", ".", "inf", ",", "lambda", "x", ":", "np", ".", "inf", ")", "[", "0", "]", "normmod", "=", "models", ".", "Const2D", "(", "1.", "/", "integrand", ",", "name", "=", "'renormalize_scaling'", ")", "outmod", "=", "outmod", "*", "normmod", "# final setup of the output model - fix all the non-offset/scale", "# parameters", "for", "pnm", "in", "outmod", ".", "param_names", ":", "outmod", ".", "fixed", "[", "pnm", "]", "=", "pnm", "not", "in", "(", "xname", ",", "yname", ",", "fluxname", ")", "# and set the names so that BasicPSFPhotometry knows what to do", "outmod", ".", "xname", "=", "xname", "outmod", ".", "yname", "=", "yname", "outmod", ".", "fluxname", "=", "fluxname", "# now some convenience aliases if reasonable", "outmod", ".", "psfmodel", "=", "outmod", "[", "2", "]", "if", "'x_0'", "not", "in", "outmod", ".", "param_names", "and", "'y_0'", "not", "in", "outmod", ".", "param_names", ":", "outmod", ".", "x_0", "=", "getattr", "(", "outmod", ",", "xname", ")", "outmod", ".", "y_0", "=", "getattr", "(", "outmod", ",", "yname", ")", "if", "'flux'", "not", "in", "outmod", ".", "param_names", ":", "outmod", ".", "flux", "=", "getattr", "(", "outmod", ",", "fluxname", ")", "return", "outmod" ]
Convert a 2D PSF model to one suitable for use with `BasicPSFPhotometry` or its subclasses. The resulting model may be a composite model, but should have only the x, y, and flux related parameters un-fixed. Parameters ---------- psfmodel : a 2D model The model to assume as representative of the PSF. xname : str or None The name of the ``psfmodel`` parameter that corresponds to the x-axis center of the PSF. If None, the model will be assumed to be centered at x=0, and a new parameter will be added for the offset. yname : str or None The name of the ``psfmodel`` parameter that corresponds to the y-axis center of the PSF. If None, the model will be assumed to be centered at y=0, and a new parameter will be added for the offset. fluxname : str or None The name of the ``psfmodel`` parameter that corresponds to the total flux of the star. If None, a scaling factor will be added to the model. renormalize_psf : bool If True, the model will be integrated from -inf to inf and re-scaled so that the total integrates to 1. Note that this renormalization only occurs *once*, so if the total flux of ``psfmodel`` depends on position, this will *not* be correct. Returns ------- outmod : a model A new model ready to be passed into `BasicPSFPhotometry` or its subclasses.
[ "Convert", "a", "2D", "PSF", "model", "to", "one", "suitable", "for", "use", "with", "BasicPSFPhotometry", "or", "its", "subclasses", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/utils.py#L15-L106
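A sketch of wrapping a plain Gaussian2D into a PSF model with the expected x/y/flux interface. The import of prepare_psf_model from photutils.psf reflects the documented public location at this commit; the parameter names printed at the end follow from the renaming described in the docstring.

from astropy.modeling import models
from photutils.psf import prepare_psf_model   # assumed public import location

gauss = models.Gaussian2D(x_stddev=2., y_stddev=2.)
psf = prepare_psf_model(gauss, xname='x_mean', yname='y_mean',
                        fluxname=None, renormalize_psf=False)

# Only the position/flux parameters stay free; everything else is fixed.
print([name for name in psf.param_names if not psf.fixed[name]])
print(psf.xname, psf.yname, psf.fluxname)   # e.g. x_mean_2 y_mean_2 amplitude_3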
10,544
astropy/photutils
photutils/psf/utils.py
get_grouped_psf_model
def get_grouped_psf_model(template_psf_model, star_group, pars_to_set): """ Construct a joint PSF model which consists of a sum of PSF's templated on a specific model, but whose parameters are given by a table of objects. Parameters ---------- template_psf_model : `astropy.modeling.Fittable2DModel` instance The model to use for *individual* objects. Must have parameters named ``x_0``, ``y_0``, and ``flux``. star_group : `~astropy.table.Table` Table of stars for which the compound PSF will be constructed. It must have columns named ``x_0``, ``y_0``, and ``flux_0``. Returns ------- group_psf An `astropy.modeling` ``CompoundModel`` instance which is a sum of the given PSF models. """ group_psf = None for star in star_group: psf_to_add = template_psf_model.copy() for param_tab_name, param_name in pars_to_set.items(): setattr(psf_to_add, param_name, star[param_tab_name]) if group_psf is None: # this is the first one only group_psf = psf_to_add else: group_psf += psf_to_add return group_psf
python
def get_grouped_psf_model(template_psf_model, star_group, pars_to_set): """ Construct a joint PSF model which consists of a sum of PSF's templated on a specific model, but whose parameters are given by a table of objects. Parameters ---------- template_psf_model : `astropy.modeling.Fittable2DModel` instance The model to use for *individual* objects. Must have parameters named ``x_0``, ``y_0``, and ``flux``. star_group : `~astropy.table.Table` Table of stars for which the compound PSF will be constructed. It must have columns named ``x_0``, ``y_0``, and ``flux_0``. Returns ------- group_psf An `astropy.modeling` ``CompoundModel`` instance which is a sum of the given PSF models. """ group_psf = None for star in star_group: psf_to_add = template_psf_model.copy() for param_tab_name, param_name in pars_to_set.items(): setattr(psf_to_add, param_name, star[param_tab_name]) if group_psf is None: # this is the first one only group_psf = psf_to_add else: group_psf += psf_to_add return group_psf
[ "def", "get_grouped_psf_model", "(", "template_psf_model", ",", "star_group", ",", "pars_to_set", ")", ":", "group_psf", "=", "None", "for", "star", "in", "star_group", ":", "psf_to_add", "=", "template_psf_model", ".", "copy", "(", ")", "for", "param_tab_name", ",", "param_name", "in", "pars_to_set", ".", "items", "(", ")", ":", "setattr", "(", "psf_to_add", ",", "param_name", ",", "star", "[", "param_tab_name", "]", ")", "if", "group_psf", "is", "None", ":", "# this is the first one only", "group_psf", "=", "psf_to_add", "else", ":", "group_psf", "+=", "psf_to_add", "return", "group_psf" ]
Construct a joint PSF model which consists of a sum of PSF's templated on a specific model, but whose parameters are given by a table of objects. Parameters ---------- template_psf_model : `astropy.modeling.Fittable2DModel` instance The model to use for *individual* objects. Must have parameters named ``x_0``, ``y_0``, and ``flux``. star_group : `~astropy.table.Table` Table of stars for which the compound PSF will be constructed. It must have columns named ``x_0``, ``y_0``, and ``flux_0``. Returns ------- group_psf An `astropy.modeling` ``CompoundModel`` instance which is a sum of the given PSF models.
[ "Construct", "a", "joint", "PSF", "model", "which", "consists", "of", "a", "sum", "of", "PSF", "s", "templated", "on", "a", "specific", "model", "but", "whose", "parameters", "are", "given", "by", "a", "table", "of", "objects", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/utils.py#L109-L143
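A sketch of building a group model for two stars, assuming the import path from this record's path field. The pars_to_set mapping (table column name to model parameter name) mirrors what BasicPSFPhotometry passes in for this template model.

from astropy.table import Table
from photutils.psf import IntegratedGaussianPRF
from photutils.psf.utils import get_grouped_psf_model  # assumed path

star_group = Table(rows=[(10.0, 12.0, 150.0), (14.0, 11.0, 90.0)],
                   names=('x_0', 'y_0', 'flux_0'))
pars_to_set = {'x_0': 'x_0', 'y_0': 'y_0', 'flux_0': 'flux'}

group_psf = get_grouped_psf_model(IntegratedGaussianPRF(sigma=1.5),
                                  star_group, pars_to_set)
print(group_psf(12.0, 11.5))   # summed contribution of both stars at (x, y)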
10,545
astropy/photutils
photutils/psf/utils.py
_call_fitter
def _call_fitter(fitter, psf, x, y, data, weights): """ Not all fitters have to support a weight array. This function includes the weight in the fitter call only if really needed. """ if np.all(weights == 1.): return fitter(psf, x, y, data) else: return fitter(psf, x, y, data, weights=weights)
python
def _call_fitter(fitter, psf, x, y, data, weights): """ Not all fitters have to support a weight array. This function includes the weight in the fitter call only if really needed. """ if np.all(weights == 1.): return fitter(psf, x, y, data) else: return fitter(psf, x, y, data, weights=weights)
[ "def", "_call_fitter", "(", "fitter", ",", "psf", ",", "x", ",", "y", ",", "data", ",", "weights", ")", ":", "if", "np", ".", "all", "(", "weights", "==", "1.", ")", ":", "return", "fitter", "(", "psf", ",", "x", ",", "y", ",", "data", ")", "else", ":", "return", "fitter", "(", "psf", ",", "x", ",", "y", ",", "data", ",", "weights", "=", "weights", ")" ]
Not all fitters have to support a weight array. This function includes the weight in the fitter call only if really needed.
[ "Not", "all", "fitters", "have", "to", "support", "a", "weight", "array", ".", "This", "function", "includes", "the", "weight", "in", "the", "fitter", "call", "only", "if", "really", "needed", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/utils.py#L178-L187
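A small sketch of the weight-dropping behaviour, assuming the private helper is importable from the module in this record's path field. With all-ones weights the fitter is called without a weights argument.

import numpy as np
from astropy.modeling import models
from astropy.modeling.fitting import LevMarLSQFitter
from photutils.psf.utils import _call_fitter   # private helper, assumed path

y, x = np.mgrid[0:21, 0:21]
truth = models.Gaussian2D(amplitude=5., x_mean=10., y_mean=10.,
                          x_stddev=2., y_stddev=2.)
data = truth(x, y)
guess = models.Gaussian2D(amplitude=4., x_mean=9.5, y_mean=10.5,
                          x_stddev=2.5, y_stddev=2.5)

fit = _call_fitter(LevMarLSQFitter(), guess, x, y, data,
                   weights=np.ones_like(data))
print(fit.x_mean.value, fit.y_mean.value)   # recovers ~(10, 10)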
10,546
astropy/photutils
photutils/detection/core.py
detect_threshold
def detect_threshold(data, snr, background=None, error=None, mask=None, mask_value=None, sigclip_sigma=3.0, sigclip_iters=None): """ Calculate a pixel-wise threshold image that can be used to detect sources. Parameters ---------- data : array_like The 2D array of the image. snr : float The signal-to-noise ratio per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. background : float or array_like, optional The background value(s) of the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. If the input ``data`` has been background-subtracted, then set ``background`` to ``0.0``. If `None`, then a scalar background value will be estimated using sigma-clipped statistics. error : float or array_like, optional The Gaussian 1-sigma standard deviation of the background noise in ``data``. ``error`` should include all sources of "background" error, but *exclude* the Poisson error of the sources. If ``error`` is a 2D image, then it should represent the 1-sigma background error in each pixel of ``data``. If `None`, then a scalar background rms value will be estimated using sigma-clipped statistics. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. Returns ------- threshold : 2D `~numpy.ndarray` A 2D image with the same shape as ``data`` containing the pixel-wise threshold values. See Also -------- :func:`photutils.segmentation.detect_sources` Notes ----- The ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters`` inputs are used only if it is necessary to estimate ``background`` or ``error`` using sigma-clipped background statistics. If ``background`` and ``error`` are both input, then ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters`` are ignored. """ if background is None or error is None: if astropy_version < '3.1': data_mean, data_median, data_std = sigma_clipped_stats( data, mask=mask, mask_value=mask_value, sigma=sigclip_sigma, iters=sigclip_iters) else: data_mean, data_median, data_std = sigma_clipped_stats( data, mask=mask, mask_value=mask_value, sigma=sigclip_sigma, maxiters=sigclip_iters) bkgrd_image = np.zeros_like(data) + data_mean bkgrdrms_image = np.zeros_like(data) + data_std if background is None: background = bkgrd_image else: if np.isscalar(background): background = np.zeros_like(data) + background else: if background.shape != data.shape: raise ValueError('If input background is 2D, then it ' 'must have the same shape as the input ' 'data.') if error is None: error = bkgrdrms_image else: if np.isscalar(error): error = np.zeros_like(data) + error else: if error.shape != data.shape: raise ValueError('If input error is 2D, then it ' 'must have the same shape as the input ' 'data.') return background + (error * snr)
python
def detect_threshold(data, snr, background=None, error=None, mask=None, mask_value=None, sigclip_sigma=3.0, sigclip_iters=None): """ Calculate a pixel-wise threshold image that can be used to detect sources. Parameters ---------- data : array_like The 2D array of the image. snr : float The signal-to-noise ratio per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. background : float or array_like, optional The background value(s) of the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. If the input ``data`` has been background-subtracted, then set ``background`` to ``0.0``. If `None`, then a scalar background value will be estimated using sigma-clipped statistics. error : float or array_like, optional The Gaussian 1-sigma standard deviation of the background noise in ``data``. ``error`` should include all sources of "background" error, but *exclude* the Poisson error of the sources. If ``error`` is a 2D image, then it should represent the 1-sigma background error in each pixel of ``data``. If `None`, then a scalar background rms value will be estimated using sigma-clipped statistics. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. Returns ------- threshold : 2D `~numpy.ndarray` A 2D image with the same shape as ``data`` containing the pixel-wise threshold values. See Also -------- :func:`photutils.segmentation.detect_sources` Notes ----- The ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters`` inputs are used only if it is necessary to estimate ``background`` or ``error`` using sigma-clipped background statistics. If ``background`` and ``error`` are both input, then ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters`` are ignored. """ if background is None or error is None: if astropy_version < '3.1': data_mean, data_median, data_std = sigma_clipped_stats( data, mask=mask, mask_value=mask_value, sigma=sigclip_sigma, iters=sigclip_iters) else: data_mean, data_median, data_std = sigma_clipped_stats( data, mask=mask, mask_value=mask_value, sigma=sigclip_sigma, maxiters=sigclip_iters) bkgrd_image = np.zeros_like(data) + data_mean bkgrdrms_image = np.zeros_like(data) + data_std if background is None: background = bkgrd_image else: if np.isscalar(background): background = np.zeros_like(data) + background else: if background.shape != data.shape: raise ValueError('If input background is 2D, then it ' 'must have the same shape as the input ' 'data.') if error is None: error = bkgrdrms_image else: if np.isscalar(error): error = np.zeros_like(data) + error else: if error.shape != data.shape: raise ValueError('If input error is 2D, then it ' 'must have the same shape as the input ' 'data.') return background + (error * snr)
[ "def", "detect_threshold", "(", "data", ",", "snr", ",", "background", "=", "None", ",", "error", "=", "None", ",", "mask", "=", "None", ",", "mask_value", "=", "None", ",", "sigclip_sigma", "=", "3.0", ",", "sigclip_iters", "=", "None", ")", ":", "if", "background", "is", "None", "or", "error", "is", "None", ":", "if", "astropy_version", "<", "'3.1'", ":", "data_mean", ",", "data_median", ",", "data_std", "=", "sigma_clipped_stats", "(", "data", ",", "mask", "=", "mask", ",", "mask_value", "=", "mask_value", ",", "sigma", "=", "sigclip_sigma", ",", "iters", "=", "sigclip_iters", ")", "else", ":", "data_mean", ",", "data_median", ",", "data_std", "=", "sigma_clipped_stats", "(", "data", ",", "mask", "=", "mask", ",", "mask_value", "=", "mask_value", ",", "sigma", "=", "sigclip_sigma", ",", "maxiters", "=", "sigclip_iters", ")", "bkgrd_image", "=", "np", ".", "zeros_like", "(", "data", ")", "+", "data_mean", "bkgrdrms_image", "=", "np", ".", "zeros_like", "(", "data", ")", "+", "data_std", "if", "background", "is", "None", ":", "background", "=", "bkgrd_image", "else", ":", "if", "np", ".", "isscalar", "(", "background", ")", ":", "background", "=", "np", ".", "zeros_like", "(", "data", ")", "+", "background", "else", ":", "if", "background", ".", "shape", "!=", "data", ".", "shape", ":", "raise", "ValueError", "(", "'If input background is 2D, then it '", "'must have the same shape as the input '", "'data.'", ")", "if", "error", "is", "None", ":", "error", "=", "bkgrdrms_image", "else", ":", "if", "np", ".", "isscalar", "(", "error", ")", ":", "error", "=", "np", ".", "zeros_like", "(", "data", ")", "+", "error", "else", ":", "if", "error", ".", "shape", "!=", "data", ".", "shape", ":", "raise", "ValueError", "(", "'If input error is 2D, then it '", "'must have the same shape as the input '", "'data.'", ")", "return", "background", "+", "(", "error", "*", "snr", ")" ]
Calculate a pixel-wise threshold image that can be used to detect sources. Parameters ---------- data : array_like The 2D array of the image. snr : float The signal-to-noise ratio per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. background : float or array_like, optional The background value(s) of the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. If the input ``data`` has been background-subtracted, then set ``background`` to ``0.0``. If `None`, then a scalar background value will be estimated using sigma-clipped statistics. error : float or array_like, optional The Gaussian 1-sigma standard deviation of the background noise in ``data``. ``error`` should include all sources of "background" error, but *exclude* the Poisson error of the sources. If ``error`` is a 2D image, then it should represent the 1-sigma background error in each pixel of ``data``. If `None`, then a scalar background rms value will be estimated using sigma-clipped statistics. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. Returns ------- threshold : 2D `~numpy.ndarray` A 2D image with the same shape as ``data`` containing the pixel-wise threshold values. See Also -------- :func:`photutils.segmentation.detect_sources` Notes ----- The ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters`` inputs are used only if it is necessary to estimate ``background`` or ``error`` using sigma-clipped background statistics. If ``background`` and ``error`` are both input, then ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters`` are ignored.
[ "Calculate", "a", "pixel", "-", "wise", "threshold", "image", "that", "can", "be", "used", "to", "detect", "sources", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/detection/core.py#L18-L126
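A usage sketch combining this threshold image with detect_sources (the See Also reference in the docstring); the import locations are the standard photutils ones at this commit, and the injected-source values are illustrative.

import numpy as np
from photutils.detection import detect_threshold
from photutils.segmentation import detect_sources

rng = np.random.RandomState(0)
data = rng.normal(loc=5., scale=2., size=(100, 100))
data[40:43, 60:63] += 50.                   # inject one small source

threshold = detect_threshold(data, snr=3.)  # per-pixel threshold image
segm = detect_sources(data, threshold, npixels=5)
print(segm.nlabels if segm is not None else 0)   # expect 1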
10,547
astropy/photutils
ah_bootstrap.py
run_cmd
def run_cmd(cmd): """ Run a command in a subprocess, given as a list of command-line arguments. Returns a ``(returncode, stdout, stderr)`` tuple. """ try: p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE) # XXX: May block if either stdout or stderr fill their buffers; # however for the commands this is currently used for that is # unlikely (they should have very brief output) stdout, stderr = p.communicate() except OSError as e: if DEBUG: raise if e.errno == errno.ENOENT: msg = 'Command not found: `{0}`'.format(' '.join(cmd)) raise _CommandNotFound(msg, cmd) else: raise _AHBootstrapSystemExit( 'An unexpected error occurred when running the ' '`{0}` command:\n{1}'.format(' '.join(cmd), str(e))) # Can fail of the default locale is not configured properly. See # https://github.com/astropy/astropy/issues/2749. For the purposes under # consideration 'latin1' is an acceptable fallback. try: stdio_encoding = locale.getdefaultlocale()[1] or 'latin1' except ValueError: # Due to an OSX oddity locale.getdefaultlocale() can also crash # depending on the user's locale/language settings. See: # http://bugs.python.org/issue18378 stdio_encoding = 'latin1' # Unlikely to fail at this point but even then let's be flexible if not isinstance(stdout, str): stdout = stdout.decode(stdio_encoding, 'replace') if not isinstance(stderr, str): stderr = stderr.decode(stdio_encoding, 'replace') return (p.returncode, stdout, stderr)
python
def run_cmd(cmd): """ Run a command in a subprocess, given as a list of command-line arguments. Returns a ``(returncode, stdout, stderr)`` tuple. """ try: p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE) # XXX: May block if either stdout or stderr fill their buffers; # however for the commands this is currently used for that is # unlikely (they should have very brief output) stdout, stderr = p.communicate() except OSError as e: if DEBUG: raise if e.errno == errno.ENOENT: msg = 'Command not found: `{0}`'.format(' '.join(cmd)) raise _CommandNotFound(msg, cmd) else: raise _AHBootstrapSystemExit( 'An unexpected error occurred when running the ' '`{0}` command:\n{1}'.format(' '.join(cmd), str(e))) # Can fail of the default locale is not configured properly. See # https://github.com/astropy/astropy/issues/2749. For the purposes under # consideration 'latin1' is an acceptable fallback. try: stdio_encoding = locale.getdefaultlocale()[1] or 'latin1' except ValueError: # Due to an OSX oddity locale.getdefaultlocale() can also crash # depending on the user's locale/language settings. See: # http://bugs.python.org/issue18378 stdio_encoding = 'latin1' # Unlikely to fail at this point but even then let's be flexible if not isinstance(stdout, str): stdout = stdout.decode(stdio_encoding, 'replace') if not isinstance(stderr, str): stderr = stderr.decode(stdio_encoding, 'replace') return (p.returncode, stdout, stderr)
[ "def", "run_cmd", "(", "cmd", ")", ":", "try", ":", "p", "=", "sp", ".", "Popen", "(", "cmd", ",", "stdout", "=", "sp", ".", "PIPE", ",", "stderr", "=", "sp", ".", "PIPE", ")", "# XXX: May block if either stdout or stderr fill their buffers;", "# however for the commands this is currently used for that is", "# unlikely (they should have very brief output)", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "except", "OSError", "as", "e", ":", "if", "DEBUG", ":", "raise", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "msg", "=", "'Command not found: `{0}`'", ".", "format", "(", "' '", ".", "join", "(", "cmd", ")", ")", "raise", "_CommandNotFound", "(", "msg", ",", "cmd", ")", "else", ":", "raise", "_AHBootstrapSystemExit", "(", "'An unexpected error occurred when running the '", "'`{0}` command:\\n{1}'", ".", "format", "(", "' '", ".", "join", "(", "cmd", ")", ",", "str", "(", "e", ")", ")", ")", "# Can fail of the default locale is not configured properly. See", "# https://github.com/astropy/astropy/issues/2749. For the purposes under", "# consideration 'latin1' is an acceptable fallback.", "try", ":", "stdio_encoding", "=", "locale", ".", "getdefaultlocale", "(", ")", "[", "1", "]", "or", "'latin1'", "except", "ValueError", ":", "# Due to an OSX oddity locale.getdefaultlocale() can also crash", "# depending on the user's locale/language settings. See:", "# http://bugs.python.org/issue18378", "stdio_encoding", "=", "'latin1'", "# Unlikely to fail at this point but even then let's be flexible", "if", "not", "isinstance", "(", "stdout", ",", "str", ")", ":", "stdout", "=", "stdout", ".", "decode", "(", "stdio_encoding", ",", "'replace'", ")", "if", "not", "isinstance", "(", "stderr", ",", "str", ")", ":", "stderr", "=", "stderr", ".", "decode", "(", "stdio_encoding", ",", "'replace'", ")", "return", "(", "p", ".", "returncode", ",", "stdout", ",", "stderr", ")" ]
Run a command in a subprocess, given as a list of command-line arguments. Returns a ``(returncode, stdout, stderr)`` tuple.
[ "Run", "a", "command", "in", "a", "subprocess", "given", "as", "a", "list", "of", "command", "-", "line", "arguments", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/ah_bootstrap.py#L768-L812
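A standalone sketch of the same (returncode, stdout, stderr) contract. It deliberately does not import ah_bootstrap (which runs bootstrap code at import time); the helper name and the example command are illustrative only.

import locale
import subprocess as sp

def run_cmd_sketch(cmd):
    # Same pattern: capture both streams, decode with the default locale,
    # and fall back to latin1 if the locale is not configured properly.
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
    stdout, stderr = p.communicate()
    try:
        encoding = locale.getdefaultlocale()[1] or 'latin1'
    except ValueError:
        encoding = 'latin1'
    return (p.returncode,
            stdout.decode(encoding, 'replace'),
            stderr.decode(encoding, 'replace'))

returncode, out, err = run_cmd_sketch(['git', '--version'])  # any command on PATH
print(returncode, out.strip())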
10,548
astropy/photutils
photutils/aperture/ellipse.py
EllipticalAperture.to_sky
def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyEllipticalAperture` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyEllipticalAperture` object A `SkyEllipticalAperture` object. """ sky_params = self._to_sky_params(wcs, mode=mode) return SkyEllipticalAperture(**sky_params)
python
def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyEllipticalAperture` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyEllipticalAperture` object A `SkyEllipticalAperture` object. """ sky_params = self._to_sky_params(wcs, mode=mode) return SkyEllipticalAperture(**sky_params)
[ "def", "to_sky", "(", "self", ",", "wcs", ",", "mode", "=", "'all'", ")", ":", "sky_params", "=", "self", ".", "_to_sky_params", "(", "wcs", ",", "mode", "=", "mode", ")", "return", "SkyEllipticalAperture", "(", "*", "*", "sky_params", ")" ]
Convert the aperture to a `SkyEllipticalAperture` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyEllipticalAperture` object A `SkyEllipticalAperture` object.
[ "Convert", "the", "aperture", "to", "a", "SkyEllipticalAperture", "object", "defined", "in", "celestial", "coordinates", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/ellipse.py#L187-L209
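A pixel-to-sky sketch for this method, using a simple TAN WCS with a ~1 arcsec/pixel scale; EllipticalAnnulus.to_sky in the next record is used the same way. The top-level photutils import is the documented location at this commit.

import numpy as np
from astropy.wcs import WCS
from photutils import EllipticalAperture

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [50., 50.]
wcs.wcs.crval = [150.0, 2.5]
wcs.wcs.cdelt = [-1. / 3600., 1. / 3600.]   # ~1 arcsec per pixel

aper = EllipticalAperture((49., 49.), a=10., b=5., theta=np.pi / 4.)
sky_aper = aper.to_sky(wcs)       # SkyEllipticalAperture
print(sky_aper.a, sky_aper.b)     # angular semi-axes (~10 and ~5 arcsec)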
10,549
astropy/photutils
photutils/aperture/ellipse.py
EllipticalAnnulus.to_sky
def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyEllipticalAnnulus` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyEllipticalAnnulus` object A `SkyEllipticalAnnulus` object. """ sky_params = self._to_sky_params(wcs, mode=mode) return SkyEllipticalAnnulus(**sky_params)
python
def to_sky(self, wcs, mode='all'): """ Convert the aperture to a `SkyEllipticalAnnulus` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyEllipticalAnnulus` object A `SkyEllipticalAnnulus` object. """ sky_params = self._to_sky_params(wcs, mode=mode) return SkyEllipticalAnnulus(**sky_params)
[ "def", "to_sky", "(", "self", ",", "wcs", ",", "mode", "=", "'all'", ")", ":", "sky_params", "=", "self", ".", "_to_sky_params", "(", "wcs", ",", "mode", "=", "mode", ")", "return", "SkyEllipticalAnnulus", "(", "*", "*", "sky_params", ")" ]
Convert the aperture to a `SkyEllipticalAnnulus` object defined in celestial coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `SkyEllipticalAnnulus` object A `SkyEllipticalAnnulus` object.
[ "Convert", "the", "aperture", "to", "a", "SkyEllipticalAnnulus", "object", "defined", "in", "celestial", "coordinates", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/ellipse.py#L318-L340
10,550
astropy/photutils
photutils/aperture/ellipse.py
SkyEllipticalAperture.to_pixel
def to_pixel(self, wcs, mode='all'): """ Convert the aperture to an `EllipticalAperture` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `EllipticalAperture` object An `EllipticalAperture` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return EllipticalAperture(**pixel_params)
python
def to_pixel(self, wcs, mode='all'): """ Convert the aperture to an `EllipticalAperture` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `EllipticalAperture` object An `EllipticalAperture` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return EllipticalAperture(**pixel_params)
[ "def", "to_pixel", "(", "self", ",", "wcs", ",", "mode", "=", "'all'", ")", ":", "pixel_params", "=", "self", ".", "_to_pixel_params", "(", "wcs", ",", "mode", "=", "mode", ")", "return", "EllipticalAperture", "(", "*", "*", "pixel_params", ")" ]
Convert the aperture to an `EllipticalAperture` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `EllipticalAperture` object An `EllipticalAperture` object.
[ "Convert", "the", "aperture", "to", "an", "EllipticalAperture", "object", "defined", "in", "pixel", "coordinates", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/ellipse.py#L385-L407
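The inverse direction, sketched under the same assumptions (import path, placeholder WCS file, illustrative coordinates): a sky-defined aperture mapped back to pixel coordinates.

    import astropy.units as u
    from astropy.coordinates import SkyCoord
    from astropy.io import fits
    from astropy.wcs import WCS
    from photutils.aperture import SkyEllipticalAperture  # assumed import path

    wcs = WCS(fits.getheader('image.fits'))  # placeholder file name
    position = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg)
    sky_aperture = SkyEllipticalAperture(position, a=3.0 * u.arcsec,
                                         b=1.5 * u.arcsec, theta=30 * u.deg)
    pixel_aperture = sky_aperture.to_pixel(wcs, mode='all')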
10,551
astropy/photutils
photutils/aperture/ellipse.py
SkyEllipticalAnnulus.to_pixel
def to_pixel(self, wcs, mode='all'): """ Convert the aperture to an `EllipticalAnnulus` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `EllipticalAnnulus` object An `EllipticalAnnulus` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return EllipticalAnnulus(**pixel_params)
python
def to_pixel(self, wcs, mode='all'): """ Convert the aperture to an `EllipticalAnnulus` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `EllipticalAnnulus` object An `EllipticalAnnulus` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return EllipticalAnnulus(**pixel_params)
[ "def", "to_pixel", "(", "self", ",", "wcs", ",", "mode", "=", "'all'", ")", ":", "pixel_params", "=", "self", ".", "_to_pixel_params", "(", "wcs", ",", "mode", "=", "mode", ")", "return", "EllipticalAnnulus", "(", "*", "*", "pixel_params", ")" ]
Convert the aperture to an `EllipticalAnnulus` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `EllipticalAnnulus` object An `EllipticalAnnulus` object.
[ "Convert", "the", "aperture", "to", "an", "EllipticalAnnulus", "object", "defined", "in", "pixel", "coordinates", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/ellipse.py#L465-L487
10,552
astropy/photutils
photutils/isophote/geometry.py
_area
def _area(sma, eps, phi, r): """ Compute elliptical sector area. """ aux = r * math.cos(phi) / sma signal = aux / abs(aux) if abs(aux) >= 1.: aux = signal return abs(sma**2 * (1.-eps) / 2. * math.acos(aux))
python
def _area(sma, eps, phi, r): """ Compute elliptical sector area. """ aux = r * math.cos(phi) / sma signal = aux / abs(aux) if abs(aux) >= 1.: aux = signal return abs(sma**2 * (1.-eps) / 2. * math.acos(aux))
[ "def", "_area", "(", "sma", ",", "eps", ",", "phi", ",", "r", ")", ":", "aux", "=", "r", "*", "math", ".", "cos", "(", "phi", ")", "/", "sma", "signal", "=", "aux", "/", "abs", "(", "aux", ")", "if", "abs", "(", "aux", ")", ">=", "1.", ":", "aux", "=", "signal", "return", "abs", "(", "sma", "**", "2", "*", "(", "1.", "-", "eps", ")", "/", "2.", "*", "math", ".", "acos", "(", "aux", ")", ")" ]
Compute elliptical sector area.
[ "Compute", "elliptical", "sector", "area", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/geometry.py#L50-L59
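The sector-area formula used by `_area` can be checked standalone; the snippet below reimplements it (with the acos argument clipped, as in the original) and verifies the quarter-circle limit for eps = 0. The function name and test values are assumptions for illustration only.

    import math

    def sector_area(sma, eps, phi, r):
        # Area swept between polar angle 0 and phi on an ellipse with
        # semimajor axis sma and ellipticity eps; clip the acos argument.
        aux = r * math.cos(phi) / sma
        aux = max(-1.0, min(1.0, aux))
        return abs(sma**2 * (1.0 - eps) / 2.0 * math.acos(aux))

    # Circle (eps=0), r=sma, phi=pi/2: the formula gives a quarter of the
    # full circle area, pi * sma**2 / 4.
    print(sector_area(10.0, 0.0, math.pi / 2, 10.0))  # ~78.54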
10,553
astropy/photutils
photutils/isophote/geometry.py
EllipseGeometry.find_center
def find_center(self, image, threshold=0.1, verbose=True): """ Find the center of a galaxy. If the algorithm is successful the (x, y) coordinates in this `~photutils.isophote.EllipseGeometry` (i.e. the ``x0`` and ``y0`` attributes) instance will be modified. The isophote fit algorithm requires an initial guess for the galaxy center (x, y) coordinates and these coordinates must be close to the actual galaxy center for the isophote fit to work. This method provides can provide an initial guess for the galaxy center coordinates. See the **Notes** section below for more details. Parameters ---------- image : 2D `~numpy.ndarray` The image array. Masked arrays are not recognized here. This assumes that centering should always be done on valid pixels. threshold : float, optional The centerer threshold. To turn off the centerer, set this to a large value (i.e. >> 1). The default is 0.1. verbose : bool, optional Whether to print object centering information. The default is `True`. Notes ----- The centerer function scans a 10x10 window centered on the (x, y) coordinates in the `~photutils.isophote.EllipseGeometry` instance passed to the constructor of the `~photutils.isophote.Ellipse` class. If any of the `~photutils.isophote.EllipseGeometry` (x, y) coordinates are `None`, the center of the input image frame is used. If the center acquisition is successful, the `~photutils.isophote.EllipseGeometry` instance is modified in place to reflect the solution of the object centerer algorithm. In some cases the object centerer algorithm may fail even though there is enough signal-to-noise to start a fit (e.g. objects with very high ellipticity). In those cases the sensitivity of the algorithm can be decreased by decreasing the value of the object centerer threshold parameter. The centerer works by looking where a quantity akin to a signal-to-noise ratio is maximized within the 10x10 window. The centerer can thus be shut off entirely by setting the threshold to a large value (i.e. >> 1; meaning no location inside the search window will achieve that signal-to-noise ratio). """ self._centerer_mask_half_size = len(IN_MASK) / 2 self.centerer_threshold = threshold # number of pixels in each mask sz = len(IN_MASK) self._centerer_ones_in = np.ma.masked_array(np.ones(shape=(sz, sz)), mask=IN_MASK) self._centerer_ones_out = np.ma.masked_array(np.ones(shape=(sz, sz)), mask=OUT_MASK) self._centerer_in_mask_npix = np.sum(self._centerer_ones_in) self._centerer_out_mask_npix = np.sum(self._centerer_ones_out) # Check if center coordinates point to somewhere inside the frame. # If not, set then to frame center. shape = image.shape _x0 = self.x0 _y0 = self.y0 if (_x0 is None or _x0 < 0 or _x0 >= shape[1] or _y0 is None or _y0 < 0 or _y0 >= shape[0]): _x0 = shape[1] / 2 _y0 = shape[0] / 2 max_fom = 0. max_i = 0 max_j = 0 # scan all positions inside window window_half_size = 5 for i in range(int(_x0 - window_half_size), int(_x0 + window_half_size) + 1): for j in range(int(_y0 - window_half_size), int(_y0 + window_half_size) + 1): # ensure that it stays inside image frame i1 = int(max(0, i - self._centerer_mask_half_size)) j1 = int(max(0, j - self._centerer_mask_half_size)) i2 = int(min(shape[1] - 1, i + self._centerer_mask_half_size)) j2 = int(min(shape[0] - 1, j + self._centerer_mask_half_size)) window = image[j1:j2, i1:i2] # averages in inner and outer regions. 
inner = np.ma.masked_array(window, mask=IN_MASK) outer = np.ma.masked_array(window, mask=OUT_MASK) inner_avg = np.sum(inner) / self._centerer_in_mask_npix outer_avg = np.sum(outer) / self._centerer_out_mask_npix # standard deviation and figure of merit inner_std = np.std(inner) outer_std = np.std(outer) stddev = np.sqrt(inner_std**2 + outer_std**2) fom = (inner_avg - outer_avg) / stddev if fom > max_fom: max_fom = fom max_i = i max_j = j # figure of merit > threshold: update geometry with new coordinates. if max_fom > threshold: self.x0 = float(max_i) self.y0 = float(max_j) if verbose: log.info("Found center at x0 = {0:5.1f}, y0 = {1:5.1f}" .format(self.x0, self.y0)) else: if verbose: log.info('Result is below the threshold -- keeping the ' 'original coordinates.')
python
def find_center(self, image, threshold=0.1, verbose=True): """ Find the center of a galaxy. If the algorithm is successful the (x, y) coordinates in this `~photutils.isophote.EllipseGeometry` (i.e. the ``x0`` and ``y0`` attributes) instance will be modified. The isophote fit algorithm requires an initial guess for the galaxy center (x, y) coordinates and these coordinates must be close to the actual galaxy center for the isophote fit to work. This method provides can provide an initial guess for the galaxy center coordinates. See the **Notes** section below for more details. Parameters ---------- image : 2D `~numpy.ndarray` The image array. Masked arrays are not recognized here. This assumes that centering should always be done on valid pixels. threshold : float, optional The centerer threshold. To turn off the centerer, set this to a large value (i.e. >> 1). The default is 0.1. verbose : bool, optional Whether to print object centering information. The default is `True`. Notes ----- The centerer function scans a 10x10 window centered on the (x, y) coordinates in the `~photutils.isophote.EllipseGeometry` instance passed to the constructor of the `~photutils.isophote.Ellipse` class. If any of the `~photutils.isophote.EllipseGeometry` (x, y) coordinates are `None`, the center of the input image frame is used. If the center acquisition is successful, the `~photutils.isophote.EllipseGeometry` instance is modified in place to reflect the solution of the object centerer algorithm. In some cases the object centerer algorithm may fail even though there is enough signal-to-noise to start a fit (e.g. objects with very high ellipticity). In those cases the sensitivity of the algorithm can be decreased by decreasing the value of the object centerer threshold parameter. The centerer works by looking where a quantity akin to a signal-to-noise ratio is maximized within the 10x10 window. The centerer can thus be shut off entirely by setting the threshold to a large value (i.e. >> 1; meaning no location inside the search window will achieve that signal-to-noise ratio). """ self._centerer_mask_half_size = len(IN_MASK) / 2 self.centerer_threshold = threshold # number of pixels in each mask sz = len(IN_MASK) self._centerer_ones_in = np.ma.masked_array(np.ones(shape=(sz, sz)), mask=IN_MASK) self._centerer_ones_out = np.ma.masked_array(np.ones(shape=(sz, sz)), mask=OUT_MASK) self._centerer_in_mask_npix = np.sum(self._centerer_ones_in) self._centerer_out_mask_npix = np.sum(self._centerer_ones_out) # Check if center coordinates point to somewhere inside the frame. # If not, set then to frame center. shape = image.shape _x0 = self.x0 _y0 = self.y0 if (_x0 is None or _x0 < 0 or _x0 >= shape[1] or _y0 is None or _y0 < 0 or _y0 >= shape[0]): _x0 = shape[1] / 2 _y0 = shape[0] / 2 max_fom = 0. max_i = 0 max_j = 0 # scan all positions inside window window_half_size = 5 for i in range(int(_x0 - window_half_size), int(_x0 + window_half_size) + 1): for j in range(int(_y0 - window_half_size), int(_y0 + window_half_size) + 1): # ensure that it stays inside image frame i1 = int(max(0, i - self._centerer_mask_half_size)) j1 = int(max(0, j - self._centerer_mask_half_size)) i2 = int(min(shape[1] - 1, i + self._centerer_mask_half_size)) j2 = int(min(shape[0] - 1, j + self._centerer_mask_half_size)) window = image[j1:j2, i1:i2] # averages in inner and outer regions. 
inner = np.ma.masked_array(window, mask=IN_MASK) outer = np.ma.masked_array(window, mask=OUT_MASK) inner_avg = np.sum(inner) / self._centerer_in_mask_npix outer_avg = np.sum(outer) / self._centerer_out_mask_npix # standard deviation and figure of merit inner_std = np.std(inner) outer_std = np.std(outer) stddev = np.sqrt(inner_std**2 + outer_std**2) fom = (inner_avg - outer_avg) / stddev if fom > max_fom: max_fom = fom max_i = i max_j = j # figure of merit > threshold: update geometry with new coordinates. if max_fom > threshold: self.x0 = float(max_i) self.y0 = float(max_j) if verbose: log.info("Found center at x0 = {0:5.1f}, y0 = {1:5.1f}" .format(self.x0, self.y0)) else: if verbose: log.info('Result is below the threshold -- keeping the ' 'original coordinates.')
[ "def", "find_center", "(", "self", ",", "image", ",", "threshold", "=", "0.1", ",", "verbose", "=", "True", ")", ":", "self", ".", "_centerer_mask_half_size", "=", "len", "(", "IN_MASK", ")", "/", "2", "self", ".", "centerer_threshold", "=", "threshold", "# number of pixels in each mask", "sz", "=", "len", "(", "IN_MASK", ")", "self", ".", "_centerer_ones_in", "=", "np", ".", "ma", ".", "masked_array", "(", "np", ".", "ones", "(", "shape", "=", "(", "sz", ",", "sz", ")", ")", ",", "mask", "=", "IN_MASK", ")", "self", ".", "_centerer_ones_out", "=", "np", ".", "ma", ".", "masked_array", "(", "np", ".", "ones", "(", "shape", "=", "(", "sz", ",", "sz", ")", ")", ",", "mask", "=", "OUT_MASK", ")", "self", ".", "_centerer_in_mask_npix", "=", "np", ".", "sum", "(", "self", ".", "_centerer_ones_in", ")", "self", ".", "_centerer_out_mask_npix", "=", "np", ".", "sum", "(", "self", ".", "_centerer_ones_out", ")", "# Check if center coordinates point to somewhere inside the frame.", "# If not, set then to frame center.", "shape", "=", "image", ".", "shape", "_x0", "=", "self", ".", "x0", "_y0", "=", "self", ".", "y0", "if", "(", "_x0", "is", "None", "or", "_x0", "<", "0", "or", "_x0", ">=", "shape", "[", "1", "]", "or", "_y0", "is", "None", "or", "_y0", "<", "0", "or", "_y0", ">=", "shape", "[", "0", "]", ")", ":", "_x0", "=", "shape", "[", "1", "]", "/", "2", "_y0", "=", "shape", "[", "0", "]", "/", "2", "max_fom", "=", "0.", "max_i", "=", "0", "max_j", "=", "0", "# scan all positions inside window", "window_half_size", "=", "5", "for", "i", "in", "range", "(", "int", "(", "_x0", "-", "window_half_size", ")", ",", "int", "(", "_x0", "+", "window_half_size", ")", "+", "1", ")", ":", "for", "j", "in", "range", "(", "int", "(", "_y0", "-", "window_half_size", ")", ",", "int", "(", "_y0", "+", "window_half_size", ")", "+", "1", ")", ":", "# ensure that it stays inside image frame", "i1", "=", "int", "(", "max", "(", "0", ",", "i", "-", "self", ".", "_centerer_mask_half_size", ")", ")", "j1", "=", "int", "(", "max", "(", "0", ",", "j", "-", "self", ".", "_centerer_mask_half_size", ")", ")", "i2", "=", "int", "(", "min", "(", "shape", "[", "1", "]", "-", "1", ",", "i", "+", "self", ".", "_centerer_mask_half_size", ")", ")", "j2", "=", "int", "(", "min", "(", "shape", "[", "0", "]", "-", "1", ",", "j", "+", "self", ".", "_centerer_mask_half_size", ")", ")", "window", "=", "image", "[", "j1", ":", "j2", ",", "i1", ":", "i2", "]", "# averages in inner and outer regions.", "inner", "=", "np", ".", "ma", ".", "masked_array", "(", "window", ",", "mask", "=", "IN_MASK", ")", "outer", "=", "np", ".", "ma", ".", "masked_array", "(", "window", ",", "mask", "=", "OUT_MASK", ")", "inner_avg", "=", "np", ".", "sum", "(", "inner", ")", "/", "self", ".", "_centerer_in_mask_npix", "outer_avg", "=", "np", ".", "sum", "(", "outer", ")", "/", "self", ".", "_centerer_out_mask_npix", "# standard deviation and figure of merit", "inner_std", "=", "np", ".", "std", "(", "inner", ")", "outer_std", "=", "np", ".", "std", "(", "outer", ")", "stddev", "=", "np", ".", "sqrt", "(", "inner_std", "**", "2", "+", "outer_std", "**", "2", ")", "fom", "=", "(", "inner_avg", "-", "outer_avg", ")", "/", "stddev", "if", "fom", ">", "max_fom", ":", "max_fom", "=", "fom", "max_i", "=", "i", "max_j", "=", "j", "# figure of merit > threshold: update geometry with new coordinates.", "if", "max_fom", ">", "threshold", ":", "self", ".", "x0", "=", "float", "(", "max_i", ")", "self", ".", "y0", "=", "float", "(", "max_j", ")", "if", "verbose", ":", 
"log", ".", "info", "(", "\"Found center at x0 = {0:5.1f}, y0 = {1:5.1f}\"", ".", "format", "(", "self", ".", "x0", ",", "self", ".", "y0", ")", ")", "else", ":", "if", "verbose", ":", "log", ".", "info", "(", "'Result is below the threshold -- keeping the '", "'original coordinates.'", ")" ]
Find the center of a galaxy. If the algorithm is successful, the (x, y) coordinates in this `~photutils.isophote.EllipseGeometry` (i.e. the ``x0`` and ``y0`` attributes) instance will be modified. The isophote fit algorithm requires an initial guess for the galaxy center (x, y) coordinates and these coordinates must be close to the actual galaxy center for the isophote fit to work. This method can provide an initial guess for the galaxy center coordinates. See the **Notes** section below for more details. Parameters ---------- image : 2D `~numpy.ndarray` The image array. Masked arrays are not recognized here. This assumes that centering should always be done on valid pixels. threshold : float, optional The centerer threshold. To turn off the centerer, set this to a large value (i.e. >> 1). The default is 0.1. verbose : bool, optional Whether to print object centering information. The default is `True`. Notes ----- The centerer function scans a 10x10 window centered on the (x, y) coordinates in the `~photutils.isophote.EllipseGeometry` instance passed to the constructor of the `~photutils.isophote.Ellipse` class. If any of the `~photutils.isophote.EllipseGeometry` (x, y) coordinates are `None`, the center of the input image frame is used. If the center acquisition is successful, the `~photutils.isophote.EllipseGeometry` instance is modified in place to reflect the solution of the object centerer algorithm. In some cases the object centerer algorithm may fail even though there is enough signal-to-noise to start a fit (e.g. objects with very high ellipticity). In those cases the sensitivity of the algorithm can be decreased by decreasing the value of the object centerer threshold parameter. The centerer works by looking where a quantity akin to a signal-to-noise ratio is maximized within the 10x10 window. The centerer can thus be shut off entirely by setting the threshold to a large value (i.e. >> 1; meaning no location inside the search window will achieve that signal-to-noise ratio).
[ "Find", "the", "center", "of", "a", "galaxy", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/geometry.py#L133-L254
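A usage sketch for the centerer. The import path, the synthetic image, and the geometry parameters are assumptions; the initial guess is deliberately offset by a few pixels so that find_center has something to refine in place.

    import numpy as np
    from astropy.modeling.models import Gaussian2D
    from photutils.isophote import EllipseGeometry  # assumed import path

    # Synthetic galaxy-like image with its peak near (x, y) = (52, 48).
    y, x = np.mgrid[0:101, 0:101]
    image = Gaussian2D(100.0, 52.0, 48.0, 10.0, 6.0, theta=0.5)(x, y)

    # Initial guess a few pixels off; sma/eps/pa are illustrative values.
    geometry = EllipseGeometry(x0=48.0, y0=52.0, sma=15.0, eps=0.4, pa=0.5)
    geometry.find_center(image, threshold=0.1)
    print(geometry.x0, geometry.y0)  # updated if the figure of merit exceeds 0.1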
10,554
astropy/photutils
photutils/isophote/geometry.py
EllipseGeometry.radius
def radius(self, angle): """ Calculate the polar radius for a given polar angle. Parameters ---------- angle : float The polar angle (radians). Returns ------- radius : float The polar radius (pixels). """ return (self.sma * (1. - self.eps) / np.sqrt(((1. - self.eps) * np.cos(angle))**2 + (np.sin(angle))**2))
python
def radius(self, angle): """ Calculate the polar radius for a given polar angle. Parameters ---------- angle : float The polar angle (radians). Returns ------- radius : float The polar radius (pixels). """ return (self.sma * (1. - self.eps) / np.sqrt(((1. - self.eps) * np.cos(angle))**2 + (np.sin(angle))**2))
[ "def", "radius", "(", "self", ",", "angle", ")", ":", "return", "(", "self", ".", "sma", "*", "(", "1.", "-", "self", ".", "eps", ")", "/", "np", ".", "sqrt", "(", "(", "(", "1.", "-", "self", ".", "eps", ")", "*", "np", ".", "cos", "(", "angle", ")", ")", "**", "2", "+", "(", "np", ".", "sin", "(", "angle", ")", ")", "**", "2", ")", ")" ]
Calculate the polar radius for a given polar angle. Parameters ---------- angle : float The polar angle (radians). Returns ------- radius : float The polar radius (pixels).
[ "Calculate", "the", "polar", "radius", "for", "a", "given", "polar", "angle", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/geometry.py#L256-L273
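The polar-radius relation in `radius` reduces to the expected limits along the semimajor and semiminor axes; the check below is a standalone sketch of the same formula with assumed values.

    import numpy as np

    def polar_radius(sma, eps, angle):
        # Radius of an ellipse (semimajor axis sma, ellipticity eps) at a
        # polar angle measured from the semimajor axis.
        return sma * (1.0 - eps) / np.sqrt(((1.0 - eps) * np.cos(angle))**2
                                           + np.sin(angle)**2)

    sma, eps = 20.0, 0.3
    print(polar_radius(sma, eps, 0.0))        # 20.0 -> semimajor axis
    print(polar_radius(sma, eps, np.pi / 2))  # 14.0 -> sma * (1 - eps)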
10,555
astropy/photutils
photutils/isophote/geometry.py
EllipseGeometry.initialize_sector_geometry
def initialize_sector_geometry(self, phi): """ Initialize geometry attributes associated with an elliptical sector at the given polar angle ``phi``. This function computes: * the four vertices that define the elliptical sector on the pixel array. * the sector area (saved in the ``sector_area`` attribute) * the sector angular width (saved in ``sector_angular_width`` attribute) Parameters ---------- phi : float The polar angle (radians) where the sector is located. Returns ------- x, y : 1D `~numpy.ndarray` The x and y coordinates of each vertex as 1D arrays. """ # These polar radii bound the region between the inner # and outer ellipses that define the sector. sma1, sma2 = self.bounding_ellipses() eps_ = 1. - self.eps # polar vector at one side of the elliptical sector self._phi1 = phi - self.sector_angular_width / 2. r1 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2 + (math.sin(self._phi1))**2)) r2 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2 + (math.sin(self._phi1))**2)) # polar vector at the other side of the elliptical sector self._phi2 = phi + self.sector_angular_width / 2. r3 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2 + (math.sin(self._phi2))**2)) r4 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2 + (math.sin(self._phi2))**2)) # sector area sa1 = _area(sma1, self.eps, self._phi1, r1) sa2 = _area(sma2, self.eps, self._phi1, r2) sa3 = _area(sma2, self.eps, self._phi2, r3) sa4 = _area(sma1, self.eps, self._phi2, r4) self.sector_area = abs((sa3 - sa2) - (sa4 - sa1)) # angular width of sector. It is calculated such that the sectors # come out with roughly constant area along the ellipse. self.sector_angular_width = max(min((self._area_factor / (r3 - r4) / r4), self._phi_max), self._phi_min) # compute the 4 vertices that define the elliptical sector. vertex_x = np.zeros(shape=4, dtype=float) vertex_y = np.zeros(shape=4, dtype=float) # vertices are labelled in counterclockwise sequence vertex_x[0:2] = np.array([r1, r2]) * math.cos(self._phi1 + self.pa) vertex_x[2:4] = np.array([r4, r3]) * math.cos(self._phi2 + self.pa) vertex_y[0:2] = np.array([r1, r2]) * math.sin(self._phi1 + self.pa) vertex_y[2:4] = np.array([r4, r3]) * math.sin(self._phi2 + self.pa) vertex_x += self.x0 vertex_y += self.y0 return vertex_x, vertex_y
python
def initialize_sector_geometry(self, phi): """ Initialize geometry attributes associated with an elliptical sector at the given polar angle ``phi``. This function computes: * the four vertices that define the elliptical sector on the pixel array. * the sector area (saved in the ``sector_area`` attribute) * the sector angular width (saved in ``sector_angular_width`` attribute) Parameters ---------- phi : float The polar angle (radians) where the sector is located. Returns ------- x, y : 1D `~numpy.ndarray` The x and y coordinates of each vertex as 1D arrays. """ # These polar radii bound the region between the inner # and outer ellipses that define the sector. sma1, sma2 = self.bounding_ellipses() eps_ = 1. - self.eps # polar vector at one side of the elliptical sector self._phi1 = phi - self.sector_angular_width / 2. r1 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2 + (math.sin(self._phi1))**2)) r2 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2 + (math.sin(self._phi1))**2)) # polar vector at the other side of the elliptical sector self._phi2 = phi + self.sector_angular_width / 2. r3 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2 + (math.sin(self._phi2))**2)) r4 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2 + (math.sin(self._phi2))**2)) # sector area sa1 = _area(sma1, self.eps, self._phi1, r1) sa2 = _area(sma2, self.eps, self._phi1, r2) sa3 = _area(sma2, self.eps, self._phi2, r3) sa4 = _area(sma1, self.eps, self._phi2, r4) self.sector_area = abs((sa3 - sa2) - (sa4 - sa1)) # angular width of sector. It is calculated such that the sectors # come out with roughly constant area along the ellipse. self.sector_angular_width = max(min((self._area_factor / (r3 - r4) / r4), self._phi_max), self._phi_min) # compute the 4 vertices that define the elliptical sector. vertex_x = np.zeros(shape=4, dtype=float) vertex_y = np.zeros(shape=4, dtype=float) # vertices are labelled in counterclockwise sequence vertex_x[0:2] = np.array([r1, r2]) * math.cos(self._phi1 + self.pa) vertex_x[2:4] = np.array([r4, r3]) * math.cos(self._phi2 + self.pa) vertex_y[0:2] = np.array([r1, r2]) * math.sin(self._phi1 + self.pa) vertex_y[2:4] = np.array([r4, r3]) * math.sin(self._phi2 + self.pa) vertex_x += self.x0 vertex_y += self.y0 return vertex_x, vertex_y
[ "def", "initialize_sector_geometry", "(", "self", ",", "phi", ")", ":", "# These polar radii bound the region between the inner", "# and outer ellipses that define the sector.", "sma1", ",", "sma2", "=", "self", ".", "bounding_ellipses", "(", ")", "eps_", "=", "1.", "-", "self", ".", "eps", "# polar vector at one side of the elliptical sector", "self", ".", "_phi1", "=", "phi", "-", "self", ".", "sector_angular_width", "/", "2.", "r1", "=", "(", "sma1", "*", "eps_", "/", "math", ".", "sqrt", "(", "(", "eps_", "*", "math", ".", "cos", "(", "self", ".", "_phi1", ")", ")", "**", "2", "+", "(", "math", ".", "sin", "(", "self", ".", "_phi1", ")", ")", "**", "2", ")", ")", "r2", "=", "(", "sma2", "*", "eps_", "/", "math", ".", "sqrt", "(", "(", "eps_", "*", "math", ".", "cos", "(", "self", ".", "_phi1", ")", ")", "**", "2", "+", "(", "math", ".", "sin", "(", "self", ".", "_phi1", ")", ")", "**", "2", ")", ")", "# polar vector at the other side of the elliptical sector", "self", ".", "_phi2", "=", "phi", "+", "self", ".", "sector_angular_width", "/", "2.", "r3", "=", "(", "sma2", "*", "eps_", "/", "math", ".", "sqrt", "(", "(", "eps_", "*", "math", ".", "cos", "(", "self", ".", "_phi2", ")", ")", "**", "2", "+", "(", "math", ".", "sin", "(", "self", ".", "_phi2", ")", ")", "**", "2", ")", ")", "r4", "=", "(", "sma1", "*", "eps_", "/", "math", ".", "sqrt", "(", "(", "eps_", "*", "math", ".", "cos", "(", "self", ".", "_phi2", ")", ")", "**", "2", "+", "(", "math", ".", "sin", "(", "self", ".", "_phi2", ")", ")", "**", "2", ")", ")", "# sector area", "sa1", "=", "_area", "(", "sma1", ",", "self", ".", "eps", ",", "self", ".", "_phi1", ",", "r1", ")", "sa2", "=", "_area", "(", "sma2", ",", "self", ".", "eps", ",", "self", ".", "_phi1", ",", "r2", ")", "sa3", "=", "_area", "(", "sma2", ",", "self", ".", "eps", ",", "self", ".", "_phi2", ",", "r3", ")", "sa4", "=", "_area", "(", "sma1", ",", "self", ".", "eps", ",", "self", ".", "_phi2", ",", "r4", ")", "self", ".", "sector_area", "=", "abs", "(", "(", "sa3", "-", "sa2", ")", "-", "(", "sa4", "-", "sa1", ")", ")", "# angular width of sector. 
It is calculated such that the sectors", "# come out with roughly constant area along the ellipse.", "self", ".", "sector_angular_width", "=", "max", "(", "min", "(", "(", "self", ".", "_area_factor", "/", "(", "r3", "-", "r4", ")", "/", "r4", ")", ",", "self", ".", "_phi_max", ")", ",", "self", ".", "_phi_min", ")", "# compute the 4 vertices that define the elliptical sector.", "vertex_x", "=", "np", ".", "zeros", "(", "shape", "=", "4", ",", "dtype", "=", "float", ")", "vertex_y", "=", "np", ".", "zeros", "(", "shape", "=", "4", ",", "dtype", "=", "float", ")", "# vertices are labelled in counterclockwise sequence", "vertex_x", "[", "0", ":", "2", "]", "=", "np", ".", "array", "(", "[", "r1", ",", "r2", "]", ")", "*", "math", ".", "cos", "(", "self", ".", "_phi1", "+", "self", ".", "pa", ")", "vertex_x", "[", "2", ":", "4", "]", "=", "np", ".", "array", "(", "[", "r4", ",", "r3", "]", ")", "*", "math", ".", "cos", "(", "self", ".", "_phi2", "+", "self", ".", "pa", ")", "vertex_y", "[", "0", ":", "2", "]", "=", "np", ".", "array", "(", "[", "r1", ",", "r2", "]", ")", "*", "math", ".", "sin", "(", "self", ".", "_phi1", "+", "self", ".", "pa", ")", "vertex_y", "[", "2", ":", "4", "]", "=", "np", ".", "array", "(", "[", "r4", ",", "r3", "]", ")", "*", "math", ".", "sin", "(", "self", ".", "_phi2", "+", "self", ".", "pa", ")", "vertex_x", "+=", "self", ".", "x0", "vertex_y", "+=", "self", ".", "y0", "return", "vertex_x", ",", "vertex_y" ]
Initialize geometry attributes associated with an elliptical sector at the given polar angle ``phi``. This function computes: * the four vertices that define the elliptical sector on the pixel array. * the sector area (saved in the ``sector_area`` attribute) * the sector angular width (saved in ``sector_angular_width`` attribute) Parameters ---------- phi : float The polar angle (radians) where the sector is located. Returns ------- x, y : 1D `~numpy.ndarray` The x and y coordinates of each vertex as 1D arrays.
[ "Initialize", "geometry", "attributes", "associated", "with", "an", "elliptical", "sector", "at", "the", "given", "polar", "angle", "phi", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/geometry.py#L275-L344
10,556
astropy/photutils
photutils/isophote/geometry.py
EllipseGeometry.bounding_ellipses
def bounding_ellipses(self): """ Compute the semimajor axis of the two ellipses that bound the annulus where integrations take place. Returns ------- sma1, sma2 : float The smaller and larger values of semimajor axis length that define the annulus bounding ellipses. """ if (self.linear_growth): a1 = self.sma - self.astep / 2. a2 = self.sma + self.astep / 2. else: a1 = self.sma * (1. - self.astep / 2.) a2 = self.sma * (1. + self.astep / 2.) return a1, a2
python
def bounding_ellipses(self): """ Compute the semimajor axis of the two ellipses that bound the annulus where integrations take place. Returns ------- sma1, sma2 : float The smaller and larger values of semimajor axis length that define the annulus bounding ellipses. """ if (self.linear_growth): a1 = self.sma - self.astep / 2. a2 = self.sma + self.astep / 2. else: a1 = self.sma * (1. - self.astep / 2.) a2 = self.sma * (1. + self.astep / 2.) return a1, a2
[ "def", "bounding_ellipses", "(", "self", ")", ":", "if", "(", "self", ".", "linear_growth", ")", ":", "a1", "=", "self", ".", "sma", "-", "self", ".", "astep", "/", "2.", "a2", "=", "self", ".", "sma", "+", "self", ".", "astep", "/", "2.", "else", ":", "a1", "=", "self", ".", "sma", "*", "(", "1.", "-", "self", ".", "astep", "/", "2.", ")", "a2", "=", "self", ".", "sma", "*", "(", "1.", "+", "self", ".", "astep", "/", "2.", ")", "return", "a1", ",", "a2" ]
Compute the semimajor axis of the two ellipses that bound the annulus where integrations take place. Returns ------- sma1, sma2 : float The smaller and larger values of semimajor axis length that define the annulus bounding ellipses.
[ "Compute", "the", "semimajor", "axis", "of", "the", "two", "ellipses", "that", "bound", "the", "annulus", "where", "integrations", "take", "place", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/geometry.py#L346-L365
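The two growth modes in `bounding_ellipses` differ only in whether the step is additive or multiplicative; the arithmetic below mirrors that logic outside the class (the values are illustrative assumptions).

    sma, astep = 10.0, 0.1

    # linear growth: fixed step in pixels
    a1_lin, a2_lin = sma - astep / 2.0, sma + astep / 2.0              # 9.95, 10.05

    # geometric growth (default): step as a fraction of sma
    a1_geo, a2_geo = sma * (1.0 - astep / 2.0), sma * (1.0 + astep / 2.0)  # 9.5, 10.5

    print(a1_lin, a2_lin, a1_geo, a2_geo)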
10,557
astropy/photutils
photutils/isophote/geometry.py
EllipseGeometry.update_sma
def update_sma(self, step): """ Calculate an updated value for the semimajor axis, given the current value and the step value. The step value must be managed by the caller to support both modes: grow outwards and shrink inwards. Parameters ---------- step : float The step value. Returns ------- sma : float The new semimajor axis length. """ if self.linear_growth: sma = self.sma + step else: sma = self.sma * (1. + step) return sma
python
def update_sma(self, step): """ Calculate an updated value for the semimajor axis, given the current value and the step value. The step value must be managed by the caller to support both modes: grow outwards and shrink inwards. Parameters ---------- step : float The step value. Returns ------- sma : float The new semimajor axis length. """ if self.linear_growth: sma = self.sma + step else: sma = self.sma * (1. + step) return sma
[ "def", "update_sma", "(", "self", ",", "step", ")", ":", "if", "self", ".", "linear_growth", ":", "sma", "=", "self", ".", "sma", "+", "step", "else", ":", "sma", "=", "self", ".", "sma", "*", "(", "1.", "+", "step", ")", "return", "sma" ]
Calculate an updated value for the semimajor axis, given the current value and the step value. The step value must be managed by the caller to support both modes: grow outwards and shrink inwards. Parameters ---------- step : float The step value. Returns ------- sma : float The new semimajor axis length.
[ "Calculate", "an", "updated", "value", "for", "the", "semimajor", "axis", "given", "the", "current", "value", "and", "the", "step", "value", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/geometry.py#L484-L507
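Correspondingly, `update_sma` advances the semimajor axis by the same step in either mode; a two-line sketch with assumed values:

    sma, step = 10.0, 0.1
    print(sma + step)          # linear growth:    10.1
    print(sma * (1.0 + step))  # geometric growth: 11.0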
10,558
astropy/photutils
photutils/isophote/geometry.py
EllipseGeometry.reset_sma
def reset_sma(self, step): """ Change the direction of semimajor axis growth, from outwards to inwards. Parameters ---------- step : float The current step value. Returns ------- sma, new_step : float The new semimajor axis length and the new step value to initiate the shrinking of the semimajor axis length. This is the step value that should be used when calling the :meth:`~photutils.isophote.EllipseGeometry.update_sma` method. """ if self.linear_growth: sma = self.sma - step step = -step else: aux = 1. / (1. + step) sma = self.sma * aux step = aux - 1. return sma, step
python
def reset_sma(self, step): """ Change the direction of semimajor axis growth, from outwards to inwards. Parameters ---------- step : float The current step value. Returns ------- sma, new_step : float The new semimajor axis length and the new step value to initiate the shrinking of the semimajor axis length. This is the step value that should be used when calling the :meth:`~photutils.isophote.EllipseGeometry.update_sma` method. """ if self.linear_growth: sma = self.sma - step step = -step else: aux = 1. / (1. + step) sma = self.sma * aux step = aux - 1. return sma, step
[ "def", "reset_sma", "(", "self", ",", "step", ")", ":", "if", "self", ".", "linear_growth", ":", "sma", "=", "self", ".", "sma", "-", "step", "step", "=", "-", "step", "else", ":", "aux", "=", "1.", "/", "(", "1.", "+", "step", ")", "sma", "=", "self", ".", "sma", "*", "aux", "step", "=", "aux", "-", "1.", "return", "sma", ",", "step" ]
Change the direction of semimajor axis growth, from outwards to inwards. Parameters ---------- step : float The current step value. Returns ------- sma, new_step : float The new semimajor axis length and the new step value to initiate the shrinking of the semimajor axis length. This is the step value that should be used when calling the :meth:`~photutils.isophote.EllipseGeometry.update_sma` method.
[ "Change", "the", "direction", "of", "semimajor", "axis", "growth", "from", "outwards", "to", "inwards", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/geometry.py#L509-L537
10,559
astropy/photutils
photutils/psf/matching/fourier.py
resize_psf
def resize_psf(psf, input_pixel_scale, output_pixel_scale, order=3): """ Resize a PSF using spline interpolation of the requested order. Parameters ---------- psf : 2D `~numpy.ndarray` The 2D data array of the PSF. input_pixel_scale : float The pixel scale of the input ``psf``. The units must match ``output_pixel_scale``. output_pixel_scale : float The pixel scale of the output ``psf``. The units must match ``input_pixel_scale``. order : float, optional The order of the spline interpolation (0-5). The default is 3. Returns ------- result : 2D `~numpy.ndarray` The resampled/interpolated 2D data array. """ from scipy.ndimage import zoom ratio = input_pixel_scale / output_pixel_scale return zoom(psf, ratio, order=order) / ratio**2
python
def resize_psf(psf, input_pixel_scale, output_pixel_scale, order=3): """ Resize a PSF using spline interpolation of the requested order. Parameters ---------- psf : 2D `~numpy.ndarray` The 2D data array of the PSF. input_pixel_scale : float The pixel scale of the input ``psf``. The units must match ``output_pixel_scale``. output_pixel_scale : float The pixel scale of the output ``psf``. The units must match ``input_pixel_scale``. order : float, optional The order of the spline interpolation (0-5). The default is 3. Returns ------- result : 2D `~numpy.ndarray` The resampled/interpolated 2D data array. """ from scipy.ndimage import zoom ratio = input_pixel_scale / output_pixel_scale return zoom(psf, ratio, order=order) / ratio**2
[ "def", "resize_psf", "(", "psf", ",", "input_pixel_scale", ",", "output_pixel_scale", ",", "order", "=", "3", ")", ":", "from", "scipy", ".", "ndimage", "import", "zoom", "ratio", "=", "input_pixel_scale", "/", "output_pixel_scale", "return", "zoom", "(", "psf", ",", "ratio", ",", "order", "=", "order", ")", "/", "ratio", "**", "2" ]
Resize a PSF using spline interpolation of the requested order. Parameters ---------- psf : 2D `~numpy.ndarray` The 2D data array of the PSF. input_pixel_scale : float The pixel scale of the input ``psf``. The units must match ``output_pixel_scale``. output_pixel_scale : float The pixel scale of the output ``psf``. The units must match ``input_pixel_scale``. order : float, optional The order of the spline interpolation (0-5). The default is 3. Returns ------- result : 2D `~numpy.ndarray` The resampled/interpolated 2D data array.
[ "Resize", "a", "PSF", "using", "spline", "interpolation", "of", "the", "requested", "order", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/matching/fourier.py#L13-L42
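A usage sketch for the PSF resampling above. Dividing by ratio**2 keeps the total flux approximately constant, which the check below illustrates; the import path and the Gaussian test PSF are assumptions.

    import numpy as np
    from astropy.modeling.models import Gaussian2D
    from photutils.psf.matching import resize_psf  # assumed import path

    # Gaussian PSF sampled at 0.1"/pixel, resampled to 0.05"/pixel.
    y, x = np.mgrid[0:25, 0:25]
    psf = Gaussian2D(1.0, 12.0, 12.0, 3.0, 3.0)(x, y)
    psf /= psf.sum()

    psf_fine = resize_psf(psf, input_pixel_scale=0.1, output_pixel_scale=0.05)
    print(psf_fine.shape)  # (50, 50)
    print(psf_fine.sum())  # ~1.0, flux approximately preserved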
10,560
astropy/photutils
photutils/background/background_2d.py
Background2D._select_meshes
def _select_meshes(self, data): """ Define the x and y indices with respect to the low-resolution mesh image of the meshes to use for the background interpolation. The ``exclude_percentile`` keyword determines which meshes are not used for the background interpolation. Parameters ---------- data : 2D `~numpy.ma.MaskedArray` A 2D array where the y dimension represents each mesh and the x dimension represents the data in each mesh. Returns ------- mesh_idx : 1D `~numpy.ndarray` The 1D mesh indices. """ # the number of masked pixels in each mesh nmasked = np.ma.count_masked(data, axis=1) # meshes that contain more than ``exclude_percentile`` percent # masked pixels are excluded: # - for exclude_percentile=0, good meshes will be only where # nmasked=0 # - meshes where nmasked=self.box_npixels are *always* excluded # (second conditional needed for exclude_percentile=100) threshold_npixels = self.exclude_percentile / 100. * self.box_npixels mesh_idx = np.where((nmasked <= threshold_npixels) & (nmasked != self.box_npixels))[0] # good meshes if len(mesh_idx) == 0: raise ValueError('All meshes contain > {0} ({1} percent per ' 'mesh) masked pixels. Please check your data ' 'or decrease "exclude_percentile".' .format(threshold_npixels, self.exclude_percentile)) return mesh_idx
python
def _select_meshes(self, data): """ Define the x and y indices with respect to the low-resolution mesh image of the meshes to use for the background interpolation. The ``exclude_percentile`` keyword determines which meshes are not used for the background interpolation. Parameters ---------- data : 2D `~numpy.ma.MaskedArray` A 2D array where the y dimension represents each mesh and the x dimension represents the data in each mesh. Returns ------- mesh_idx : 1D `~numpy.ndarray` The 1D mesh indices. """ # the number of masked pixels in each mesh nmasked = np.ma.count_masked(data, axis=1) # meshes that contain more than ``exclude_percentile`` percent # masked pixels are excluded: # - for exclude_percentile=0, good meshes will be only where # nmasked=0 # - meshes where nmasked=self.box_npixels are *always* excluded # (second conditional needed for exclude_percentile=100) threshold_npixels = self.exclude_percentile / 100. * self.box_npixels mesh_idx = np.where((nmasked <= threshold_npixels) & (nmasked != self.box_npixels))[0] # good meshes if len(mesh_idx) == 0: raise ValueError('All meshes contain > {0} ({1} percent per ' 'mesh) masked pixels. Please check your data ' 'or decrease "exclude_percentile".' .format(threshold_npixels, self.exclude_percentile)) return mesh_idx
[ "def", "_select_meshes", "(", "self", ",", "data", ")", ":", "# the number of masked pixels in each mesh", "nmasked", "=", "np", ".", "ma", ".", "count_masked", "(", "data", ",", "axis", "=", "1", ")", "# meshes that contain more than ``exclude_percentile`` percent", "# masked pixels are excluded:", "# - for exclude_percentile=0, good meshes will be only where", "# nmasked=0", "# - meshes where nmasked=self.box_npixels are *always* excluded", "# (second conditional needed for exclude_percentile=100)", "threshold_npixels", "=", "self", ".", "exclude_percentile", "/", "100.", "*", "self", ".", "box_npixels", "mesh_idx", "=", "np", ".", "where", "(", "(", "nmasked", "<=", "threshold_npixels", ")", "&", "(", "nmasked", "!=", "self", ".", "box_npixels", ")", ")", "[", "0", "]", "# good meshes", "if", "len", "(", "mesh_idx", ")", "==", "0", ":", "raise", "ValueError", "(", "'All meshes contain > {0} ({1} percent per '", "'mesh) masked pixels. Please check your data '", "'or decrease \"exclude_percentile\".'", ".", "format", "(", "threshold_npixels", ",", "self", ".", "exclude_percentile", ")", ")", "return", "mesh_idx" ]
Define the x and y indices with respect to the low-resolution mesh image of the meshes to use for the background interpolation. The ``exclude_percentile`` keyword determines which meshes are not used for the background interpolation. Parameters ---------- data : 2D `~numpy.ma.MaskedArray` A 2D array where the y dimension represents each mesh and the x dimension represents the data in each mesh. Returns ------- mesh_idx : 1D `~numpy.ndarray` The 1D mesh indices.
[ "Define", "the", "x", "and", "y", "indices", "with", "respect", "to", "the", "low", "-", "resolution", "mesh", "image", "of", "the", "meshes", "to", "use", "for", "the", "background", "interpolation", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L412-L453
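The exclusion rule in `_select_meshes` is a threshold on the masked-pixel count per mesh; the sketch below reproduces that arithmetic on a toy masked-count array (all values assumed).

    import numpy as np

    box_npixels = 100          # pixels per mesh
    exclude_percentile = 10.0  # reject meshes with > 10% masked pixels
    nmasked = np.array([0, 5, 11, 100])  # masked pixels in four meshes

    threshold_npixels = exclude_percentile / 100.0 * box_npixels
    good = np.where((nmasked <= threshold_npixels)
                    & (nmasked != box_npixels))[0]
    print(good)  # [0 1] -- the 11%-masked and fully masked meshes are rejected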
10,561
astropy/photutils
photutils/background/background_2d.py
Background2D._prepare_data
def _prepare_data(self): """ Prepare the data. First, pad or crop the 2D data array so that there are an integer number of meshes in both dimensions, creating a masked array. Then reshape into a different 2D masked array where each row represents the data in a single mesh. This method also performs a first cut at rejecting certain meshes as specified by the input keywords. """ self.nyboxes = self.data.shape[0] // self.box_size[0] self.nxboxes = self.data.shape[1] // self.box_size[1] yextra = self.data.shape[0] % self.box_size[0] xextra = self.data.shape[1] % self.box_size[1] if (xextra + yextra) == 0: # no resizing of the data is necessary data_ma = np.ma.masked_array(self.data, mask=self.mask) else: # pad or crop the data if self.edge_method == 'pad': data_ma = self._pad_data(yextra, xextra) self.nyboxes = data_ma.shape[0] // self.box_size[0] self.nxboxes = data_ma.shape[1] // self.box_size[1] elif self.edge_method == 'crop': data_ma = self._crop_data() else: raise ValueError('edge_method must be "pad" or "crop"') self.nboxes = self.nxboxes * self.nyboxes # a reshaped 2D masked array with mesh data along the x axis mesh_data = np.ma.swapaxes(data_ma.reshape( self.nyboxes, self.box_size[0], self.nxboxes, self.box_size[1]), 1, 2).reshape(self.nyboxes * self.nxboxes, self.box_npixels) # first cut on rejecting meshes self.mesh_idx = self._select_meshes(mesh_data) self._mesh_data = mesh_data[self.mesh_idx, :] return
python
def _prepare_data(self): """ Prepare the data. First, pad or crop the 2D data array so that there are an integer number of meshes in both dimensions, creating a masked array. Then reshape into a different 2D masked array where each row represents the data in a single mesh. This method also performs a first cut at rejecting certain meshes as specified by the input keywords. """ self.nyboxes = self.data.shape[0] // self.box_size[0] self.nxboxes = self.data.shape[1] // self.box_size[1] yextra = self.data.shape[0] % self.box_size[0] xextra = self.data.shape[1] % self.box_size[1] if (xextra + yextra) == 0: # no resizing of the data is necessary data_ma = np.ma.masked_array(self.data, mask=self.mask) else: # pad or crop the data if self.edge_method == 'pad': data_ma = self._pad_data(yextra, xextra) self.nyboxes = data_ma.shape[0] // self.box_size[0] self.nxboxes = data_ma.shape[1] // self.box_size[1] elif self.edge_method == 'crop': data_ma = self._crop_data() else: raise ValueError('edge_method must be "pad" or "crop"') self.nboxes = self.nxboxes * self.nyboxes # a reshaped 2D masked array with mesh data along the x axis mesh_data = np.ma.swapaxes(data_ma.reshape( self.nyboxes, self.box_size[0], self.nxboxes, self.box_size[1]), 1, 2).reshape(self.nyboxes * self.nxboxes, self.box_npixels) # first cut on rejecting meshes self.mesh_idx = self._select_meshes(mesh_data) self._mesh_data = mesh_data[self.mesh_idx, :] return
[ "def", "_prepare_data", "(", "self", ")", ":", "self", ".", "nyboxes", "=", "self", ".", "data", ".", "shape", "[", "0", "]", "//", "self", ".", "box_size", "[", "0", "]", "self", ".", "nxboxes", "=", "self", ".", "data", ".", "shape", "[", "1", "]", "//", "self", ".", "box_size", "[", "1", "]", "yextra", "=", "self", ".", "data", ".", "shape", "[", "0", "]", "%", "self", ".", "box_size", "[", "0", "]", "xextra", "=", "self", ".", "data", ".", "shape", "[", "1", "]", "%", "self", ".", "box_size", "[", "1", "]", "if", "(", "xextra", "+", "yextra", ")", "==", "0", ":", "# no resizing of the data is necessary", "data_ma", "=", "np", ".", "ma", ".", "masked_array", "(", "self", ".", "data", ",", "mask", "=", "self", ".", "mask", ")", "else", ":", "# pad or crop the data", "if", "self", ".", "edge_method", "==", "'pad'", ":", "data_ma", "=", "self", ".", "_pad_data", "(", "yextra", ",", "xextra", ")", "self", ".", "nyboxes", "=", "data_ma", ".", "shape", "[", "0", "]", "//", "self", ".", "box_size", "[", "0", "]", "self", ".", "nxboxes", "=", "data_ma", ".", "shape", "[", "1", "]", "//", "self", ".", "box_size", "[", "1", "]", "elif", "self", ".", "edge_method", "==", "'crop'", ":", "data_ma", "=", "self", ".", "_crop_data", "(", ")", "else", ":", "raise", "ValueError", "(", "'edge_method must be \"pad\" or \"crop\"'", ")", "self", ".", "nboxes", "=", "self", ".", "nxboxes", "*", "self", ".", "nyboxes", "# a reshaped 2D masked array with mesh data along the x axis", "mesh_data", "=", "np", ".", "ma", ".", "swapaxes", "(", "data_ma", ".", "reshape", "(", "self", ".", "nyboxes", ",", "self", ".", "box_size", "[", "0", "]", ",", "self", ".", "nxboxes", ",", "self", ".", "box_size", "[", "1", "]", ")", ",", "1", ",", "2", ")", ".", "reshape", "(", "self", ".", "nyboxes", "*", "self", ".", "nxboxes", ",", "self", ".", "box_npixels", ")", "# first cut on rejecting meshes", "self", ".", "mesh_idx", "=", "self", ".", "_select_meshes", "(", "mesh_data", ")", "self", ".", "_mesh_data", "=", "mesh_data", "[", "self", ".", "mesh_idx", ",", ":", "]", "return" ]
Prepare the data. First, pad or crop the 2D data array so that there are an integer number of meshes in both dimensions, creating a masked array. Then reshape into a different 2D masked array where each row represents the data in a single mesh. This method also performs a first cut at rejecting certain meshes as specified by the input keywords.
[ "Prepare", "the", "data", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L455-L499
10,562
astropy/photutils
photutils/background/background_2d.py
Background2D._make_2d_array
def _make_2d_array(self, data): """ Convert a 1D array of mesh values to a masked 2D mesh array given the 1D mesh indices ``mesh_idx``. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. Returns ------- result : 2D `~numpy.ma.MaskedArray` A 2D masked array. Pixels not defined in ``mesh_idx`` are masked. """ if data.shape != self.mesh_idx.shape: raise ValueError('data and mesh_idx must have the same shape') if np.ma.is_masked(data): raise ValueError('data must not be a masked array') data2d = np.zeros(self._mesh_shape).astype(data.dtype) data2d[self.mesh_yidx, self.mesh_xidx] = data if len(self.mesh_idx) == self.nboxes: # no meshes were masked return data2d else: # some meshes were masked mask2d = np.ones(data2d.shape).astype(np.bool) mask2d[self.mesh_yidx, self.mesh_xidx] = False return np.ma.masked_array(data2d, mask=mask2d)
python
def _make_2d_array(self, data): """ Convert a 1D array of mesh values to a masked 2D mesh array given the 1D mesh indices ``mesh_idx``. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. Returns ------- result : 2D `~numpy.ma.MaskedArray` A 2D masked array. Pixels not defined in ``mesh_idx`` are masked. """ if data.shape != self.mesh_idx.shape: raise ValueError('data and mesh_idx must have the same shape') if np.ma.is_masked(data): raise ValueError('data must not be a masked array') data2d = np.zeros(self._mesh_shape).astype(data.dtype) data2d[self.mesh_yidx, self.mesh_xidx] = data if len(self.mesh_idx) == self.nboxes: # no meshes were masked return data2d else: # some meshes were masked mask2d = np.ones(data2d.shape).astype(np.bool) mask2d[self.mesh_yidx, self.mesh_xidx] = False return np.ma.masked_array(data2d, mask=mask2d)
[ "def", "_make_2d_array", "(", "self", ",", "data", ")", ":", "if", "data", ".", "shape", "!=", "self", ".", "mesh_idx", ".", "shape", ":", "raise", "ValueError", "(", "'data and mesh_idx must have the same shape'", ")", "if", "np", ".", "ma", ".", "is_masked", "(", "data", ")", ":", "raise", "ValueError", "(", "'data must not be a masked array'", ")", "data2d", "=", "np", ".", "zeros", "(", "self", ".", "_mesh_shape", ")", ".", "astype", "(", "data", ".", "dtype", ")", "data2d", "[", "self", ".", "mesh_yidx", ",", "self", ".", "mesh_xidx", "]", "=", "data", "if", "len", "(", "self", ".", "mesh_idx", ")", "==", "self", ".", "nboxes", ":", "# no meshes were masked", "return", "data2d", "else", ":", "# some meshes were masked", "mask2d", "=", "np", ".", "ones", "(", "data2d", ".", "shape", ")", ".", "astype", "(", "np", ".", "bool", ")", "mask2d", "[", "self", ".", "mesh_yidx", ",", "self", ".", "mesh_xidx", "]", "=", "False", "return", "np", ".", "ma", ".", "masked_array", "(", "data2d", ",", "mask", "=", "mask2d", ")" ]
Convert a 1D array of mesh values to a masked 2D mesh array given the 1D mesh indices ``mesh_idx``. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. Returns ------- result : 2D `~numpy.ma.MaskedArray` A 2D masked array. Pixels not defined in ``mesh_idx`` are masked.
[ "Convert", "a", "1D", "array", "of", "mesh", "values", "to", "a", "masked", "2D", "mesh", "array", "given", "the", "1D", "mesh", "indices", "mesh_idx", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L501-L535
10,563
astropy/photutils
photutils/background/background_2d.py
Background2D._interpolate_meshes
def _interpolate_meshes(self, data, n_neighbors=10, eps=0., power=1., reg=0.): """ Use IDW interpolation to fill in any masked pixels in the low-resolution 2D mesh background and background RMS images. This is required to use a regular-grid interpolator to expand the low-resolution image to the full size image. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. n_neighbors : int, optional The maximum number of nearest neighbors to use during the interpolation. eps : float, optional Set to use approximate nearest neighbors; the kth neighbor is guaranteed to be no further than (1 + ``eps``) times the distance to the real *k*-th nearest neighbor. See `scipy.spatial.cKDTree.query` for further information. power : float, optional The power of the inverse distance used for the interpolation weights. See the Notes section for more details. reg : float, optional The regularization parameter. It may be used to control the smoothness of the interpolator. See the Notes section for more details. Returns ------- result : 2D `~numpy.ndarray` A 2D array of the mesh values where masked pixels have been filled by IDW interpolation. """ yx = np.column_stack([self.mesh_yidx, self.mesh_xidx]) coords = np.array(list(product(range(self.nyboxes), range(self.nxboxes)))) f = ShepardIDWInterpolator(yx, data) img1d = f(coords, n_neighbors=n_neighbors, power=power, eps=eps, reg=reg) return img1d.reshape(self._mesh_shape)
python
def _interpolate_meshes(self, data, n_neighbors=10, eps=0., power=1., reg=0.): """ Use IDW interpolation to fill in any masked pixels in the low-resolution 2D mesh background and background RMS images. This is required to use a regular-grid interpolator to expand the low-resolution image to the full size image. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. n_neighbors : int, optional The maximum number of nearest neighbors to use during the interpolation. eps : float, optional Set to use approximate nearest neighbors; the kth neighbor is guaranteed to be no further than (1 + ``eps``) times the distance to the real *k*-th nearest neighbor. See `scipy.spatial.cKDTree.query` for further information. power : float, optional The power of the inverse distance used for the interpolation weights. See the Notes section for more details. reg : float, optional The regularization parameter. It may be used to control the smoothness of the interpolator. See the Notes section for more details. Returns ------- result : 2D `~numpy.ndarray` A 2D array of the mesh values where masked pixels have been filled by IDW interpolation. """ yx = np.column_stack([self.mesh_yidx, self.mesh_xidx]) coords = np.array(list(product(range(self.nyboxes), range(self.nxboxes)))) f = ShepardIDWInterpolator(yx, data) img1d = f(coords, n_neighbors=n_neighbors, power=power, eps=eps, reg=reg) return img1d.reshape(self._mesh_shape)
[ "def", "_interpolate_meshes", "(", "self", ",", "data", ",", "n_neighbors", "=", "10", ",", "eps", "=", "0.", ",", "power", "=", "1.", ",", "reg", "=", "0.", ")", ":", "yx", "=", "np", ".", "column_stack", "(", "[", "self", ".", "mesh_yidx", ",", "self", ".", "mesh_xidx", "]", ")", "coords", "=", "np", ".", "array", "(", "list", "(", "product", "(", "range", "(", "self", ".", "nyboxes", ")", ",", "range", "(", "self", ".", "nxboxes", ")", ")", ")", ")", "f", "=", "ShepardIDWInterpolator", "(", "yx", ",", "data", ")", "img1d", "=", "f", "(", "coords", ",", "n_neighbors", "=", "n_neighbors", ",", "power", "=", "power", ",", "eps", "=", "eps", ",", "reg", "=", "reg", ")", "return", "img1d", ".", "reshape", "(", "self", ".", "_mesh_shape", ")" ]
Use IDW interpolation to fill in any masked pixels in the low-resolution 2D mesh background and background RMS images. This is required to use a regular-grid interpolator to expand the low-resolution image to the full size image. Parameters ---------- data : 1D `~numpy.ndarray` A 1D array of mesh values. n_neighbors : int, optional The maximum number of nearest neighbors to use during the interpolation. eps : float, optional Set to use approximate nearest neighbors; the kth neighbor is guaranteed to be no further than (1 + ``eps``) times the distance to the real *k*-th nearest neighbor. See `scipy.spatial.cKDTree.query` for further information. power : float, optional The power of the inverse distance used for the interpolation weights. See the Notes section for more details. reg : float, optional The regularization parameter. It may be used to control the smoothness of the interpolator. See the Notes section for more details. Returns ------- result : 2D `~numpy.ndarray` A 2D array of the mesh values where masked pixels have been filled by IDW interpolation.
[ "Use", "IDW", "interpolation", "to", "fill", "in", "any", "masked", "pixels", "in", "the", "low", "-", "resolution", "2D", "mesh", "background", "and", "background", "RMS", "images", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L537-L584
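The record above fills rejected mesh boxes with ShepardIDWInterpolator. Below is a hedged, standalone sketch of the same call pattern on a toy low-resolution mesh; the `from photutils.utils import ShepardIDWInterpolator` import path, the grid sizes, and the masked-box positions are assumptions for illustration, not taken from the record.

from itertools import product

import numpy as np
from photutils.utils import ShepardIDWInterpolator  # assumed import path

nyboxes, nxboxes = 4, 5
rng = np.random.RandomState(42)
mesh = rng.normal(100., 5., size=(nyboxes, nxboxes))

# pretend two mesh boxes were rejected (e.g. fully masked)
good = np.ones(mesh.shape, dtype=bool)
good[1, 2] = good[3, 0] = False

# (y, x) indices and values of the surviving boxes
yidx, xidx = np.nonzero(good)
yx = np.column_stack([yidx, xidx])
values = mesh[good]

# interpolate onto every box of the low-resolution grid, mirroring the
# n_neighbors/power/eps/reg keywords used in the method above
coords = np.array(list(product(range(nyboxes), range(nxboxes))))
interp = ShepardIDWInterpolator(yx, values)
filled = interp(coords, n_neighbors=10, power=1., eps=0., reg=0.)
filled = filled.reshape((nyboxes, nxboxes))
print(filled[1, 2], filled[3, 0])   # previously-missing boxes now have values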
10,564
astropy/photutils
photutils/background/background_2d.py
Background2D._selective_filter
def _selective_filter(self, data, indices): """ Selectively filter only pixels above ``filter_threshold`` in the background mesh. The same pixels are filtered in both the background and background RMS meshes. Parameters ---------- data : 2D `~numpy.ndarray` A 2D array of mesh values. indices : 2 tuple of int A tuple of the ``y`` and ``x`` indices of the pixels to filter. Returns ------- filtered_data : 2D `~numpy.ndarray` The filtered 2D array of mesh values. """ data_out = np.copy(data) for i, j in zip(*indices): yfs, xfs = self.filter_size hyfs, hxfs = yfs // 2, xfs // 2 y0, y1 = max(i - hyfs, 0), min(i - hyfs + yfs, data.shape[0]) x0, x1 = max(j - hxfs, 0), min(j - hxfs + xfs, data.shape[1]) data_out[i, j] = np.median(data[y0:y1, x0:x1]) return data_out
python
def _selective_filter(self, data, indices): """ Selectively filter only pixels above ``filter_threshold`` in the background mesh. The same pixels are filtered in both the background and background RMS meshes. Parameters ---------- data : 2D `~numpy.ndarray` A 2D array of mesh values. indices : 2 tuple of int A tuple of the ``y`` and ``x`` indices of the pixels to filter. Returns ------- filtered_data : 2D `~numpy.ndarray` The filtered 2D array of mesh values. """ data_out = np.copy(data) for i, j in zip(*indices): yfs, xfs = self.filter_size hyfs, hxfs = yfs // 2, xfs // 2 y0, y1 = max(i - hyfs, 0), min(i - hyfs + yfs, data.shape[0]) x0, x1 = max(j - hxfs, 0), min(j - hxfs + xfs, data.shape[1]) data_out[i, j] = np.median(data[y0:y1, x0:x1]) return data_out
[ "def", "_selective_filter", "(", "self", ",", "data", ",", "indices", ")", ":", "data_out", "=", "np", ".", "copy", "(", "data", ")", "for", "i", ",", "j", "in", "zip", "(", "*", "indices", ")", ":", "yfs", ",", "xfs", "=", "self", ".", "filter_size", "hyfs", ",", "hxfs", "=", "yfs", "//", "2", ",", "xfs", "//", "2", "y0", ",", "y1", "=", "max", "(", "i", "-", "hyfs", ",", "0", ")", ",", "min", "(", "i", "-", "hyfs", "+", "yfs", ",", "data", ".", "shape", "[", "0", "]", ")", "x0", ",", "x1", "=", "max", "(", "j", "-", "hxfs", ",", "0", ")", ",", "min", "(", "j", "-", "hxfs", "+", "xfs", ",", "data", ".", "shape", "[", "1", "]", ")", "data_out", "[", "i", ",", "j", "]", "=", "np", ".", "median", "(", "data", "[", "y0", ":", "y1", ",", "x0", ":", "x1", "]", ")", "return", "data_out" ]
Selectively filter only pixels above ``filter_threshold`` in the background mesh. The same pixels are filtered in both the background and background RMS meshes. Parameters ---------- data : 2D `~numpy.ndarray` A 2D array of mesh values. indices : 2 tuple of int A tuple of the ``y`` and ``x`` indices of the pixels to filter. Returns ------- filtered_data : 2D `~numpy.ndarray` The filtered 2D array of mesh values.
[ "Selectively", "filter", "only", "pixels", "above", "filter_threshold", "in", "the", "background", "mesh", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L586-L617
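As an illustration of the selective filtering above, here is a minimal standalone sketch in plain NumPy that median-filters only the mesh pixels above a threshold. The function name selective_median_filter is hypothetical; the window clipping simply mirrors the box handling in the method.

import numpy as np

def selective_median_filter(mesh, threshold, filter_size=(3, 3)):
    """Median-filter only the mesh pixels above ``threshold`` (illustrative only)."""
    out = mesh.copy()
    yfs, xfs = filter_size
    hyfs, hxfs = yfs // 2, xfs // 2
    for i, j in zip(*np.nonzero(mesh > threshold)):
        y0, y1 = max(i - hyfs, 0), min(i - hyfs + yfs, mesh.shape[0])
        x0, x1 = max(j - hxfs, 0), min(j - hxfs + xfs, mesh.shape[1])
        out[i, j] = np.median(mesh[y0:y1, x0:x1])
    return out

mesh = np.full((5, 5), 10.)
mesh[2, 2] = 500.    # a single spike, e.g. a bright star dominating one box
print(selective_median_filter(mesh, threshold=100.)[2, 2])   # -> 10.0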
10,565
astropy/photutils
photutils/background/background_2d.py
Background2D._filter_meshes
def _filter_meshes(self): """ Apply a 2D median filter to the low-resolution 2D mesh, including only pixels inside the image at the borders. """ from scipy.ndimage import generic_filter try: nanmedian_func = np.nanmedian # numpy >= 1.9 except AttributeError: # pragma: no cover from scipy.stats import nanmedian nanmedian_func = nanmedian if self.filter_threshold is None: # filter the entire arrays self.background_mesh = generic_filter( self.background_mesh, nanmedian_func, size=self.filter_size, mode='constant', cval=np.nan) self.background_rms_mesh = generic_filter( self.background_rms_mesh, nanmedian_func, size=self.filter_size, mode='constant', cval=np.nan) else: # selectively filter indices = np.nonzero(self.background_mesh > self.filter_threshold) self.background_mesh = self._selective_filter( self.background_mesh, indices) self.background_rms_mesh = self._selective_filter( self.background_rms_mesh, indices) return
python
def _filter_meshes(self): """ Apply a 2D median filter to the low-resolution 2D mesh, including only pixels inside the image at the borders. """ from scipy.ndimage import generic_filter try: nanmedian_func = np.nanmedian # numpy >= 1.9 except AttributeError: # pragma: no cover from scipy.stats import nanmedian nanmedian_func = nanmedian if self.filter_threshold is None: # filter the entire arrays self.background_mesh = generic_filter( self.background_mesh, nanmedian_func, size=self.filter_size, mode='constant', cval=np.nan) self.background_rms_mesh = generic_filter( self.background_rms_mesh, nanmedian_func, size=self.filter_size, mode='constant', cval=np.nan) else: # selectively filter indices = np.nonzero(self.background_mesh > self.filter_threshold) self.background_mesh = self._selective_filter( self.background_mesh, indices) self.background_rms_mesh = self._selective_filter( self.background_rms_mesh, indices) return
[ "def", "_filter_meshes", "(", "self", ")", ":", "from", "scipy", ".", "ndimage", "import", "generic_filter", "try", ":", "nanmedian_func", "=", "np", ".", "nanmedian", "# numpy >= 1.9", "except", "AttributeError", ":", "# pragma: no cover", "from", "scipy", ".", "stats", "import", "nanmedian", "nanmedian_func", "=", "nanmedian", "if", "self", ".", "filter_threshold", "is", "None", ":", "# filter the entire arrays", "self", ".", "background_mesh", "=", "generic_filter", "(", "self", ".", "background_mesh", ",", "nanmedian_func", ",", "size", "=", "self", ".", "filter_size", ",", "mode", "=", "'constant'", ",", "cval", "=", "np", ".", "nan", ")", "self", ".", "background_rms_mesh", "=", "generic_filter", "(", "self", ".", "background_rms_mesh", ",", "nanmedian_func", ",", "size", "=", "self", ".", "filter_size", ",", "mode", "=", "'constant'", ",", "cval", "=", "np", ".", "nan", ")", "else", ":", "# selectively filter", "indices", "=", "np", ".", "nonzero", "(", "self", ".", "background_mesh", ">", "self", ".", "filter_threshold", ")", "self", ".", "background_mesh", "=", "self", ".", "_selective_filter", "(", "self", ".", "background_mesh", ",", "indices", ")", "self", ".", "background_rms_mesh", "=", "self", ".", "_selective_filter", "(", "self", ".", "background_rms_mesh", ",", "indices", ")", "return" ]
Apply a 2D median filter to the low-resolution 2D mesh, including only pixels inside the image at the borders.
[ "Apply", "a", "2D", "median", "filter", "to", "the", "low", "-", "resolution", "2D", "mesh", "including", "only", "pixels", "inside", "the", "image", "at", "the", "borders", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L619-L648
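The unselective branch above relies on scipy.ndimage.generic_filter with a NaN-aware median. The short sketch below reproduces that call on a toy mesh so the border behaviour is easy to see: the constant NaN padding is ignored by nanmedian, so edge windows only use in-image pixels. The toy array values are illustrative only.

import numpy as np
from scipy.ndimage import generic_filter

mesh = np.arange(25, dtype=float).reshape(5, 5)
mesh[1, 1] = np.nan   # e.g. a box that had no valid data

smoothed = generic_filter(mesh, np.nanmedian, size=(3, 3),
                          mode='constant', cval=np.nan)
print(smoothed[0, 0])   # -> 1.0, the median of the valid in-image neighbours (0, 1, 5)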
10,566
astropy/photutils
photutils/background/background_2d.py
Background2D._calc_bkg_bkgrms
def _calc_bkg_bkgrms(self): """ Calculate the background and background RMS estimate in each of the meshes. Both meshes are computed at the same time here method because the filtering of both depends on the background mesh. The ``background_mesh`` and ``background_rms_mesh`` images are equivalent to the low-resolution "MINIBACKGROUND" and "MINIBACK_RMS" background maps in SExtractor, respectively. """ if self.sigma_clip is not None: data_sigclip = self.sigma_clip(self._mesh_data, axis=1) else: data_sigclip = self._mesh_data del self._mesh_data # preform mesh rejection on sigma-clipped data (i.e. for any # newly-masked pixels) idx = self._select_meshes(data_sigclip) self.mesh_idx = self.mesh_idx[idx] # indices for the output mesh self._data_sigclip = data_sigclip[idx] # always a 2D masked array self._mesh_shape = (self.nyboxes, self.nxboxes) self.mesh_yidx, self.mesh_xidx = np.unravel_index(self.mesh_idx, self._mesh_shape) # These properties are needed later to calculate # background_mesh_ma and background_rms_mesh_ma. Note that _bkg1d # and _bkgrms1d are masked arrays, but the mask should always be # False. self._bkg1d = self.bkg_estimator(self._data_sigclip, axis=1) self._bkgrms1d = self.bkgrms_estimator(self._data_sigclip, axis=1) # make the unfiltered 2D mesh arrays (these are not masked) if len(self._bkg1d) == self.nboxes: bkg = self._make_2d_array(self._bkg1d) bkgrms = self._make_2d_array(self._bkgrms1d) else: bkg = self._interpolate_meshes(self._bkg1d) bkgrms = self._interpolate_meshes(self._bkgrms1d) self._background_mesh_unfiltered = bkg self._background_rms_mesh_unfiltered = bkgrms self.background_mesh = bkg self.background_rms_mesh = bkgrms # filter the 2D mesh arrays if not np.array_equal(self.filter_size, [1, 1]): self._filter_meshes() return
python
def _calc_bkg_bkgrms(self): """ Calculate the background and background RMS estimate in each of the meshes. Both meshes are computed at the same time here method because the filtering of both depends on the background mesh. The ``background_mesh`` and ``background_rms_mesh`` images are equivalent to the low-resolution "MINIBACKGROUND" and "MINIBACK_RMS" background maps in SExtractor, respectively. """ if self.sigma_clip is not None: data_sigclip = self.sigma_clip(self._mesh_data, axis=1) else: data_sigclip = self._mesh_data del self._mesh_data # preform mesh rejection on sigma-clipped data (i.e. for any # newly-masked pixels) idx = self._select_meshes(data_sigclip) self.mesh_idx = self.mesh_idx[idx] # indices for the output mesh self._data_sigclip = data_sigclip[idx] # always a 2D masked array self._mesh_shape = (self.nyboxes, self.nxboxes) self.mesh_yidx, self.mesh_xidx = np.unravel_index(self.mesh_idx, self._mesh_shape) # These properties are needed later to calculate # background_mesh_ma and background_rms_mesh_ma. Note that _bkg1d # and _bkgrms1d are masked arrays, but the mask should always be # False. self._bkg1d = self.bkg_estimator(self._data_sigclip, axis=1) self._bkgrms1d = self.bkgrms_estimator(self._data_sigclip, axis=1) # make the unfiltered 2D mesh arrays (these are not masked) if len(self._bkg1d) == self.nboxes: bkg = self._make_2d_array(self._bkg1d) bkgrms = self._make_2d_array(self._bkgrms1d) else: bkg = self._interpolate_meshes(self._bkg1d) bkgrms = self._interpolate_meshes(self._bkgrms1d) self._background_mesh_unfiltered = bkg self._background_rms_mesh_unfiltered = bkgrms self.background_mesh = bkg self.background_rms_mesh = bkgrms # filter the 2D mesh arrays if not np.array_equal(self.filter_size, [1, 1]): self._filter_meshes() return
[ "def", "_calc_bkg_bkgrms", "(", "self", ")", ":", "if", "self", ".", "sigma_clip", "is", "not", "None", ":", "data_sigclip", "=", "self", ".", "sigma_clip", "(", "self", ".", "_mesh_data", ",", "axis", "=", "1", ")", "else", ":", "data_sigclip", "=", "self", ".", "_mesh_data", "del", "self", ".", "_mesh_data", "# preform mesh rejection on sigma-clipped data (i.e. for any", "# newly-masked pixels)", "idx", "=", "self", ".", "_select_meshes", "(", "data_sigclip", ")", "self", ".", "mesh_idx", "=", "self", ".", "mesh_idx", "[", "idx", "]", "# indices for the output mesh", "self", ".", "_data_sigclip", "=", "data_sigclip", "[", "idx", "]", "# always a 2D masked array", "self", ".", "_mesh_shape", "=", "(", "self", ".", "nyboxes", ",", "self", ".", "nxboxes", ")", "self", ".", "mesh_yidx", ",", "self", ".", "mesh_xidx", "=", "np", ".", "unravel_index", "(", "self", ".", "mesh_idx", ",", "self", ".", "_mesh_shape", ")", "# These properties are needed later to calculate", "# background_mesh_ma and background_rms_mesh_ma. Note that _bkg1d", "# and _bkgrms1d are masked arrays, but the mask should always be", "# False.", "self", ".", "_bkg1d", "=", "self", ".", "bkg_estimator", "(", "self", ".", "_data_sigclip", ",", "axis", "=", "1", ")", "self", ".", "_bkgrms1d", "=", "self", ".", "bkgrms_estimator", "(", "self", ".", "_data_sigclip", ",", "axis", "=", "1", ")", "# make the unfiltered 2D mesh arrays (these are not masked)", "if", "len", "(", "self", ".", "_bkg1d", ")", "==", "self", ".", "nboxes", ":", "bkg", "=", "self", ".", "_make_2d_array", "(", "self", ".", "_bkg1d", ")", "bkgrms", "=", "self", ".", "_make_2d_array", "(", "self", ".", "_bkgrms1d", ")", "else", ":", "bkg", "=", "self", ".", "_interpolate_meshes", "(", "self", ".", "_bkg1d", ")", "bkgrms", "=", "self", ".", "_interpolate_meshes", "(", "self", ".", "_bkgrms1d", ")", "self", ".", "_background_mesh_unfiltered", "=", "bkg", "self", ".", "_background_rms_mesh_unfiltered", "=", "bkgrms", "self", ".", "background_mesh", "=", "bkg", "self", ".", "background_rms_mesh", "=", "bkgrms", "# filter the 2D mesh arrays", "if", "not", "np", ".", "array_equal", "(", "self", ".", "filter_size", ",", "[", "1", ",", "1", "]", ")", ":", "self", ".", "_filter_meshes", "(", ")", "return" ]
Calculate the background and background RMS estimate in each of the meshes. Both meshes are computed at the same time in this method because the filtering of both depends on the background mesh. The ``background_mesh`` and ``background_rms_mesh`` images are equivalent to the low-resolution "MINIBACKGROUND" and "MINIBACK_RMS" background maps in SExtractor, respectively.
[ "Calculate", "the", "background", "and", "background", "RMS", "estimate", "in", "each", "of", "the", "meshes", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L650-L703
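These mesh computations are normally driven through the public Background2D constructor rather than called directly. A hedged usage sketch follows; the photutils.background import path and the MedianBackground/StdBackgroundRMS estimator names are assumed from the photutils documentation of this era and may differ in other releases.

import numpy as np
from astropy.stats import SigmaClip
from photutils.background import (Background2D, MedianBackground,
                                  StdBackgroundRMS)  # assumed import path

rng = np.random.RandomState(12345)
data = rng.normal(5., 2., size=(300, 300))   # synthetic sky-dominated image

bkg = Background2D(data, (50, 50), filter_size=(3, 3),
                   sigma_clip=SigmaClip(sigma=3.),
                   bkg_estimator=MedianBackground(),
                   bkgrms_estimator=StdBackgroundRMS())

print(bkg.background_mesh.shape)       # (6, 6) low-resolution background mesh
print(bkg.background_rms_mesh.shape)   # matching background RMS mesh
print(bkg.background.shape)            # (300, 300) full-resolution background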
10,567
astropy/photutils
photutils/background/background_2d.py
Background2D._calc_coordinates
def _calc_coordinates(self): """ Calculate the coordinates to use when calling an interpolator. These are needed for `Background2D` and `BackgroundIDW2D`. Regular-grid interpolators require a 2D array of values. Some require a 2D meshgrid of x and y. Other require a strictly increasing 1D array of the x and y ranges. """ # the position coordinates used to initialize an interpolation self.y = (self.mesh_yidx * self.box_size[0] + (self.box_size[0] - 1) / 2.) self.x = (self.mesh_xidx * self.box_size[1] + (self.box_size[1] - 1) / 2.) self.yx = np.column_stack([self.y, self.x]) # the position coordinates used when calling an interpolator nx, ny = self.data.shape self.data_coords = np.array(list(product(range(ny), range(nx))))
python
def _calc_coordinates(self): """ Calculate the coordinates to use when calling an interpolator. These are needed for `Background2D` and `BackgroundIDW2D`. Regular-grid interpolators require a 2D array of values. Some require a 2D meshgrid of x and y. Other require a strictly increasing 1D array of the x and y ranges. """ # the position coordinates used to initialize an interpolation self.y = (self.mesh_yidx * self.box_size[0] + (self.box_size[0] - 1) / 2.) self.x = (self.mesh_xidx * self.box_size[1] + (self.box_size[1] - 1) / 2.) self.yx = np.column_stack([self.y, self.x]) # the position coordinates used when calling an interpolator nx, ny = self.data.shape self.data_coords = np.array(list(product(range(ny), range(nx))))
[ "def", "_calc_coordinates", "(", "self", ")", ":", "# the position coordinates used to initialize an interpolation", "self", ".", "y", "=", "(", "self", ".", "mesh_yidx", "*", "self", ".", "box_size", "[", "0", "]", "+", "(", "self", ".", "box_size", "[", "0", "]", "-", "1", ")", "/", "2.", ")", "self", ".", "x", "=", "(", "self", ".", "mesh_xidx", "*", "self", ".", "box_size", "[", "1", "]", "+", "(", "self", ".", "box_size", "[", "1", "]", "-", "1", ")", "/", "2.", ")", "self", ".", "yx", "=", "np", ".", "column_stack", "(", "[", "self", ".", "y", ",", "self", ".", "x", "]", ")", "# the position coordinates used when calling an interpolator", "nx", ",", "ny", "=", "self", ".", "data", ".", "shape", "self", ".", "data_coords", "=", "np", ".", "array", "(", "list", "(", "product", "(", "range", "(", "ny", ")", ",", "range", "(", "nx", ")", ")", ")", ")" ]
Calculate the coordinates to use when calling an interpolator. These are needed for `Background2D` and `BackgroundIDW2D`. Regular-grid interpolators require a 2D array of values. Some require a 2D meshgrid of x and y. Others require a strictly increasing 1D array of the x and y ranges.
[ "Calculate", "the", "coordinates", "to", "use", "when", "calling", "an", "interpolator", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L705-L725
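The box-center coordinates computed above follow a simple closed-form mapping from mesh indices to full-image pixel positions. The tiny sketch below evaluates that same formula for a few hypothetical mesh indices and box sizes.

import numpy as np

box_size = (50, 50)              # (ny_box, nx_box), illustrative values
mesh_yidx = np.array([0, 0, 1])
mesh_xidx = np.array([0, 1, 1])

# center of each box in full-image pixel coordinates,
# using the same formula as the method above
y = mesh_yidx * box_size[0] + (box_size[0] - 1) / 2.
x = mesh_xidx * box_size[1] + (box_size[1] - 1) / 2.
print(np.column_stack([y, x]))
# [[ 24.5  24.5]
#  [ 24.5  74.5]
#  [ 74.5  74.5]]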
10,568
astropy/photutils
photutils/background/background_2d.py
Background2D.plot_meshes
def plot_meshes(self, ax=None, marker='+', color='blue', outlines=False, **kwargs): """ Plot the low-resolution mesh boxes on a matplotlib Axes instance. Parameters ---------- ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current ``Axes`` instance is used. marker : str, optional The marker to use to mark the center of the boxes. Default is '+'. color : str, optional The color for the markers and the box outlines. Default is 'blue'. outlines : bool, optional Whether or not to plot the box outlines in addition to the box centers. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. Used only if ``outlines`` is True. """ import matplotlib.pyplot as plt kwargs['color'] = color if ax is None: ax = plt.gca() ax.scatter(self.x, self.y, marker=marker, color=color) if outlines: from ..aperture import RectangularAperture xy = np.column_stack([self.x, self.y]) apers = RectangularAperture(xy, self.box_size[1], self.box_size[0], 0.) apers.plot(ax=ax, **kwargs) return
python
def plot_meshes(self, ax=None, marker='+', color='blue', outlines=False, **kwargs): """ Plot the low-resolution mesh boxes on a matplotlib Axes instance. Parameters ---------- ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current ``Axes`` instance is used. marker : str, optional The marker to use to mark the center of the boxes. Default is '+'. color : str, optional The color for the markers and the box outlines. Default is 'blue'. outlines : bool, optional Whether or not to plot the box outlines in addition to the box centers. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. Used only if ``outlines`` is True. """ import matplotlib.pyplot as plt kwargs['color'] = color if ax is None: ax = plt.gca() ax.scatter(self.x, self.y, marker=marker, color=color) if outlines: from ..aperture import RectangularAperture xy = np.column_stack([self.x, self.y]) apers = RectangularAperture(xy, self.box_size[1], self.box_size[0], 0.) apers.plot(ax=ax, **kwargs) return
[ "def", "plot_meshes", "(", "self", ",", "ax", "=", "None", ",", "marker", "=", "'+'", ",", "color", "=", "'blue'", ",", "outlines", "=", "False", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "kwargs", "[", "'color'", "]", "=", "color", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "ax", ".", "scatter", "(", "self", ".", "x", ",", "self", ".", "y", ",", "marker", "=", "marker", ",", "color", "=", "color", ")", "if", "outlines", ":", "from", ".", ".", "aperture", "import", "RectangularAperture", "xy", "=", "np", ".", "column_stack", "(", "[", "self", ".", "x", ",", "self", ".", "y", "]", ")", "apers", "=", "RectangularAperture", "(", "xy", ",", "self", ".", "box_size", "[", "1", "]", ",", "self", ".", "box_size", "[", "0", "]", ",", "0.", ")", "apers", ".", "plot", "(", "ax", "=", "ax", ",", "*", "*", "kwargs", ")", "return" ]
Plot the low-resolution mesh boxes on a matplotlib Axes instance. Parameters ---------- ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current ``Axes`` instance is used. marker : str, optional The marker to use to mark the center of the boxes. Default is '+'. color : str, optional The color for the markers and the box outlines. Default is 'blue'. outlines : bool, optional Whether or not to plot the box outlines in addition to the box centers. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. Used only if ``outlines`` is True.
[ "Plot", "the", "low", "-", "resolution", "mesh", "boxes", "on", "a", "matplotlib", "Axes", "instance", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L798-L839
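plot_meshes is a public method, so it can be exercised directly on a Background2D result. A hedged sketch, assuming the photutils.background import path and the MedianBackground estimator name; the synthetic image is illustrative only.

import matplotlib.pyplot as plt
import numpy as np
from photutils.background import Background2D, MedianBackground  # assumed path

data = np.random.normal(5., 2., size=(300, 300))
bkg = Background2D(data, (50, 50), filter_size=(3, 3),
                   bkg_estimator=MedianBackground())

fig, ax = plt.subplots()
ax.imshow(data, origin='lower', cmap='Greys_r')
# mark the box centers and draw the box outlines on top of the image
bkg.plot_meshes(ax=ax, marker='+', color='cyan', outlines=True)
plt.show()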
10,569
astropy/photutils
photutils/isophote/sample.py
EllipseSample.extract
def extract(self): """ Extract sample data by scanning an elliptical path over the image array. Returns ------- result : 2D `~numpy.ndarray` The rows of the array contain the angles, radii, and extracted intensity values, respectively. """ # the sample values themselves are kept cached to prevent # multiple calls to the integrator code. if self.values is not None: return self.values else: s = self._extract() self.values = s return s
python
def extract(self): """ Extract sample data by scanning an elliptical path over the image array. Returns ------- result : 2D `~numpy.ndarray` The rows of the array contain the angles, radii, and extracted intensity values, respectively. """ # the sample values themselves are kept cached to prevent # multiple calls to the integrator code. if self.values is not None: return self.values else: s = self._extract() self.values = s return s
[ "def", "extract", "(", "self", ")", ":", "# the sample values themselves are kept cached to prevent", "# multiple calls to the integrator code.", "if", "self", ".", "values", "is", "not", "None", ":", "return", "self", ".", "values", "else", ":", "s", "=", "self", ".", "_extract", "(", ")", "self", ".", "values", "=", "s", "return", "s" ]
Extract sample data by scanning an elliptical path over the image array. Returns ------- result : 2D `~numpy.ndarray` The rows of the array contain the angles, radii, and extracted intensity values, respectively.
[ "Extract", "sample", "data", "by", "scanning", "an", "elliptical", "path", "over", "the", "image", "array", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/sample.py#L133-L152
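A hedged sketch of calling extract on a synthetic image, following the constructor usage shown in the EllipseFitter.fit docstring later in this file. The photutils.isophote import path is assumed, and the smooth radial profile is purely illustrative.

import numpy as np
from photutils.isophote import EllipseSample  # assumed import path

# synthetic image with a smooth, centrally peaked radial profile
yy, xx = np.mgrid[0:101, 0:101]
data = 100. / (1. + np.hypot(xx - 50., yy - 50.))

sample = EllipseSample(data, sma=20.)       # sample along an sma = 20 ellipse
angles, radii, intens = sample.extract()    # rows: angles, radii, intensities

print(angles.shape, radii.shape, intens.shape)
print(intens.mean())   # mean intensity along the elliptical path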
10,570
astropy/photutils
photutils/isophote/sample.py
EllipseSample.update
def update(self): """ Update this `~photutils.isophote.EllipseSample` instance. This method calls the :meth:`~photutils.isophote.EllipseSample.extract` method to get the values that match the current ``geometry`` attribute, and then computes the the mean intensity, local gradient, and other associated quantities. """ step = self.geometry.astep # Update the mean value first, using extraction from main sample. s = self.extract() self.mean = np.mean(s[2]) # Get sample with same geometry but at a different distance from # center. Estimate gradient from there. gradient, gradient_error = self._get_gradient(step) # Check for meaningful gradient. If no meaningful gradient, try # another sample, this time using larger radius. Meaningful # gradient means something shallower, but still close to within # a factor 3 from previous gradient estimate. If no previous # estimate is available, guess it. previous_gradient = self.gradient if not previous_gradient: previous_gradient = -0.05 # good enough, based on usage if gradient >= (previous_gradient / 3.): # gradient is negative! gradient, gradient_error = self._get_gradient(2 * step) # If still no meaningful gradient can be measured, try with # previous one, slightly shallower. A factor 0.8 is not too far # from what is expected from geometrical sampling steps of 10-20% # and a deVaucouleurs law or an exponential disk (at least at its # inner parts, r <~ 5 req). Gradient error is meaningless in this # case. if gradient >= (previous_gradient / 3.): gradient = previous_gradient * 0.8 gradient_error = None self.gradient = gradient self.gradient_error = gradient_error if gradient_error: self.gradient_relative_error = gradient_error / np.abs(gradient) else: self.gradient_relative_error = None
python
def update(self): """ Update this `~photutils.isophote.EllipseSample` instance. This method calls the :meth:`~photutils.isophote.EllipseSample.extract` method to get the values that match the current ``geometry`` attribute, and then computes the the mean intensity, local gradient, and other associated quantities. """ step = self.geometry.astep # Update the mean value first, using extraction from main sample. s = self.extract() self.mean = np.mean(s[2]) # Get sample with same geometry but at a different distance from # center. Estimate gradient from there. gradient, gradient_error = self._get_gradient(step) # Check for meaningful gradient. If no meaningful gradient, try # another sample, this time using larger radius. Meaningful # gradient means something shallower, but still close to within # a factor 3 from previous gradient estimate. If no previous # estimate is available, guess it. previous_gradient = self.gradient if not previous_gradient: previous_gradient = -0.05 # good enough, based on usage if gradient >= (previous_gradient / 3.): # gradient is negative! gradient, gradient_error = self._get_gradient(2 * step) # If still no meaningful gradient can be measured, try with # previous one, slightly shallower. A factor 0.8 is not too far # from what is expected from geometrical sampling steps of 10-20% # and a deVaucouleurs law or an exponential disk (at least at its # inner parts, r <~ 5 req). Gradient error is meaningless in this # case. if gradient >= (previous_gradient / 3.): gradient = previous_gradient * 0.8 gradient_error = None self.gradient = gradient self.gradient_error = gradient_error if gradient_error: self.gradient_relative_error = gradient_error / np.abs(gradient) else: self.gradient_relative_error = None
[ "def", "update", "(", "self", ")", ":", "step", "=", "self", ".", "geometry", ".", "astep", "# Update the mean value first, using extraction from main sample.", "s", "=", "self", ".", "extract", "(", ")", "self", ".", "mean", "=", "np", ".", "mean", "(", "s", "[", "2", "]", ")", "# Get sample with same geometry but at a different distance from", "# center. Estimate gradient from there.", "gradient", ",", "gradient_error", "=", "self", ".", "_get_gradient", "(", "step", ")", "# Check for meaningful gradient. If no meaningful gradient, try", "# another sample, this time using larger radius. Meaningful", "# gradient means something shallower, but still close to within", "# a factor 3 from previous gradient estimate. If no previous", "# estimate is available, guess it.", "previous_gradient", "=", "self", ".", "gradient", "if", "not", "previous_gradient", ":", "previous_gradient", "=", "-", "0.05", "# good enough, based on usage", "if", "gradient", ">=", "(", "previous_gradient", "/", "3.", ")", ":", "# gradient is negative!", "gradient", ",", "gradient_error", "=", "self", ".", "_get_gradient", "(", "2", "*", "step", ")", "# If still no meaningful gradient can be measured, try with", "# previous one, slightly shallower. A factor 0.8 is not too far", "# from what is expected from geometrical sampling steps of 10-20%", "# and a deVaucouleurs law or an exponential disk (at least at its", "# inner parts, r <~ 5 req). Gradient error is meaningless in this", "# case.", "if", "gradient", ">=", "(", "previous_gradient", "/", "3.", ")", ":", "gradient", "=", "previous_gradient", "*", "0.8", "gradient_error", "=", "None", "self", ".", "gradient", "=", "gradient", "self", ".", "gradient_error", "=", "gradient_error", "if", "gradient_error", ":", "self", ".", "gradient_relative_error", "=", "gradient_error", "/", "np", ".", "abs", "(", "gradient", ")", "else", ":", "self", ".", "gradient_relative_error", "=", "None" ]
Update this `~photutils.isophote.EllipseSample` instance. This method calls the :meth:`~photutils.isophote.EllipseSample.extract` method to get the values that match the current ``geometry`` attribute, and then computes the mean intensity, local gradient, and other associated quantities.
[ "Update", "this", "~photutils", ".", "isophote", ".", "EllipseSample", "instance", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/sample.py#L279-L327
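In this snapshot update takes no arguments and populates the mean and gradient attributes shown in the code above. A hedged continuation of the previous sketch, with the same assumed import path and synthetic data:

import numpy as np
from photutils.isophote import EllipseSample  # assumed import path

yy, xx = np.mgrid[0:101, 0:101]
data = 100. / (1. + np.hypot(xx - 50., yy - 50.))

sample = EllipseSample(data, sma=20.)
sample.update()   # extract values, then compute mean and local gradient

print(sample.mean)                     # mean intensity along the ellipse
print(sample.gradient)                 # local radial intensity gradient (negative)
print(sample.gradient_relative_error)  # None if the error was not meaningful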
10,571
astropy/photutils
photutils/isophote/fitter.py
EllipseFitter.fit
def fit(self, conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT, maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR, going_inwards=False): """ Fit an elliptical isophote. Parameters ---------- conver : float, optional The main convergence criterion. Iterations stop when the largest harmonic amplitude becomes smaller (in absolute value) than ``conver`` times the harmonic fit rms. The default is 0.05. minit : int, optional The minimum number of iterations to perform. A minimum of 10 (the default) iterations guarantees that, on average, 2 iterations will be available for fitting each independent parameter (the four harmonic amplitudes and the intensity level). For the first isophote, the minimum number of iterations is 2 * ``minit`` to ensure that, even departing from not-so-good initial values, the algorithm has a better chance to converge to a sensible solution. maxit : int, optional The maximum number of iterations to perform. The default is 50. fflag : float, optional The acceptable fraction of flagged data points in the sample. If the actual fraction of valid data points is smaller than this, the iterations will stop and the current `~photutils.isophote.Isophote` will be returned. Flagged data points are points that either lie outside the image frame, are masked, or were rejected by sigma-clipping. The default is 0.7. maxgerr : float, optional The maximum acceptable relative error in the local radial intensity gradient. This is the main control for preventing ellipses to grow to regions of too low signal-to-noise ratio. It specifies the maximum acceptable relative error in the local radial intensity gradient. `Busko (1996; ASPC 101, 139) <http://adsabs.harvard.edu/abs/1996ASPC..101..139B>`_ showed that the fitting precision relates to that relative error. The usual behavior of the gradient relative error is to increase with semimajor axis, being larger in outer, fainter regions of a galaxy image. In the current implementation, the ``maxgerr`` criterion is triggered only when two consecutive isophotes exceed the value specified by the parameter. This prevents premature stopping caused by contamination such as stars and HII regions. A number of actions may happen when the gradient error exceeds ``maxgerr`` (or becomes non-significant and is set to `None`). If the maximum semimajor axis specified by ``maxsma`` is set to `None`, semimajor axis growth is stopped and the algorithm proceeds inwards to the galaxy center. If ``maxsma`` is set to some finite value, and this value is larger than the current semimajor axis length, the algorithm enters non-iterative mode and proceeds outwards until reaching ``maxsma``. The default is 0.5. going_inwards : bool, optional Parameter to define the sense of SMA growth. When fitting just one isophote, this parameter is used only by the code that defines the details of how elliptical arc segments ("sectors") are extracted from the image, when using area extraction modes (see the ``integrmode`` parameter in the `~photutils.isophote.EllipseSample` class). The default is `False`. Returns ------- result : `~photutils.isophote.Isophote` instance The fitted isophote, which also contains fit status information. Examples -------- >>> from photutils.isophote import EllipseSample, EllipseFitter >>> sample = EllipseSample(data, sma=10.) >>> fitter = EllipseFitter(sample) >>> isophote = fitter.fit() """ sample = self._sample # this flag signals that limiting gradient error (`maxgerr`) # wasn't exceeded yet. 
lexceed = False # here we keep track of the sample that caused the minimum harmonic # amplitude(in absolute value). This will eventually be used to # build the resulting Isophote in cases where iterations run to # the maximum allowed (maxit), or the maximum number of flagged # data points (fflag) is reached. minimum_amplitude_value = np.Inf minimum_amplitude_sample = None for iter in range(maxit): # Force the sample to compute its gradient and associated values. sample.update() # The extract() method returns sampled values as a 2-d numpy array # with the following structure: # values[0] = 1-d array with angles # values[1] = 1-d array with radii # values[2] = 1-d array with intensity values = sample.extract() # Fit harmonic coefficients. Failure in fitting is # a fatal error; terminate immediately with sample # marked as invalid. try: coeffs = fit_first_and_second_harmonics(values[0], values[2]) except Exception as e: log.info(e) return Isophote(sample, iter+1, False, 3) coeffs = coeffs[0] # largest harmonic in absolute value drives the correction. largest_harmonic_index = np.argmax(np.abs(coeffs[1:])) largest_harmonic = coeffs[1:][largest_harmonic_index] # see if the amplitude decreased; if yes, keep the # corresponding sample for eventual later use. if abs(largest_harmonic) < minimum_amplitude_value: minimum_amplitude_value = abs(largest_harmonic) minimum_amplitude_sample = sample # check if converged model = first_and_second_harmonic_function(values[0], coeffs) residual = values[2] - model if ((conver * sample.sector_area * np.std(residual)) > np.abs(largest_harmonic)): # Got a valid solution. But before returning, ensure # that a minimum of iterations has run. if iter >= minit-1: sample.update() return Isophote(sample, iter+1, True, 0) # it may not have converged yet, but the sample contains too # many invalid data points: return. if sample.actual_points < (sample.total_points * fflag): # when too many data points were flagged, return the # best fit sample instead of the current one. minimum_amplitude_sample.update() return Isophote(minimum_amplitude_sample, iter+1, True, 1) # pick appropriate corrector code. corrector = _correctors[largest_harmonic_index] # generate *NEW* EllipseSample instance with corrected # parameter. Note that this instance is still devoid of other # information besides its geometry. It needs to be explicitly # updated for computations to proceed. We have to build a new # EllipseSample instance every time because of the lazy # extraction process used by EllipseSample code. To minimize # the number of calls to the area integrators, we pay a # (hopefully smaller) price here, by having multiple calls to # the EllipseSample constructor. sample = corrector.correct(sample, largest_harmonic) sample.update() # see if any abnormal (or unusual) conditions warrant # the change to non-iterative mode, or go-inwards mode. proceed, lexceed = self._check_conditions( sample, maxgerr, going_inwards, lexceed) if not proceed: sample.update() return Isophote(sample, iter+1, True, -1) # Got to the maximum number of iterations. Return with # code 2, and handle it as a valid isophote. Use the # best fit sample instead of the current one. minimum_amplitude_sample.update() return Isophote(minimum_amplitude_sample, maxit, True, 2)
python
def fit(self, conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT, maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR, going_inwards=False): """ Fit an elliptical isophote. Parameters ---------- conver : float, optional The main convergence criterion. Iterations stop when the largest harmonic amplitude becomes smaller (in absolute value) than ``conver`` times the harmonic fit rms. The default is 0.05. minit : int, optional The minimum number of iterations to perform. A minimum of 10 (the default) iterations guarantees that, on average, 2 iterations will be available for fitting each independent parameter (the four harmonic amplitudes and the intensity level). For the first isophote, the minimum number of iterations is 2 * ``minit`` to ensure that, even departing from not-so-good initial values, the algorithm has a better chance to converge to a sensible solution. maxit : int, optional The maximum number of iterations to perform. The default is 50. fflag : float, optional The acceptable fraction of flagged data points in the sample. If the actual fraction of valid data points is smaller than this, the iterations will stop and the current `~photutils.isophote.Isophote` will be returned. Flagged data points are points that either lie outside the image frame, are masked, or were rejected by sigma-clipping. The default is 0.7. maxgerr : float, optional The maximum acceptable relative error in the local radial intensity gradient. This is the main control for preventing ellipses to grow to regions of too low signal-to-noise ratio. It specifies the maximum acceptable relative error in the local radial intensity gradient. `Busko (1996; ASPC 101, 139) <http://adsabs.harvard.edu/abs/1996ASPC..101..139B>`_ showed that the fitting precision relates to that relative error. The usual behavior of the gradient relative error is to increase with semimajor axis, being larger in outer, fainter regions of a galaxy image. In the current implementation, the ``maxgerr`` criterion is triggered only when two consecutive isophotes exceed the value specified by the parameter. This prevents premature stopping caused by contamination such as stars and HII regions. A number of actions may happen when the gradient error exceeds ``maxgerr`` (or becomes non-significant and is set to `None`). If the maximum semimajor axis specified by ``maxsma`` is set to `None`, semimajor axis growth is stopped and the algorithm proceeds inwards to the galaxy center. If ``maxsma`` is set to some finite value, and this value is larger than the current semimajor axis length, the algorithm enters non-iterative mode and proceeds outwards until reaching ``maxsma``. The default is 0.5. going_inwards : bool, optional Parameter to define the sense of SMA growth. When fitting just one isophote, this parameter is used only by the code that defines the details of how elliptical arc segments ("sectors") are extracted from the image, when using area extraction modes (see the ``integrmode`` parameter in the `~photutils.isophote.EllipseSample` class). The default is `False`. Returns ------- result : `~photutils.isophote.Isophote` instance The fitted isophote, which also contains fit status information. Examples -------- >>> from photutils.isophote import EllipseSample, EllipseFitter >>> sample = EllipseSample(data, sma=10.) >>> fitter = EllipseFitter(sample) >>> isophote = fitter.fit() """ sample = self._sample # this flag signals that limiting gradient error (`maxgerr`) # wasn't exceeded yet. 
lexceed = False # here we keep track of the sample that caused the minimum harmonic # amplitude(in absolute value). This will eventually be used to # build the resulting Isophote in cases where iterations run to # the maximum allowed (maxit), or the maximum number of flagged # data points (fflag) is reached. minimum_amplitude_value = np.Inf minimum_amplitude_sample = None for iter in range(maxit): # Force the sample to compute its gradient and associated values. sample.update() # The extract() method returns sampled values as a 2-d numpy array # with the following structure: # values[0] = 1-d array with angles # values[1] = 1-d array with radii # values[2] = 1-d array with intensity values = sample.extract() # Fit harmonic coefficients. Failure in fitting is # a fatal error; terminate immediately with sample # marked as invalid. try: coeffs = fit_first_and_second_harmonics(values[0], values[2]) except Exception as e: log.info(e) return Isophote(sample, iter+1, False, 3) coeffs = coeffs[0] # largest harmonic in absolute value drives the correction. largest_harmonic_index = np.argmax(np.abs(coeffs[1:])) largest_harmonic = coeffs[1:][largest_harmonic_index] # see if the amplitude decreased; if yes, keep the # corresponding sample for eventual later use. if abs(largest_harmonic) < minimum_amplitude_value: minimum_amplitude_value = abs(largest_harmonic) minimum_amplitude_sample = sample # check if converged model = first_and_second_harmonic_function(values[0], coeffs) residual = values[2] - model if ((conver * sample.sector_area * np.std(residual)) > np.abs(largest_harmonic)): # Got a valid solution. But before returning, ensure # that a minimum of iterations has run. if iter >= minit-1: sample.update() return Isophote(sample, iter+1, True, 0) # it may not have converged yet, but the sample contains too # many invalid data points: return. if sample.actual_points < (sample.total_points * fflag): # when too many data points were flagged, return the # best fit sample instead of the current one. minimum_amplitude_sample.update() return Isophote(minimum_amplitude_sample, iter+1, True, 1) # pick appropriate corrector code. corrector = _correctors[largest_harmonic_index] # generate *NEW* EllipseSample instance with corrected # parameter. Note that this instance is still devoid of other # information besides its geometry. It needs to be explicitly # updated for computations to proceed. We have to build a new # EllipseSample instance every time because of the lazy # extraction process used by EllipseSample code. To minimize # the number of calls to the area integrators, we pay a # (hopefully smaller) price here, by having multiple calls to # the EllipseSample constructor. sample = corrector.correct(sample, largest_harmonic) sample.update() # see if any abnormal (or unusual) conditions warrant # the change to non-iterative mode, or go-inwards mode. proceed, lexceed = self._check_conditions( sample, maxgerr, going_inwards, lexceed) if not proceed: sample.update() return Isophote(sample, iter+1, True, -1) # Got to the maximum number of iterations. Return with # code 2, and handle it as a valid isophote. Use the # best fit sample instead of the current one. minimum_amplitude_sample.update() return Isophote(minimum_amplitude_sample, maxit, True, 2)
[ "def", "fit", "(", "self", ",", "conver", "=", "DEFAULT_CONVERGENCE", ",", "minit", "=", "DEFAULT_MINIT", ",", "maxit", "=", "DEFAULT_MAXIT", ",", "fflag", "=", "DEFAULT_FFLAG", ",", "maxgerr", "=", "DEFAULT_MAXGERR", ",", "going_inwards", "=", "False", ")", ":", "sample", "=", "self", ".", "_sample", "# this flag signals that limiting gradient error (`maxgerr`)", "# wasn't exceeded yet.", "lexceed", "=", "False", "# here we keep track of the sample that caused the minimum harmonic", "# amplitude(in absolute value). This will eventually be used to", "# build the resulting Isophote in cases where iterations run to", "# the maximum allowed (maxit), or the maximum number of flagged", "# data points (fflag) is reached.", "minimum_amplitude_value", "=", "np", ".", "Inf", "minimum_amplitude_sample", "=", "None", "for", "iter", "in", "range", "(", "maxit", ")", ":", "# Force the sample to compute its gradient and associated values.", "sample", ".", "update", "(", ")", "# The extract() method returns sampled values as a 2-d numpy array", "# with the following structure:", "# values[0] = 1-d array with angles", "# values[1] = 1-d array with radii", "# values[2] = 1-d array with intensity", "values", "=", "sample", ".", "extract", "(", ")", "# Fit harmonic coefficients. Failure in fitting is", "# a fatal error; terminate immediately with sample", "# marked as invalid.", "try", ":", "coeffs", "=", "fit_first_and_second_harmonics", "(", "values", "[", "0", "]", ",", "values", "[", "2", "]", ")", "except", "Exception", "as", "e", ":", "log", ".", "info", "(", "e", ")", "return", "Isophote", "(", "sample", ",", "iter", "+", "1", ",", "False", ",", "3", ")", "coeffs", "=", "coeffs", "[", "0", "]", "# largest harmonic in absolute value drives the correction.", "largest_harmonic_index", "=", "np", ".", "argmax", "(", "np", ".", "abs", "(", "coeffs", "[", "1", ":", "]", ")", ")", "largest_harmonic", "=", "coeffs", "[", "1", ":", "]", "[", "largest_harmonic_index", "]", "# see if the amplitude decreased; if yes, keep the", "# corresponding sample for eventual later use.", "if", "abs", "(", "largest_harmonic", ")", "<", "minimum_amplitude_value", ":", "minimum_amplitude_value", "=", "abs", "(", "largest_harmonic", ")", "minimum_amplitude_sample", "=", "sample", "# check if converged", "model", "=", "first_and_second_harmonic_function", "(", "values", "[", "0", "]", ",", "coeffs", ")", "residual", "=", "values", "[", "2", "]", "-", "model", "if", "(", "(", "conver", "*", "sample", ".", "sector_area", "*", "np", ".", "std", "(", "residual", ")", ")", ">", "np", ".", "abs", "(", "largest_harmonic", ")", ")", ":", "# Got a valid solution. But before returning, ensure", "# that a minimum of iterations has run.", "if", "iter", ">=", "minit", "-", "1", ":", "sample", ".", "update", "(", ")", "return", "Isophote", "(", "sample", ",", "iter", "+", "1", ",", "True", ",", "0", ")", "# it may not have converged yet, but the sample contains too", "# many invalid data points: return.", "if", "sample", ".", "actual_points", "<", "(", "sample", ".", "total_points", "*", "fflag", ")", ":", "# when too many data points were flagged, return the", "# best fit sample instead of the current one.", "minimum_amplitude_sample", ".", "update", "(", ")", "return", "Isophote", "(", "minimum_amplitude_sample", ",", "iter", "+", "1", ",", "True", ",", "1", ")", "# pick appropriate corrector code.", "corrector", "=", "_correctors", "[", "largest_harmonic_index", "]", "# generate *NEW* EllipseSample instance with corrected", "# parameter. 
Note that this instance is still devoid of other", "# information besides its geometry. It needs to be explicitly", "# updated for computations to proceed. We have to build a new", "# EllipseSample instance every time because of the lazy", "# extraction process used by EllipseSample code. To minimize", "# the number of calls to the area integrators, we pay a", "# (hopefully smaller) price here, by having multiple calls to", "# the EllipseSample constructor.", "sample", "=", "corrector", ".", "correct", "(", "sample", ",", "largest_harmonic", ")", "sample", ".", "update", "(", ")", "# see if any abnormal (or unusual) conditions warrant", "# the change to non-iterative mode, or go-inwards mode.", "proceed", ",", "lexceed", "=", "self", ".", "_check_conditions", "(", "sample", ",", "maxgerr", ",", "going_inwards", ",", "lexceed", ")", "if", "not", "proceed", ":", "sample", ".", "update", "(", ")", "return", "Isophote", "(", "sample", ",", "iter", "+", "1", ",", "True", ",", "-", "1", ")", "# Got to the maximum number of iterations. Return with", "# code 2, and handle it as a valid isophote. Use the", "# best fit sample instead of the current one.", "minimum_amplitude_sample", ".", "update", "(", ")", "return", "Isophote", "(", "minimum_amplitude_sample", ",", "maxit", ",", "True", ",", "2", ")" ]
Fit an elliptical isophote. Parameters ---------- conver : float, optional The main convergence criterion. Iterations stop when the largest harmonic amplitude becomes smaller (in absolute value) than ``conver`` times the harmonic fit rms. The default is 0.05. minit : int, optional The minimum number of iterations to perform. A minimum of 10 (the default) iterations guarantees that, on average, 2 iterations will be available for fitting each independent parameter (the four harmonic amplitudes and the intensity level). For the first isophote, the minimum number of iterations is 2 * ``minit`` to ensure that, even departing from not-so-good initial values, the algorithm has a better chance to converge to a sensible solution. maxit : int, optional The maximum number of iterations to perform. The default is 50. fflag : float, optional The acceptable fraction of flagged data points in the sample. If the actual fraction of valid data points is smaller than this, the iterations will stop and the current `~photutils.isophote.Isophote` will be returned. Flagged data points are points that either lie outside the image frame, are masked, or were rejected by sigma-clipping. The default is 0.7. maxgerr : float, optional The maximum acceptable relative error in the local radial intensity gradient. This is the main control for preventing ellipses to grow to regions of too low signal-to-noise ratio. It specifies the maximum acceptable relative error in the local radial intensity gradient. `Busko (1996; ASPC 101, 139) <http://adsabs.harvard.edu/abs/1996ASPC..101..139B>`_ showed that the fitting precision relates to that relative error. The usual behavior of the gradient relative error is to increase with semimajor axis, being larger in outer, fainter regions of a galaxy image. In the current implementation, the ``maxgerr`` criterion is triggered only when two consecutive isophotes exceed the value specified by the parameter. This prevents premature stopping caused by contamination such as stars and HII regions. A number of actions may happen when the gradient error exceeds ``maxgerr`` (or becomes non-significant and is set to `None`). If the maximum semimajor axis specified by ``maxsma`` is set to `None`, semimajor axis growth is stopped and the algorithm proceeds inwards to the galaxy center. If ``maxsma`` is set to some finite value, and this value is larger than the current semimajor axis length, the algorithm enters non-iterative mode and proceeds outwards until reaching ``maxsma``. The default is 0.5. going_inwards : bool, optional Parameter to define the sense of SMA growth. When fitting just one isophote, this parameter is used only by the code that defines the details of how elliptical arc segments ("sectors") are extracted from the image, when using area extraction modes (see the ``integrmode`` parameter in the `~photutils.isophote.EllipseSample` class). The default is `False`. Returns ------- result : `~photutils.isophote.Isophote` instance The fitted isophote, which also contains fit status information. Examples -------- >>> from photutils.isophote import EllipseSample, EllipseFitter >>> sample = EllipseSample(data, sma=10.) >>> fitter = EllipseFitter(sample) >>> isophote = fitter.fit()
[ "Fit", "an", "elliptical", "isophote", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/fitter.py#L41-L217
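The docstring already closes with a minimal example; the slightly fuller sketch below also inspects the fit status afterwards. The valid, stop_code and intens attribute names of the returned Isophote are assumed from this snapshot of the package, and the synthetic galaxy image and fit keywords are illustrative only.

import numpy as np
from photutils.isophote import EllipseFitter, EllipseSample  # assumed import path

# synthetic, mildly flattened "galaxy" with a smooth exponential profile
yy, xx = np.mgrid[0:201, 0:201]
r = np.hypot((xx - 100.) / 1.2, yy - 100.)
data = 1000. * np.exp(-r / 20.)

sample = EllipseSample(data, sma=30.)
fitter = EllipseFitter(sample)
isophote = fitter.fit(conver=0.05, minit=10, maxit=50)

print(isophote.valid)       # True if the fit produced a usable isophote
print(isophote.stop_code)   # 0 means normal convergence (see codes above)
print(isophote.intens)      # mean isophotal intensity at sma = 30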
10,572
astropy/photutils
photutils/psf/epsf_stars.py
_extract_stars
def _extract_stars(data, catalog, size=(11, 11), use_xy=True): """ Extract cutout images from a single image centered on stars defined in the single input catalog. Parameters ---------- data : `~astropy.nddata.NDData` A `~astropy.nddata.NDData` object containing the 2D image from which to extract the stars. If the input ``catalog`` contains only the sky coordinates (i.e. not the pixel coordinates) of the stars then the `~astropy.nddata.NDData` object must have a valid ``wcs`` attribute. catalogs : `~astropy.table.Table` A single catalog of sources to be extracted from the input ``data``. The center of each source can be defined either in pixel coordinates (in ``x`` and ``y`` columns) or sky coordinates (in a ``skycoord`` column containing a `~astropy.coordinates.SkyCoord` object). If both are specified, then the value of the ``use_xy`` keyword determines which coordinates will be used. size : int or array_like (int), optional The extraction box size along each axis. If ``size`` is a scalar then a square box of size ``size`` will be used. If ``size`` has two elements, they should be in ``(ny, nx)`` order. The size must be greater than or equal to 3 pixel for both axes. use_xy : bool, optional Whether to use the ``x`` and ``y`` pixel positions when both pixel and sky coordinates are present in the input catalog table. If `False` then sky coordinates are used instead of pixel coordinates (e.g. for linked stars). The default is `True`. Returns ------- stars : list of `EPSFStar` objects A list of `EPSFStar` instances containing the extracted stars. """ colnames = catalog.colnames if ('x' not in colnames or 'y' not in colnames) or not use_xy: xcenters, ycenters = skycoord_to_pixel(catalog['skycoord'], data.wcs, origin=0, mode='all') else: xcenters = catalog['x'].data.astype(np.float) ycenters = catalog['y'].data.astype(np.float) if 'id' in colnames: ids = catalog['id'] else: ids = np.arange(len(catalog), dtype=np.int) + 1 if data.uncertainty is None: weights = np.ones_like(data.data) else: if data.uncertainty.uncertainty_type == 'weights': weights = np.asanyarray(data.uncertainty.array, dtype=np.float) else: warnings.warn('The data uncertainty attribute has an unsupported ' 'type. Only uncertainty_type="weights" can be ' 'used to set weights. Weights will be set to 1.', AstropyUserWarning) weights = np.ones_like(data.data) if data.mask is not None: weights[data.mask] = 0. stars = [] for xcenter, ycenter, obj_id in zip(xcenters, ycenters, ids): try: large_slc, small_slc = overlap_slices(data.data.shape, size, (ycenter, xcenter), mode='strict') data_cutout = data.data[large_slc] weights_cutout = weights[large_slc] except (PartialOverlapError, NoOverlapError): stars.append(None) continue origin = (large_slc[1].start, large_slc[0].start) cutout_center = (xcenter - origin[0], ycenter - origin[1]) star = EPSFStar(data_cutout, weights_cutout, cutout_center=cutout_center, origin=origin, wcs_large=data.wcs, id_label=obj_id) stars.append(star) return stars
python
def _extract_stars(data, catalog, size=(11, 11), use_xy=True): """ Extract cutout images from a single image centered on stars defined in the single input catalog. Parameters ---------- data : `~astropy.nddata.NDData` A `~astropy.nddata.NDData` object containing the 2D image from which to extract the stars. If the input ``catalog`` contains only the sky coordinates (i.e. not the pixel coordinates) of the stars then the `~astropy.nddata.NDData` object must have a valid ``wcs`` attribute. catalogs : `~astropy.table.Table` A single catalog of sources to be extracted from the input ``data``. The center of each source can be defined either in pixel coordinates (in ``x`` and ``y`` columns) or sky coordinates (in a ``skycoord`` column containing a `~astropy.coordinates.SkyCoord` object). If both are specified, then the value of the ``use_xy`` keyword determines which coordinates will be used. size : int or array_like (int), optional The extraction box size along each axis. If ``size`` is a scalar then a square box of size ``size`` will be used. If ``size`` has two elements, they should be in ``(ny, nx)`` order. The size must be greater than or equal to 3 pixel for both axes. use_xy : bool, optional Whether to use the ``x`` and ``y`` pixel positions when both pixel and sky coordinates are present in the input catalog table. If `False` then sky coordinates are used instead of pixel coordinates (e.g. for linked stars). The default is `True`. Returns ------- stars : list of `EPSFStar` objects A list of `EPSFStar` instances containing the extracted stars. """ colnames = catalog.colnames if ('x' not in colnames or 'y' not in colnames) or not use_xy: xcenters, ycenters = skycoord_to_pixel(catalog['skycoord'], data.wcs, origin=0, mode='all') else: xcenters = catalog['x'].data.astype(np.float) ycenters = catalog['y'].data.astype(np.float) if 'id' in colnames: ids = catalog['id'] else: ids = np.arange(len(catalog), dtype=np.int) + 1 if data.uncertainty is None: weights = np.ones_like(data.data) else: if data.uncertainty.uncertainty_type == 'weights': weights = np.asanyarray(data.uncertainty.array, dtype=np.float) else: warnings.warn('The data uncertainty attribute has an unsupported ' 'type. Only uncertainty_type="weights" can be ' 'used to set weights. Weights will be set to 1.', AstropyUserWarning) weights = np.ones_like(data.data) if data.mask is not None: weights[data.mask] = 0. stars = [] for xcenter, ycenter, obj_id in zip(xcenters, ycenters, ids): try: large_slc, small_slc = overlap_slices(data.data.shape, size, (ycenter, xcenter), mode='strict') data_cutout = data.data[large_slc] weights_cutout = weights[large_slc] except (PartialOverlapError, NoOverlapError): stars.append(None) continue origin = (large_slc[1].start, large_slc[0].start) cutout_center = (xcenter - origin[0], ycenter - origin[1]) star = EPSFStar(data_cutout, weights_cutout, cutout_center=cutout_center, origin=origin, wcs_large=data.wcs, id_label=obj_id) stars.append(star) return stars
[ "def", "_extract_stars", "(", "data", ",", "catalog", ",", "size", "=", "(", "11", ",", "11", ")", ",", "use_xy", "=", "True", ")", ":", "colnames", "=", "catalog", ".", "colnames", "if", "(", "'x'", "not", "in", "colnames", "or", "'y'", "not", "in", "colnames", ")", "or", "not", "use_xy", ":", "xcenters", ",", "ycenters", "=", "skycoord_to_pixel", "(", "catalog", "[", "'skycoord'", "]", ",", "data", ".", "wcs", ",", "origin", "=", "0", ",", "mode", "=", "'all'", ")", "else", ":", "xcenters", "=", "catalog", "[", "'x'", "]", ".", "data", ".", "astype", "(", "np", ".", "float", ")", "ycenters", "=", "catalog", "[", "'y'", "]", ".", "data", ".", "astype", "(", "np", ".", "float", ")", "if", "'id'", "in", "colnames", ":", "ids", "=", "catalog", "[", "'id'", "]", "else", ":", "ids", "=", "np", ".", "arange", "(", "len", "(", "catalog", ")", ",", "dtype", "=", "np", ".", "int", ")", "+", "1", "if", "data", ".", "uncertainty", "is", "None", ":", "weights", "=", "np", ".", "ones_like", "(", "data", ".", "data", ")", "else", ":", "if", "data", ".", "uncertainty", ".", "uncertainty_type", "==", "'weights'", ":", "weights", "=", "np", ".", "asanyarray", "(", "data", ".", "uncertainty", ".", "array", ",", "dtype", "=", "np", ".", "float", ")", "else", ":", "warnings", ".", "warn", "(", "'The data uncertainty attribute has an unsupported '", "'type. Only uncertainty_type=\"weights\" can be '", "'used to set weights. Weights will be set to 1.'", ",", "AstropyUserWarning", ")", "weights", "=", "np", ".", "ones_like", "(", "data", ".", "data", ")", "if", "data", ".", "mask", "is", "not", "None", ":", "weights", "[", "data", ".", "mask", "]", "=", "0.", "stars", "=", "[", "]", "for", "xcenter", ",", "ycenter", ",", "obj_id", "in", "zip", "(", "xcenters", ",", "ycenters", ",", "ids", ")", ":", "try", ":", "large_slc", ",", "small_slc", "=", "overlap_slices", "(", "data", ".", "data", ".", "shape", ",", "size", ",", "(", "ycenter", ",", "xcenter", ")", ",", "mode", "=", "'strict'", ")", "data_cutout", "=", "data", ".", "data", "[", "large_slc", "]", "weights_cutout", "=", "weights", "[", "large_slc", "]", "except", "(", "PartialOverlapError", ",", "NoOverlapError", ")", ":", "stars", ".", "append", "(", "None", ")", "continue", "origin", "=", "(", "large_slc", "[", "1", "]", ".", "start", ",", "large_slc", "[", "0", "]", ".", "start", ")", "cutout_center", "=", "(", "xcenter", "-", "origin", "[", "0", "]", ",", "ycenter", "-", "origin", "[", "1", "]", ")", "star", "=", "EPSFStar", "(", "data_cutout", ",", "weights_cutout", ",", "cutout_center", "=", "cutout_center", ",", "origin", "=", "origin", ",", "wcs_large", "=", "data", ".", "wcs", ",", "id_label", "=", "obj_id", ")", "stars", ".", "append", "(", "star", ")", "return", "stars" ]
Extract cutout images from a single image centered on stars defined
in the single input catalog.

Parameters
----------
data : `~astropy.nddata.NDData`
    A `~astropy.nddata.NDData` object containing the 2D image from
    which to extract the stars. If the input ``catalog`` contains
    only the sky coordinates (i.e. not the pixel coordinates) of the
    stars then the `~astropy.nddata.NDData` object must have a valid
    ``wcs`` attribute.

catalog : `~astropy.table.Table`
    A single catalog of sources to be extracted from the input
    ``data``. The center of each source can be defined either in
    pixel coordinates (in ``x`` and ``y`` columns) or sky coordinates
    (in a ``skycoord`` column containing a
    `~astropy.coordinates.SkyCoord` object). If both are specified,
    then the value of the ``use_xy`` keyword determines which
    coordinates will be used.

size : int or array_like (int), optional
    The extraction box size along each axis. If ``size`` is a scalar
    then a square box of size ``size`` will be used. If ``size`` has
    two elements, they should be in ``(ny, nx)`` order. The size must
    be greater than or equal to 3 pixels for both axes.

use_xy : bool, optional
    Whether to use the ``x`` and ``y`` pixel positions when both
    pixel and sky coordinates are present in the input catalog table.
    If `False` then sky coordinates are used instead of pixel
    coordinates (e.g. for linked stars). The default is `True`.

Returns
-------
stars : list of `EPSFStar` objects
    A list of `EPSFStar` instances containing the extracted stars.
[ "Extract", "cutout", "images", "from", "a", "single", "image", "centered", "on", "stars", "defined", "in", "the", "single", "input", "catalog", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf_stars.py#L687-L777
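The heart of the `_extract_stars` record above is the `overlap_slices` call plus the cutout-center bookkeeping. A minimal standalone sketch of just that step, with a made-up image and star position (both are illustrative, not taken from the record):

import numpy as np
from astropy.nddata.utils import overlap_slices

data = np.zeros((100, 100))                    # placeholder image
size = (11, 11)                                # (ny, nx) cutout shape
xcenter, ycenter = 40.3, 57.8                  # hypothetical star position

# mode='strict' raises PartialOverlapError/NoOverlapError for stars too close
# to an edge, which _extract_stars catches and records as None
large_slc, small_slc = overlap_slices(data.shape, size, (ycenter, xcenter),
                                      mode='strict')
cutout = data[large_slc]
origin = (large_slc[1].start, large_slc[0].start)            # (x, y) of the cutout origin
cutout_center = (xcenter - origin[0], ycenter - origin[1])   # star position in the cutout frame
print(cutout.shape, origin, cutout_center)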
10,573
astropy/photutils
photutils/psf/epsf_stars.py
EPSFStar.estimate_flux
def estimate_flux(self): """ Estimate the star's flux by summing values in the input cutout array. Missing data is filled in by interpolation to better estimate the total flux. """ from .epsf import _interpolate_missing_data if np.any(self.mask): data_interp = _interpolate_missing_data(self.data, method='cubic', mask=self.mask) data_interp = _interpolate_missing_data(data_interp, method='nearest', mask=self.mask) flux = np.sum(data_interp, dtype=np.float64) else: flux = np.sum(self.data, dtype=np.float64) return flux
python
def estimate_flux(self): """ Estimate the star's flux by summing values in the input cutout array. Missing data is filled in by interpolation to better estimate the total flux. """ from .epsf import _interpolate_missing_data if np.any(self.mask): data_interp = _interpolate_missing_data(self.data, method='cubic', mask=self.mask) data_interp = _interpolate_missing_data(data_interp, method='nearest', mask=self.mask) flux = np.sum(data_interp, dtype=np.float64) else: flux = np.sum(self.data, dtype=np.float64) return flux
[ "def", "estimate_flux", "(", "self", ")", ":", "from", ".", "epsf", "import", "_interpolate_missing_data", "if", "np", ".", "any", "(", "self", ".", "mask", ")", ":", "data_interp", "=", "_interpolate_missing_data", "(", "self", ".", "data", ",", "method", "=", "'cubic'", ",", "mask", "=", "self", ".", "mask", ")", "data_interp", "=", "_interpolate_missing_data", "(", "data_interp", ",", "method", "=", "'nearest'", ",", "mask", "=", "self", ".", "mask", ")", "flux", "=", "np", ".", "sum", "(", "data_interp", ",", "dtype", "=", "np", ".", "float64", ")", "else", ":", "flux", "=", "np", ".", "sum", "(", "self", ".", "data", ",", "dtype", "=", "np", ".", "float64", ")", "return", "flux" ]
Estimate the star's flux by summing values in the input cutout array. Missing data is filled in by interpolation to better estimate the total flux.
[ "Estimate", "the", "star", "s", "flux", "by", "summing", "values", "in", "the", "input", "cutout", "array", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf_stars.py#L158-L180
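A hedged usage sketch for `estimate_flux`, building an `EPSFStar` by hand with the same positional arguments seen in the `_extract_stars` record above. The import path, the treatment of a zero weight as a masked pixel, and the data values are all assumptions made only for illustration:

import numpy as np
from photutils.psf import EPSFStar   # assumed import path

data = np.full((11, 11), 2.0)        # made-up cutout with constant value 2
weights = np.ones_like(data)
weights[5, 5] = 0.0                  # assume a zero weight marks this pixel as masked

star = EPSFStar(data, weights, cutout_center=(5.0, 5.0))
# the masked pixel is filled by interpolation before the values are summed
print(star.estimate_flux())          # expected to be close to 11 * 11 * 2 = 242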
10,574
astropy/photutils
photutils/psf/epsf_stars.py
EPSFStar._xy_idx
def _xy_idx(self): """ 1D arrays of x and y indices of unmasked pixels in the cutout reference frame. """ yidx, xidx = np.indices(self._data.shape) return xidx[~self.mask].ravel(), yidx[~self.mask].ravel()
python
def _xy_idx(self): """ 1D arrays of x and y indices of unmasked pixels in the cutout reference frame. """ yidx, xidx = np.indices(self._data.shape) return xidx[~self.mask].ravel(), yidx[~self.mask].ravel()
[ "def", "_xy_idx", "(", "self", ")", ":", "yidx", ",", "xidx", "=", "np", ".", "indices", "(", "self", ".", "_data", ".", "shape", ")", "return", "xidx", "[", "~", "self", ".", "mask", "]", ".", "ravel", "(", ")", ",", "yidx", "[", "~", "self", ".", "mask", "]", ".", "ravel", "(", ")" ]
1D arrays of x and y indices of unmasked pixels in the cutout reference frame.
[ "1D", "arrays", "of", "x", "and", "y", "indices", "of", "unmasked", "pixels", "in", "the", "cutout", "reference", "frame", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf_stars.py#L223-L230
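The `_xy_idx` property above is plain NumPy index bookkeeping; the same pattern in isolation (the array shape and mask are made up):

import numpy as np

mask = np.zeros((3, 4), dtype=bool)
mask[1, 2] = True                    # one masked pixel
yidx, xidx = np.indices(mask.shape)  # note the (y, x) return order
x_idx = xidx[~mask].ravel()          # 1D x indices of the unmasked pixels
y_idx = yidx[~mask].ravel()          # 1D y indices of the unmasked pixels
print(x_idx.size)                    # 11 of the 12 pixels are unmasked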
10,575
astropy/photutils
photutils/psf/groupstars.py
DAOGroup.find_group
def find_group(self, star, starlist): """ Find the ids of those stars in ``starlist`` which are at a distance less than ``crit_separation`` from ``star``. Parameters ---------- star : `~astropy.table.Row` Star which will be either the head of a cluster or an isolated one. starlist : `~astropy.table.Table` List of star positions. Columns named as ``x_0`` and ``y_0``, which corresponds to the centroid coordinates of the sources, must be provided. Returns ------- Array containing the ids of those stars which are at a distance less than ``crit_separation`` from ``star``. """ star_distance = np.hypot(star['x_0'] - starlist['x_0'], star['y_0'] - starlist['y_0']) distance_criteria = star_distance < self.crit_separation return np.asarray(starlist[distance_criteria]['id'])
python
def find_group(self, star, starlist): """ Find the ids of those stars in ``starlist`` which are at a distance less than ``crit_separation`` from ``star``. Parameters ---------- star : `~astropy.table.Row` Star which will be either the head of a cluster or an isolated one. starlist : `~astropy.table.Table` List of star positions. Columns named as ``x_0`` and ``y_0``, which corresponds to the centroid coordinates of the sources, must be provided. Returns ------- Array containing the ids of those stars which are at a distance less than ``crit_separation`` from ``star``. """ star_distance = np.hypot(star['x_0'] - starlist['x_0'], star['y_0'] - starlist['y_0']) distance_criteria = star_distance < self.crit_separation return np.asarray(starlist[distance_criteria]['id'])
[ "def", "find_group", "(", "self", ",", "star", ",", "starlist", ")", ":", "star_distance", "=", "np", ".", "hypot", "(", "star", "[", "'x_0'", "]", "-", "starlist", "[", "'x_0'", "]", ",", "star", "[", "'y_0'", "]", "-", "starlist", "[", "'y_0'", "]", ")", "distance_criteria", "=", "star_distance", "<", "self", ".", "crit_separation", "return", "np", ".", "asarray", "(", "starlist", "[", "distance_criteria", "]", "[", "'id'", "]", ")" ]
Find the ids of those stars in ``starlist`` which are at a
distance less than ``crit_separation`` from ``star``.

Parameters
----------
star : `~astropy.table.Row`
    Star which will be either the head of a cluster or an
    isolated one.

starlist : `~astropy.table.Table`
    List of star positions. Columns named as ``x_0`` and ``y_0``,
    which correspond to the centroid coordinates of the sources,
    must be provided.

Returns
-------
Array containing the ids of those stars which are at a
distance less than ``crit_separation`` from ``star``.
[ "Find", "the", "ids", "of", "those", "stars", "in", "starlist", "which", "are", "at", "a", "distance", "less", "than", "crit_separation", "from", "star", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/groupstars.py#L152-L176
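A small usage sketch for `find_group`, assuming `DAOGroup` can be imported from `photutils.psf` and noting that the star list also needs an ``id`` column, since the method returns values from that column (the positions are made up):

from astropy.table import Table
from photutils.psf import DAOGroup   # assumed import path

starlist = Table({'id': [1, 2, 3],
                  'x_0': [10.0, 12.0, 40.0],
                  'y_0': [10.0, 11.0, 45.0]})
group = DAOGroup(crit_separation=5.0)
# stars 1 and 2 lie within 5 pixels of star 1, star 3 does not
print(group.find_group(starlist[0], starlist))   # -> [1 2]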
10,576
astropy/photutils
photutils/aperture/bounding_box.py
BoundingBox._from_float
def _from_float(cls, xmin, xmax, ymin, ymax): """ Return the smallest bounding box that fully contains a given rectangle defined by float coordinate values. Following the pixel index convention, an integer index corresponds to the center of a pixel and the pixel edges span from (index - 0.5) to (index + 0.5). For example, the pixel edge spans of the following pixels are: - pixel 0: from -0.5 to 0.5 - pixel 1: from 0.5 to 1.5 - pixel 2: from 1.5 to 2.5 In addition, because `BoundingBox` upper limits are exclusive (by definition), 1 is added to the upper pixel edges. See examples below. Parameters ---------- xmin, xmax, ymin, ymax : float Float coordinates defining a rectangle. The lower values (``xmin`` and ``ymin``) must not be greater than the respective upper values (``xmax`` and ``ymax``). Returns ------- bbox : `BoundingBox` object The minimal ``BoundingBox`` object fully containing the input rectangle coordinates. Examples -------- >>> from photutils import BoundingBox >>> BoundingBox._from_float(xmin=1.0, xmax=10.0, ymin=2.0, ymax=20.0) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=21) >>> BoundingBox._from_float(xmin=1.4, xmax=10.4, ymin=1.6, ymax=10.6) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=12) """ ixmin = int(np.floor(xmin + 0.5)) ixmax = int(np.ceil(xmax + 0.5)) iymin = int(np.floor(ymin + 0.5)) iymax = int(np.ceil(ymax + 0.5)) return cls(ixmin, ixmax, iymin, iymax)
python
def _from_float(cls, xmin, xmax, ymin, ymax): """ Return the smallest bounding box that fully contains a given rectangle defined by float coordinate values. Following the pixel index convention, an integer index corresponds to the center of a pixel and the pixel edges span from (index - 0.5) to (index + 0.5). For example, the pixel edge spans of the following pixels are: - pixel 0: from -0.5 to 0.5 - pixel 1: from 0.5 to 1.5 - pixel 2: from 1.5 to 2.5 In addition, because `BoundingBox` upper limits are exclusive (by definition), 1 is added to the upper pixel edges. See examples below. Parameters ---------- xmin, xmax, ymin, ymax : float Float coordinates defining a rectangle. The lower values (``xmin`` and ``ymin``) must not be greater than the respective upper values (``xmax`` and ``ymax``). Returns ------- bbox : `BoundingBox` object The minimal ``BoundingBox`` object fully containing the input rectangle coordinates. Examples -------- >>> from photutils import BoundingBox >>> BoundingBox._from_float(xmin=1.0, xmax=10.0, ymin=2.0, ymax=20.0) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=21) >>> BoundingBox._from_float(xmin=1.4, xmax=10.4, ymin=1.6, ymax=10.6) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=12) """ ixmin = int(np.floor(xmin + 0.5)) ixmax = int(np.ceil(xmax + 0.5)) iymin = int(np.floor(ymin + 0.5)) iymax = int(np.ceil(ymax + 0.5)) return cls(ixmin, ixmax, iymin, iymax)
[ "def", "_from_float", "(", "cls", ",", "xmin", ",", "xmax", ",", "ymin", ",", "ymax", ")", ":", "ixmin", "=", "int", "(", "np", ".", "floor", "(", "xmin", "+", "0.5", ")", ")", "ixmax", "=", "int", "(", "np", ".", "ceil", "(", "xmax", "+", "0.5", ")", ")", "iymin", "=", "int", "(", "np", ".", "floor", "(", "ymin", "+", "0.5", ")", ")", "iymax", "=", "int", "(", "np", ".", "ceil", "(", "ymax", "+", "0.5", ")", ")", "return", "cls", "(", "ixmin", ",", "ixmax", ",", "iymin", ",", "iymax", ")" ]
Return the smallest bounding box that fully contains a given rectangle defined by float coordinate values. Following the pixel index convention, an integer index corresponds to the center of a pixel and the pixel edges span from (index - 0.5) to (index + 0.5). For example, the pixel edge spans of the following pixels are: - pixel 0: from -0.5 to 0.5 - pixel 1: from 0.5 to 1.5 - pixel 2: from 1.5 to 2.5 In addition, because `BoundingBox` upper limits are exclusive (by definition), 1 is added to the upper pixel edges. See examples below. Parameters ---------- xmin, xmax, ymin, ymax : float Float coordinates defining a rectangle. The lower values (``xmin`` and ``ymin``) must not be greater than the respective upper values (``xmax`` and ``ymax``). Returns ------- bbox : `BoundingBox` object The minimal ``BoundingBox`` object fully containing the input rectangle coordinates. Examples -------- >>> from photutils import BoundingBox >>> BoundingBox._from_float(xmin=1.0, xmax=10.0, ymin=2.0, ymax=20.0) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=21) >>> BoundingBox._from_float(xmin=1.4, xmax=10.4, ymin=1.6, ymax=10.6) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=12)
[ "Return", "the", "smallest", "bounding", "box", "that", "fully", "contains", "a", "given", "rectangle", "defined", "by", "float", "coordinate", "values", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/bounding_box.py#L73-L119
10,577
astropy/photutils
photutils/aperture/bounding_box.py
BoundingBox.slices
def slices(self): """ The bounding box as a tuple of `slice` objects. The slice tuple is in numpy axis order (i.e. ``(y, x)``) and therefore can be used to slice numpy arrays. """ return (slice(self.iymin, self.iymax), slice(self.ixmin, self.ixmax))
python
def slices(self): """ The bounding box as a tuple of `slice` objects. The slice tuple is in numpy axis order (i.e. ``(y, x)``) and therefore can be used to slice numpy arrays. """ return (slice(self.iymin, self.iymax), slice(self.ixmin, self.ixmax))
[ "def", "slices", "(", "self", ")", ":", "return", "(", "slice", "(", "self", ".", "iymin", ",", "self", ".", "iymax", ")", ",", "slice", "(", "self", ".", "ixmin", ",", "self", ".", "ixmax", ")", ")" ]
The bounding box as a tuple of `slice` objects. The slice tuple is in numpy axis order (i.e. ``(y, x)``) and therefore can be used to slice numpy arrays.
[ "The", "bounding", "box", "as", "a", "tuple", "of", "slice", "objects", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/bounding_box.py#L149-L157
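A short sketch showing the `slices` property in use, with the same ``from photutils import BoundingBox`` import and positional argument order as the `as_patch` example in the next record (the array contents are arbitrary):

import numpy as np
from photutils import BoundingBox

data = np.arange(100).reshape(10, 10)
bbox = BoundingBox(2, 7, 3, 8)       # (ixmin, ixmax, iymin, iymax)
cutout = data[bbox.slices]           # equivalent to data[3:8, 2:7], numpy (y, x) order
print(cutout.shape)                  # (5, 5)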
10,578
astropy/photutils
photutils/aperture/bounding_box.py
BoundingBox.as_patch
def as_patch(self, **kwargs): """ Return a `matplotlib.patches.Rectangle` that represents the bounding box. Parameters ---------- kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. Returns ------- result : `matplotlib.patches.Rectangle` A matplotlib rectangular patch. Examples -------- .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import BoundingBox bbox = BoundingBox(2, 7, 3, 8) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) np.random.seed(12345) ax.imshow(np.random.random((10, 10)), interpolation='nearest', cmap='viridis') ax.add_patch(bbox.as_patch(facecolor='none', edgecolor='white', lw=2.)) """ from matplotlib.patches import Rectangle return Rectangle(xy=(self.extent[0], self.extent[2]), width=self.shape[1], height=self.shape[0], **kwargs)
python
def as_patch(self, **kwargs): """ Return a `matplotlib.patches.Rectangle` that represents the bounding box. Parameters ---------- kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. Returns ------- result : `matplotlib.patches.Rectangle` A matplotlib rectangular patch. Examples -------- .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import BoundingBox bbox = BoundingBox(2, 7, 3, 8) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) np.random.seed(12345) ax.imshow(np.random.random((10, 10)), interpolation='nearest', cmap='viridis') ax.add_patch(bbox.as_patch(facecolor='none', edgecolor='white', lw=2.)) """ from matplotlib.patches import Rectangle return Rectangle(xy=(self.extent[0], self.extent[2]), width=self.shape[1], height=self.shape[0], **kwargs)
[ "def", "as_patch", "(", "self", ",", "*", "*", "kwargs", ")", ":", "from", "matplotlib", ".", "patches", "import", "Rectangle", "return", "Rectangle", "(", "xy", "=", "(", "self", ".", "extent", "[", "0", "]", ",", "self", ".", "extent", "[", "2", "]", ")", ",", "width", "=", "self", ".", "shape", "[", "1", "]", ",", "height", "=", "self", ".", "shape", "[", "0", "]", ",", "*", "*", "kwargs", ")" ]
Return a `matplotlib.patches.Rectangle` that represents the bounding box. Parameters ---------- kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. Returns ------- result : `matplotlib.patches.Rectangle` A matplotlib rectangular patch. Examples -------- .. plot:: :include-source: import matplotlib.pyplot as plt from photutils import BoundingBox bbox = BoundingBox(2, 7, 3, 8) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) np.random.seed(12345) ax.imshow(np.random.random((10, 10)), interpolation='nearest', cmap='viridis') ax.add_patch(bbox.as_patch(facecolor='none', edgecolor='white', lw=2.))
[ "Return", "a", "matplotlib", ".", "patches", ".", "Rectangle", "that", "represents", "the", "bounding", "box", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/bounding_box.py#L180-L216
10,579
astropy/photutils
photutils/aperture/bounding_box.py
BoundingBox.to_aperture
def to_aperture(self): """ Return a `~photutils.aperture.RectangularAperture` that represents the bounding box. """ from .rectangle import RectangularAperture xpos = (self.extent[1] + self.extent[0]) / 2. ypos = (self.extent[3] + self.extent[2]) / 2. xypos = (xpos, ypos) h, w = self.shape return RectangularAperture(xypos, w=w, h=h, theta=0.)
python
def to_aperture(self): """ Return a `~photutils.aperture.RectangularAperture` that represents the bounding box. """ from .rectangle import RectangularAperture xpos = (self.extent[1] + self.extent[0]) / 2. ypos = (self.extent[3] + self.extent[2]) / 2. xypos = (xpos, ypos) h, w = self.shape return RectangularAperture(xypos, w=w, h=h, theta=0.)
[ "def", "to_aperture", "(", "self", ")", ":", "from", ".", "rectangle", "import", "RectangularAperture", "xpos", "=", "(", "self", ".", "extent", "[", "1", "]", "+", "self", ".", "extent", "[", "0", "]", ")", "/", "2.", "ypos", "=", "(", "self", ".", "extent", "[", "3", "]", "+", "self", ".", "extent", "[", "2", "]", ")", "/", "2.", "xypos", "=", "(", "xpos", ",", "ypos", ")", "h", ",", "w", "=", "self", ".", "shape", "return", "RectangularAperture", "(", "xypos", ",", "w", "=", "w", ",", "h", "=", "h", ",", "theta", "=", "0.", ")" ]
Return a `~photutils.aperture.RectangularAperture` that represents the bounding box.
[ "Return", "a", "~photutils", ".", "aperture", ".", "RectangularAperture", "that", "represents", "the", "bounding", "box", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/bounding_box.py#L218-L231
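A brief sketch of `to_aperture`, again using the positional argument order from the `as_patch` example; the ``w``, ``h`` and ``positions`` attribute names on the returned aperture are assumed from the ``RectangularAperture(xypos, w=w, h=h, theta=0.)`` call in the record:

from photutils import BoundingBox

bbox = BoundingBox(2, 7, 3, 8)       # (ixmin, ixmax, iymin, iymax)
aper = bbox.to_aperture()
print(aper.positions)                # rectangle center, midway along each extent
print(aper.w, aper.h)                # box width and height in pixels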
10,580
astropy/photutils
photutils/aperture/bounding_box.py
BoundingBox.plot
def plot(self, origin=(0, 0), ax=None, fill=False, **kwargs): """ Plot the `BoundingBox` on a matplotlib `~matplotlib.axes.Axes` instance. Parameters ---------- origin : array_like, optional The ``(x, y)`` position of the origin of the displayed image. ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current `~matplotlib.axes.Axes` instance is used. fill : bool, optional Set whether to fill the aperture patch. The default is `False`. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. """ aper = self.to_aperture() aper.plot(origin=origin, ax=ax, fill=fill, **kwargs)
python
def plot(self, origin=(0, 0), ax=None, fill=False, **kwargs): """ Plot the `BoundingBox` on a matplotlib `~matplotlib.axes.Axes` instance. Parameters ---------- origin : array_like, optional The ``(x, y)`` position of the origin of the displayed image. ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current `~matplotlib.axes.Axes` instance is used. fill : bool, optional Set whether to fill the aperture patch. The default is `False`. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`. """ aper = self.to_aperture() aper.plot(origin=origin, ax=ax, fill=fill, **kwargs)
[ "def", "plot", "(", "self", ",", "origin", "=", "(", "0", ",", "0", ")", ",", "ax", "=", "None", ",", "fill", "=", "False", ",", "*", "*", "kwargs", ")", ":", "aper", "=", "self", ".", "to_aperture", "(", ")", "aper", ".", "plot", "(", "origin", "=", "origin", ",", "ax", "=", "ax", ",", "fill", "=", "fill", ",", "*", "*", "kwargs", ")" ]
Plot the `BoundingBox` on a matplotlib `~matplotlib.axes.Axes` instance. Parameters ---------- origin : array_like, optional The ``(x, y)`` position of the origin of the displayed image. ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current `~matplotlib.axes.Axes` instance is used. fill : bool, optional Set whether to fill the aperture patch. The default is `False`. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`.
[ "Plot", "the", "BoundingBox", "on", "a", "matplotlib", "~matplotlib", ".", "axes", ".", "Axes", "instance", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/bounding_box.py#L233-L257
10,581
astropy/photutils
photutils/detection/findstars.py
_find_stars
def _find_stars(data, kernel, threshold_eff, min_separation=None, mask=None, exclude_border=False): """ Find stars in an image. Parameters ---------- data : 2D array_like The 2D array of the image. kernel : `_StarFinderKernel` The convolution kernel. threshold_eff : float The absolute image value above which to select sources. This threshold should be the threshold input to the star finder class multiplied by the kernel relerr. mask : 2D bool array, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when searching for stars. exclude_border : bool, optional Set to `True` to exclude sources found within half the size of the convolution kernel from the image borders. The default is `False`, which is the mode used by IRAF's `DAOFIND`_ and `starfind`_ tasks. Returns ------- objects : list of `_StarCutout` A list of `_StarCutout` objects containing the image cutout for each source. .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind .. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind """ convolved_data = filter_data(data, kernel.data, mode='constant', fill_value=0.0, check_normalization=False) # define a local footprint for the peak finder if min_separation is None: # daofind footprint = kernel.mask.astype(np.bool) else: # define a circular footprint idx = np.arange(-min_separation, min_separation + 1) xx, yy = np.meshgrid(idx, idx) footprint = np.array((xx**2 + yy**2) <= min_separation**2, dtype=int) # pad the data and convolved image by the kernel x/y radius to allow # for detections near the edges if not exclude_border: ypad = kernel.yradius xpad = kernel.xradius pad = ((ypad, ypad), (xpad, xpad)) # mode must be a string for numpy < 0.11 # (see https://github.com/numpy/numpy/issues/7112) mode = str('constant') data = np.pad(data, pad, mode=mode, constant_values=[0.]) if mask is not None: mask = np.pad(mask, pad, mode=mode, constant_values=[0.]) convolved_data = np.pad(convolved_data, pad, mode=mode, constant_values=[0.]) # find local peaks in the convolved data with warnings.catch_warnings(): # suppress any NoDetectionsWarning from find_peaks warnings.filterwarnings('ignore', category=NoDetectionsWarning) tbl = find_peaks(convolved_data, threshold_eff, footprint=footprint, mask=mask) if tbl is None: return None coords = np.transpose([tbl['y_peak'], tbl['x_peak']]) star_cutouts = [] for (ypeak, xpeak) in coords: # now extract the object from the data, centered on the peak # pixel in the convolved image, with the same size as the kernel x0 = xpeak - kernel.xradius x1 = xpeak + kernel.xradius + 1 y0 = ypeak - kernel.yradius y1 = ypeak + kernel.yradius + 1 if x0 < 0 or x1 > data.shape[1]: continue # pragma: no cover if y0 < 0 or y1 > data.shape[0]: continue # pragma: no cover slices = (slice(y0, y1), slice(x0, x1)) data_cutout = data[slices] convdata_cutout = convolved_data[slices] # correct pixel values for the previous image padding if not exclude_border: x0 -= kernel.xradius x1 -= kernel.xradius y0 -= kernel.yradius y1 -= kernel.yradius xpeak -= kernel.xradius ypeak -= kernel.yradius slices = (slice(y0, y1), slice(x0, x1)) star_cutouts.append(_StarCutout(data_cutout, convdata_cutout, slices, xpeak, ypeak, kernel, threshold_eff)) return star_cutouts
python
def _find_stars(data, kernel, threshold_eff, min_separation=None, mask=None, exclude_border=False): """ Find stars in an image. Parameters ---------- data : 2D array_like The 2D array of the image. kernel : `_StarFinderKernel` The convolution kernel. threshold_eff : float The absolute image value above which to select sources. This threshold should be the threshold input to the star finder class multiplied by the kernel relerr. mask : 2D bool array, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when searching for stars. exclude_border : bool, optional Set to `True` to exclude sources found within half the size of the convolution kernel from the image borders. The default is `False`, which is the mode used by IRAF's `DAOFIND`_ and `starfind`_ tasks. Returns ------- objects : list of `_StarCutout` A list of `_StarCutout` objects containing the image cutout for each source. .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind .. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind """ convolved_data = filter_data(data, kernel.data, mode='constant', fill_value=0.0, check_normalization=False) # define a local footprint for the peak finder if min_separation is None: # daofind footprint = kernel.mask.astype(np.bool) else: # define a circular footprint idx = np.arange(-min_separation, min_separation + 1) xx, yy = np.meshgrid(idx, idx) footprint = np.array((xx**2 + yy**2) <= min_separation**2, dtype=int) # pad the data and convolved image by the kernel x/y radius to allow # for detections near the edges if not exclude_border: ypad = kernel.yradius xpad = kernel.xradius pad = ((ypad, ypad), (xpad, xpad)) # mode must be a string for numpy < 0.11 # (see https://github.com/numpy/numpy/issues/7112) mode = str('constant') data = np.pad(data, pad, mode=mode, constant_values=[0.]) if mask is not None: mask = np.pad(mask, pad, mode=mode, constant_values=[0.]) convolved_data = np.pad(convolved_data, pad, mode=mode, constant_values=[0.]) # find local peaks in the convolved data with warnings.catch_warnings(): # suppress any NoDetectionsWarning from find_peaks warnings.filterwarnings('ignore', category=NoDetectionsWarning) tbl = find_peaks(convolved_data, threshold_eff, footprint=footprint, mask=mask) if tbl is None: return None coords = np.transpose([tbl['y_peak'], tbl['x_peak']]) star_cutouts = [] for (ypeak, xpeak) in coords: # now extract the object from the data, centered on the peak # pixel in the convolved image, with the same size as the kernel x0 = xpeak - kernel.xradius x1 = xpeak + kernel.xradius + 1 y0 = ypeak - kernel.yradius y1 = ypeak + kernel.yradius + 1 if x0 < 0 or x1 > data.shape[1]: continue # pragma: no cover if y0 < 0 or y1 > data.shape[0]: continue # pragma: no cover slices = (slice(y0, y1), slice(x0, x1)) data_cutout = data[slices] convdata_cutout = convolved_data[slices] # correct pixel values for the previous image padding if not exclude_border: x0 -= kernel.xradius x1 -= kernel.xradius y0 -= kernel.yradius y1 -= kernel.yradius xpeak -= kernel.xradius ypeak -= kernel.yradius slices = (slice(y0, y1), slice(x0, x1)) star_cutouts.append(_StarCutout(data_cutout, convdata_cutout, slices, xpeak, ypeak, kernel, threshold_eff)) return star_cutouts
[ "def", "_find_stars", "(", "data", ",", "kernel", ",", "threshold_eff", ",", "min_separation", "=", "None", ",", "mask", "=", "None", ",", "exclude_border", "=", "False", ")", ":", "convolved_data", "=", "filter_data", "(", "data", ",", "kernel", ".", "data", ",", "mode", "=", "'constant'", ",", "fill_value", "=", "0.0", ",", "check_normalization", "=", "False", ")", "# define a local footprint for the peak finder", "if", "min_separation", "is", "None", ":", "# daofind", "footprint", "=", "kernel", ".", "mask", ".", "astype", "(", "np", ".", "bool", ")", "else", ":", "# define a circular footprint", "idx", "=", "np", ".", "arange", "(", "-", "min_separation", ",", "min_separation", "+", "1", ")", "xx", ",", "yy", "=", "np", ".", "meshgrid", "(", "idx", ",", "idx", ")", "footprint", "=", "np", ".", "array", "(", "(", "xx", "**", "2", "+", "yy", "**", "2", ")", "<=", "min_separation", "**", "2", ",", "dtype", "=", "int", ")", "# pad the data and convolved image by the kernel x/y radius to allow", "# for detections near the edges", "if", "not", "exclude_border", ":", "ypad", "=", "kernel", ".", "yradius", "xpad", "=", "kernel", ".", "xradius", "pad", "=", "(", "(", "ypad", ",", "ypad", ")", ",", "(", "xpad", ",", "xpad", ")", ")", "# mode must be a string for numpy < 0.11", "# (see https://github.com/numpy/numpy/issues/7112)", "mode", "=", "str", "(", "'constant'", ")", "data", "=", "np", ".", "pad", "(", "data", ",", "pad", ",", "mode", "=", "mode", ",", "constant_values", "=", "[", "0.", "]", ")", "if", "mask", "is", "not", "None", ":", "mask", "=", "np", ".", "pad", "(", "mask", ",", "pad", ",", "mode", "=", "mode", ",", "constant_values", "=", "[", "0.", "]", ")", "convolved_data", "=", "np", ".", "pad", "(", "convolved_data", ",", "pad", ",", "mode", "=", "mode", ",", "constant_values", "=", "[", "0.", "]", ")", "# find local peaks in the convolved data", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "# suppress any NoDetectionsWarning from find_peaks", "warnings", ".", "filterwarnings", "(", "'ignore'", ",", "category", "=", "NoDetectionsWarning", ")", "tbl", "=", "find_peaks", "(", "convolved_data", ",", "threshold_eff", ",", "footprint", "=", "footprint", ",", "mask", "=", "mask", ")", "if", "tbl", "is", "None", ":", "return", "None", "coords", "=", "np", ".", "transpose", "(", "[", "tbl", "[", "'y_peak'", "]", ",", "tbl", "[", "'x_peak'", "]", "]", ")", "star_cutouts", "=", "[", "]", "for", "(", "ypeak", ",", "xpeak", ")", "in", "coords", ":", "# now extract the object from the data, centered on the peak", "# pixel in the convolved image, with the same size as the kernel", "x0", "=", "xpeak", "-", "kernel", ".", "xradius", "x1", "=", "xpeak", "+", "kernel", ".", "xradius", "+", "1", "y0", "=", "ypeak", "-", "kernel", ".", "yradius", "y1", "=", "ypeak", "+", "kernel", ".", "yradius", "+", "1", "if", "x0", "<", "0", "or", "x1", ">", "data", ".", "shape", "[", "1", "]", ":", "continue", "# pragma: no cover", "if", "y0", "<", "0", "or", "y1", ">", "data", ".", "shape", "[", "0", "]", ":", "continue", "# pragma: no cover", "slices", "=", "(", "slice", "(", "y0", ",", "y1", ")", ",", "slice", "(", "x0", ",", "x1", ")", ")", "data_cutout", "=", "data", "[", "slices", "]", "convdata_cutout", "=", "convolved_data", "[", "slices", "]", "# correct pixel values for the previous image padding", "if", "not", "exclude_border", ":", "x0", "-=", "kernel", ".", "xradius", "x1", "-=", "kernel", ".", "xradius", "y0", "-=", "kernel", ".", "yradius", "y1", "-=", "kernel", ".", "yradius", 
"xpeak", "-=", "kernel", ".", "xradius", "ypeak", "-=", "kernel", ".", "yradius", "slices", "=", "(", "slice", "(", "y0", ",", "y1", ")", ",", "slice", "(", "x0", ",", "x1", ")", ")", "star_cutouts", ".", "append", "(", "_StarCutout", "(", "data_cutout", ",", "convdata_cutout", ",", "slices", ",", "xpeak", ",", "ypeak", ",", "kernel", ",", "threshold_eff", ")", ")", "return", "star_cutouts" ]
Find stars in an image. Parameters ---------- data : 2D array_like The 2D array of the image. kernel : `_StarFinderKernel` The convolution kernel. threshold_eff : float The absolute image value above which to select sources. This threshold should be the threshold input to the star finder class multiplied by the kernel relerr. mask : 2D bool array, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when searching for stars. exclude_border : bool, optional Set to `True` to exclude sources found within half the size of the convolution kernel from the image borders. The default is `False`, which is the mode used by IRAF's `DAOFIND`_ and `starfind`_ tasks. Returns ------- objects : list of `_StarCutout` A list of `_StarCutout` objects containing the image cutout for each source. .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind .. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind
[ "Find", "stars", "in", "an", "image", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/detection/findstars.py#L595-L704
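The ``min_separation`` branch in `_find_stars` above builds a circular footprint for the peak finder; pulled out on its own it looks like this (the separation value of 3 is only illustrative):

import numpy as np

min_separation = 3
idx = np.arange(-min_separation, min_separation + 1)
xx, yy = np.meshgrid(idx, idx)
footprint = np.array((xx**2 + yy**2) <= min_separation**2, dtype=int)
print(footprint.shape)   # (7, 7) array of 0s and 1s passed as the find_peaks footprint
print(footprint)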
10,582
astropy/photutils
photutils/detection/findstars.py
_DAOFind_Properties.roundness2
def roundness2(self): """ The star roundness. This roundness parameter represents the ratio of the difference in the height of the best fitting Gaussian function in x minus the best fitting Gaussian function in y, divided by the average of the best fitting Gaussian functions in x and y. A circular source will have a zero roundness. A source extended in x or y will have a negative or positive roundness, respectively. """ if np.isnan(self.hx) or np.isnan(self.hy): return np.nan else: return 2.0 * (self.hx - self.hy) / (self.hx + self.hy)
python
def roundness2(self): """ The star roundness. This roundness parameter represents the ratio of the difference in the height of the best fitting Gaussian function in x minus the best fitting Gaussian function in y, divided by the average of the best fitting Gaussian functions in x and y. A circular source will have a zero roundness. A source extended in x or y will have a negative or positive roundness, respectively. """ if np.isnan(self.hx) or np.isnan(self.hy): return np.nan else: return 2.0 * (self.hx - self.hy) / (self.hx + self.hy)
[ "def", "roundness2", "(", "self", ")", ":", "if", "np", ".", "isnan", "(", "self", ".", "hx", ")", "or", "np", ".", "isnan", "(", "self", ".", "hy", ")", ":", "return", "np", ".", "nan", "else", ":", "return", "2.0", "*", "(", "self", ".", "hx", "-", "self", ".", "hy", ")", "/", "(", "self", ".", "hx", "+", "self", ".", "hy", ")" ]
The star roundness.

This roundness parameter is the difference between the heights of
the best-fitting Gaussian functions in x and y, divided by the
average of those two heights. A circular source will have a zero
roundness. A source extended in x or y will have a negative or
positive roundness, respectively.
[ "The", "star", "roundness", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/detection/findstars.py#L426-L441
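A worked instance of the formula in `roundness2`, with made-up Gaussian heights:

hx, hy = 120.0, 100.0                       # hypothetical best-fit heights in x and y
roundness2 = 2.0 * (hx - hy) / (hx + hy)    # difference divided by the average height
print(round(roundness2, 4))                 # 0.1818; exactly 0.0 for a circular source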
10,583
astropy/photutils
photutils/segmentation/detect.py
detect_sources
def detect_sources(data, threshold, npixels, filter_kernel=None, connectivity=8, mask=None): """ Detect sources above a specified threshold value in an image and return a `~photutils.segmentation.SegmentationImage` object. Detected sources must have ``npixels`` connected pixels that are each greater than the ``threshold`` value. If the filtering option is used, then the ``threshold`` is applied to the filtered image. The input ``mask`` can be used to mask pixels in the input data. Masked pixels will not be included in any source. This function does not deblend overlapping sources. First use this function to detect sources followed by :func:`~photutils.segmentation.deblend_sources` to deblend sources. Parameters ---------- data : array_like The 2D array of the image. threshold : float or array-like The data value or pixel-wise data values to be used for the detection threshold. A 2D ``threshold`` must have the same shape as ``data``. See `~photutils.detection.detect_threshold` for one way to create a ``threshold`` image. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. connectivity : {4, 8}, optional The type of pixel connectivity used in determining how pixels are grouped into a detected source. The options are 4 or 8 (default). 4-connected pixels touch along their edges. 8-connected pixels touch along their edges or corners. For reference, SExtractor uses 8-connected pixels. mask : array_like (bool) A boolean mask, with the same shape as the input ``data``, where `True` values indicate masked pixels. Masked pixels will not be included in any source. Returns ------- segment_image : `~photutils.segmentation.SegmentationImage` or `None` A 2D segmentation image, with the same shape as ``data``, where sources are marked by different positive integer values. A value of zero is reserved for the background. If no sources are found then `None` is returned. See Also -------- :func:`photutils.detection.detect_threshold`, :class:`photutils.segmentation.SegmentationImage`, :func:`photutils.segmentation.source_properties` :func:`photutils.segmentation.deblend_sources` Examples -------- .. plot:: :include-source: # make a table of Gaussian sources from astropy.table import Table table = Table() table['amplitude'] = [50, 70, 150, 210] table['x_mean'] = [160, 25, 150, 90] table['y_mean'] = [70, 40, 25, 60] table['x_stddev'] = [15.2, 5.1, 3., 8.1] table['y_stddev'] = [2.6, 2.5, 3., 4.7] table['theta'] = np.array([145., 20., 0., 60.]) * np.pi / 180. 
# make an image of the sources with Gaussian noise from photutils.datasets import make_gaussian_sources_image from photutils.datasets import make_noise_image shape = (100, 200) sources = make_gaussian_sources_image(shape, table) noise = make_noise_image(shape, type='gaussian', mean=0., stddev=5., random_state=12345) image = sources + noise # detect the sources from photutils import detect_threshold, detect_sources threshold = detect_threshold(image, snr=3) from astropy.convolution import Gaussian2DKernel sigma = 3.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))) # FWHM = 3 kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) kernel.normalize() segm = detect_sources(image, threshold, npixels=5, filter_kernel=kernel) # plot the image and the segmentation image import matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) ax1.imshow(image, origin='lower', interpolation='nearest') ax2.imshow(segm.data, origin='lower', interpolation='nearest') """ from scipy import ndimage if (npixels <= 0) or (int(npixels) != npixels): raise ValueError('npixels must be a positive integer, got ' '"{0}"'.format(npixels)) image = (filter_data(data, filter_kernel, mode='constant', fill_value=0.0, check_normalization=True) > threshold) if mask is not None: if mask.shape != image.shape: raise ValueError('mask must have the same shape as the input ' 'image.') image &= ~mask if connectivity == 4: selem = ndimage.generate_binary_structure(2, 1) elif connectivity == 8: selem = ndimage.generate_binary_structure(2, 2) else: raise ValueError('Invalid connectivity={0}. ' 'Options are 4 or 8'.format(connectivity)) segm_img, nobj = ndimage.label(image, structure=selem) # remove objects with less than npixels # NOTE: for typical data, making the cutout images is ~10x faster # than using segm_img directly segm_slices = ndimage.find_objects(segm_img) for i, slices in enumerate(segm_slices): cutout = segm_img[slices] segment_mask = (cutout == (i+1)) if np.count_nonzero(segment_mask) < npixels: cutout[segment_mask] = 0 # now relabel to make consecutive label indices segm_img, nobj = ndimage.label(segm_img, structure=selem) if nobj == 0: warnings.warn('No sources were found.', NoDetectionsWarning) return None else: return SegmentationImage(segm_img)
python
def detect_sources(data, threshold, npixels, filter_kernel=None, connectivity=8, mask=None): """ Detect sources above a specified threshold value in an image and return a `~photutils.segmentation.SegmentationImage` object. Detected sources must have ``npixels`` connected pixels that are each greater than the ``threshold`` value. If the filtering option is used, then the ``threshold`` is applied to the filtered image. The input ``mask`` can be used to mask pixels in the input data. Masked pixels will not be included in any source. This function does not deblend overlapping sources. First use this function to detect sources followed by :func:`~photutils.segmentation.deblend_sources` to deblend sources. Parameters ---------- data : array_like The 2D array of the image. threshold : float or array-like The data value or pixel-wise data values to be used for the detection threshold. A 2D ``threshold`` must have the same shape as ``data``. See `~photutils.detection.detect_threshold` for one way to create a ``threshold`` image. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. connectivity : {4, 8}, optional The type of pixel connectivity used in determining how pixels are grouped into a detected source. The options are 4 or 8 (default). 4-connected pixels touch along their edges. 8-connected pixels touch along their edges or corners. For reference, SExtractor uses 8-connected pixels. mask : array_like (bool) A boolean mask, with the same shape as the input ``data``, where `True` values indicate masked pixels. Masked pixels will not be included in any source. Returns ------- segment_image : `~photutils.segmentation.SegmentationImage` or `None` A 2D segmentation image, with the same shape as ``data``, where sources are marked by different positive integer values. A value of zero is reserved for the background. If no sources are found then `None` is returned. See Also -------- :func:`photutils.detection.detect_threshold`, :class:`photutils.segmentation.SegmentationImage`, :func:`photutils.segmentation.source_properties` :func:`photutils.segmentation.deblend_sources` Examples -------- .. plot:: :include-source: # make a table of Gaussian sources from astropy.table import Table table = Table() table['amplitude'] = [50, 70, 150, 210] table['x_mean'] = [160, 25, 150, 90] table['y_mean'] = [70, 40, 25, 60] table['x_stddev'] = [15.2, 5.1, 3., 8.1] table['y_stddev'] = [2.6, 2.5, 3., 4.7] table['theta'] = np.array([145., 20., 0., 60.]) * np.pi / 180. 
# make an image of the sources with Gaussian noise from photutils.datasets import make_gaussian_sources_image from photutils.datasets import make_noise_image shape = (100, 200) sources = make_gaussian_sources_image(shape, table) noise = make_noise_image(shape, type='gaussian', mean=0., stddev=5., random_state=12345) image = sources + noise # detect the sources from photutils import detect_threshold, detect_sources threshold = detect_threshold(image, snr=3) from astropy.convolution import Gaussian2DKernel sigma = 3.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))) # FWHM = 3 kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) kernel.normalize() segm = detect_sources(image, threshold, npixels=5, filter_kernel=kernel) # plot the image and the segmentation image import matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) ax1.imshow(image, origin='lower', interpolation='nearest') ax2.imshow(segm.data, origin='lower', interpolation='nearest') """ from scipy import ndimage if (npixels <= 0) or (int(npixels) != npixels): raise ValueError('npixels must be a positive integer, got ' '"{0}"'.format(npixels)) image = (filter_data(data, filter_kernel, mode='constant', fill_value=0.0, check_normalization=True) > threshold) if mask is not None: if mask.shape != image.shape: raise ValueError('mask must have the same shape as the input ' 'image.') image &= ~mask if connectivity == 4: selem = ndimage.generate_binary_structure(2, 1) elif connectivity == 8: selem = ndimage.generate_binary_structure(2, 2) else: raise ValueError('Invalid connectivity={0}. ' 'Options are 4 or 8'.format(connectivity)) segm_img, nobj = ndimage.label(image, structure=selem) # remove objects with less than npixels # NOTE: for typical data, making the cutout images is ~10x faster # than using segm_img directly segm_slices = ndimage.find_objects(segm_img) for i, slices in enumerate(segm_slices): cutout = segm_img[slices] segment_mask = (cutout == (i+1)) if np.count_nonzero(segment_mask) < npixels: cutout[segment_mask] = 0 # now relabel to make consecutive label indices segm_img, nobj = ndimage.label(segm_img, structure=selem) if nobj == 0: warnings.warn('No sources were found.', NoDetectionsWarning) return None else: return SegmentationImage(segm_img)
[ "def", "detect_sources", "(", "data", ",", "threshold", ",", "npixels", ",", "filter_kernel", "=", "None", ",", "connectivity", "=", "8", ",", "mask", "=", "None", ")", ":", "from", "scipy", "import", "ndimage", "if", "(", "npixels", "<=", "0", ")", "or", "(", "int", "(", "npixels", ")", "!=", "npixels", ")", ":", "raise", "ValueError", "(", "'npixels must be a positive integer, got '", "'\"{0}\"'", ".", "format", "(", "npixels", ")", ")", "image", "=", "(", "filter_data", "(", "data", ",", "filter_kernel", ",", "mode", "=", "'constant'", ",", "fill_value", "=", "0.0", ",", "check_normalization", "=", "True", ")", ">", "threshold", ")", "if", "mask", "is", "not", "None", ":", "if", "mask", ".", "shape", "!=", "image", ".", "shape", ":", "raise", "ValueError", "(", "'mask must have the same shape as the input '", "'image.'", ")", "image", "&=", "~", "mask", "if", "connectivity", "==", "4", ":", "selem", "=", "ndimage", ".", "generate_binary_structure", "(", "2", ",", "1", ")", "elif", "connectivity", "==", "8", ":", "selem", "=", "ndimage", ".", "generate_binary_structure", "(", "2", ",", "2", ")", "else", ":", "raise", "ValueError", "(", "'Invalid connectivity={0}. '", "'Options are 4 or 8'", ".", "format", "(", "connectivity", ")", ")", "segm_img", ",", "nobj", "=", "ndimage", ".", "label", "(", "image", ",", "structure", "=", "selem", ")", "# remove objects with less than npixels", "# NOTE: for typical data, making the cutout images is ~10x faster", "# than using segm_img directly", "segm_slices", "=", "ndimage", ".", "find_objects", "(", "segm_img", ")", "for", "i", ",", "slices", "in", "enumerate", "(", "segm_slices", ")", ":", "cutout", "=", "segm_img", "[", "slices", "]", "segment_mask", "=", "(", "cutout", "==", "(", "i", "+", "1", ")", ")", "if", "np", ".", "count_nonzero", "(", "segment_mask", ")", "<", "npixels", ":", "cutout", "[", "segment_mask", "]", "=", "0", "# now relabel to make consecutive label indices", "segm_img", ",", "nobj", "=", "ndimage", ".", "label", "(", "segm_img", ",", "structure", "=", "selem", ")", "if", "nobj", "==", "0", ":", "warnings", ".", "warn", "(", "'No sources were found.'", ",", "NoDetectionsWarning", ")", "return", "None", "else", ":", "return", "SegmentationImage", "(", "segm_img", ")" ]
Detect sources above a specified threshold value in an image and return a `~photutils.segmentation.SegmentationImage` object. Detected sources must have ``npixels`` connected pixels that are each greater than the ``threshold`` value. If the filtering option is used, then the ``threshold`` is applied to the filtered image. The input ``mask`` can be used to mask pixels in the input data. Masked pixels will not be included in any source. This function does not deblend overlapping sources. First use this function to detect sources followed by :func:`~photutils.segmentation.deblend_sources` to deblend sources. Parameters ---------- data : array_like The 2D array of the image. threshold : float or array-like The data value or pixel-wise data values to be used for the detection threshold. A 2D ``threshold`` must have the same shape as ``data``. See `~photutils.detection.detect_threshold` for one way to create a ``threshold`` image. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. connectivity : {4, 8}, optional The type of pixel connectivity used in determining how pixels are grouped into a detected source. The options are 4 or 8 (default). 4-connected pixels touch along their edges. 8-connected pixels touch along their edges or corners. For reference, SExtractor uses 8-connected pixels. mask : array_like (bool) A boolean mask, with the same shape as the input ``data``, where `True` values indicate masked pixels. Masked pixels will not be included in any source. Returns ------- segment_image : `~photutils.segmentation.SegmentationImage` or `None` A 2D segmentation image, with the same shape as ``data``, where sources are marked by different positive integer values. A value of zero is reserved for the background. If no sources are found then `None` is returned. See Also -------- :func:`photutils.detection.detect_threshold`, :class:`photutils.segmentation.SegmentationImage`, :func:`photutils.segmentation.source_properties` :func:`photutils.segmentation.deblend_sources` Examples -------- .. plot:: :include-source: # make a table of Gaussian sources from astropy.table import Table table = Table() table['amplitude'] = [50, 70, 150, 210] table['x_mean'] = [160, 25, 150, 90] table['y_mean'] = [70, 40, 25, 60] table['x_stddev'] = [15.2, 5.1, 3., 8.1] table['y_stddev'] = [2.6, 2.5, 3., 4.7] table['theta'] = np.array([145., 20., 0., 60.]) * np.pi / 180. 
# make an image of the sources with Gaussian noise from photutils.datasets import make_gaussian_sources_image from photutils.datasets import make_noise_image shape = (100, 200) sources = make_gaussian_sources_image(shape, table) noise = make_noise_image(shape, type='gaussian', mean=0., stddev=5., random_state=12345) image = sources + noise # detect the sources from photutils import detect_threshold, detect_sources threshold = detect_threshold(image, snr=3) from astropy.convolution import Gaussian2DKernel sigma = 3.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))) # FWHM = 3 kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) kernel.normalize() segm = detect_sources(image, threshold, npixels=5, filter_kernel=kernel) # plot the image and the segmentation image import matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8)) ax1.imshow(image, origin='lower', interpolation='nearest') ax2.imshow(segm.data, origin='lower', interpolation='nearest')
[ "Detect", "sources", "above", "a", "specified", "threshold", "value", "in", "an", "image", "and", "return", "a", "~photutils", ".", "segmentation", ".", "SegmentationImage", "object", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/detect.py#L18-L167
10,584
astropy/photutils
photutils/segmentation/detect.py
make_source_mask
def make_source_mask(data, snr, npixels, mask=None, mask_value=None, filter_fwhm=None, filter_size=3, filter_kernel=None, sigclip_sigma=3.0, sigclip_iters=5, dilate_size=11): """ Make a source mask using source segmentation and binary dilation. Parameters ---------- data : array_like The 2D array of the image. snr : float The signal-to-noise ratio per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. filter_fwhm : float, optional The full-width at half-maximum (FWHM) of the Gaussian kernel to filter the image before thresholding. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_size : float, optional The size of the square Gaussian kernel image. Used only if ``filter_fwhm`` is defined. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. ``filter_kernel`` overrides ``filter_fwhm`` and ``filter_size``. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. dilate_size : int, optional The size of the square array used to dilate the segmentation image. Returns ------- mask : 2D `~numpy.ndarray`, bool A 2D boolean image containing the source mask. """ from scipy import ndimage threshold = detect_threshold(data, snr, background=None, error=None, mask=mask, mask_value=None, sigclip_sigma=sigclip_sigma, sigclip_iters=sigclip_iters) kernel = None if filter_kernel is not None: kernel = filter_kernel if filter_fwhm is not None: sigma = filter_fwhm * gaussian_fwhm_to_sigma kernel = Gaussian2DKernel(sigma, x_size=filter_size, y_size=filter_size) if kernel is not None: kernel.normalize() segm = detect_sources(data, threshold, npixels, filter_kernel=kernel) selem = np.ones((dilate_size, dilate_size)) return ndimage.binary_dilation(segm.data.astype(np.bool), selem)
python
def make_source_mask(data, snr, npixels, mask=None, mask_value=None, filter_fwhm=None, filter_size=3, filter_kernel=None, sigclip_sigma=3.0, sigclip_iters=5, dilate_size=11): """ Make a source mask using source segmentation and binary dilation. Parameters ---------- data : array_like The 2D array of the image. snr : float The signal-to-noise ratio per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. filter_fwhm : float, optional The full-width at half-maximum (FWHM) of the Gaussian kernel to filter the image before thresholding. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_size : float, optional The size of the square Gaussian kernel image. Used only if ``filter_fwhm`` is defined. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. ``filter_kernel`` overrides ``filter_fwhm`` and ``filter_size``. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. dilate_size : int, optional The size of the square array used to dilate the segmentation image. Returns ------- mask : 2D `~numpy.ndarray`, bool A 2D boolean image containing the source mask. """ from scipy import ndimage threshold = detect_threshold(data, snr, background=None, error=None, mask=mask, mask_value=None, sigclip_sigma=sigclip_sigma, sigclip_iters=sigclip_iters) kernel = None if filter_kernel is not None: kernel = filter_kernel if filter_fwhm is not None: sigma = filter_fwhm * gaussian_fwhm_to_sigma kernel = Gaussian2DKernel(sigma, x_size=filter_size, y_size=filter_size) if kernel is not None: kernel.normalize() segm = detect_sources(data, threshold, npixels, filter_kernel=kernel) selem = np.ones((dilate_size, dilate_size)) return ndimage.binary_dilation(segm.data.astype(np.bool), selem)
[ "def", "make_source_mask", "(", "data", ",", "snr", ",", "npixels", ",", "mask", "=", "None", ",", "mask_value", "=", "None", ",", "filter_fwhm", "=", "None", ",", "filter_size", "=", "3", ",", "filter_kernel", "=", "None", ",", "sigclip_sigma", "=", "3.0", ",", "sigclip_iters", "=", "5", ",", "dilate_size", "=", "11", ")", ":", "from", "scipy", "import", "ndimage", "threshold", "=", "detect_threshold", "(", "data", ",", "snr", ",", "background", "=", "None", ",", "error", "=", "None", ",", "mask", "=", "mask", ",", "mask_value", "=", "None", ",", "sigclip_sigma", "=", "sigclip_sigma", ",", "sigclip_iters", "=", "sigclip_iters", ")", "kernel", "=", "None", "if", "filter_kernel", "is", "not", "None", ":", "kernel", "=", "filter_kernel", "if", "filter_fwhm", "is", "not", "None", ":", "sigma", "=", "filter_fwhm", "*", "gaussian_fwhm_to_sigma", "kernel", "=", "Gaussian2DKernel", "(", "sigma", ",", "x_size", "=", "filter_size", ",", "y_size", "=", "filter_size", ")", "if", "kernel", "is", "not", "None", ":", "kernel", ".", "normalize", "(", ")", "segm", "=", "detect_sources", "(", "data", ",", "threshold", ",", "npixels", ",", "filter_kernel", "=", "kernel", ")", "selem", "=", "np", ".", "ones", "(", "(", "dilate_size", ",", "dilate_size", ")", ")", "return", "ndimage", ".", "binary_dilation", "(", "segm", ".", "data", ".", "astype", "(", "np", ".", "bool", ")", ",", "selem", ")" ]
Make a source mask using source segmentation and binary dilation. Parameters ---------- data : array_like The 2D array of the image. snr : float The signal-to-noise ratio per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. filter_fwhm : float, optional The full-width at half-maximum (FWHM) of the Gaussian kernel to filter the image before thresholding. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_size : float, optional The size of the square Gaussian kernel image. Used only if ``filter_fwhm`` is defined. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. ``filter_kernel`` overrides ``filter_fwhm`` and ``filter_size``. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. dilate_size : int, optional The size of the square array used to dilate the segmentation image. Returns ------- mask : 2D `~numpy.ndarray`, bool A 2D boolean image containing the source mask.
[ "Make", "a", "source", "mask", "using", "source", "segmentation", "and", "binary", "dilation", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/detect.py#L170-L258
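A minimal usage sketch for ``make_source_mask``. The toy image, the RandomState seed, and the parameter values below are illustrative assumptions, not taken from the repository; the top-level ``from photutils import ...`` import mirrors the convention used in the ``detect_sources`` example above.

import numpy as np
from photutils import make_source_mask

# toy image: Gaussian noise plus one bright 2D Gaussian source
rng = np.random.RandomState(12345)
data = rng.normal(0., 5., (100, 100))
yy, xx = np.mgrid[:100, :100]
data += 100. * np.exp(-((xx - 50.)**2 + (yy - 50.)**2) / (2. * 3.**2))

# boolean mask of detected (and dilated) source pixels
mask = make_source_mask(data, snr=2, npixels=5, dilate_size=11)

The returned mask is typically passed back into sigma-clipped background statistics (e.g. via a ``mask`` keyword) so that real sources do not bias the background estimate.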
10,585
astropy/photutils
photutils/segmentation/core.py
Segment.data_ma
def data_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout image of the segment using the minimal bounding box. The mask is `True` for pixels outside of the source segment (i.e. neighboring segments within the rectangular cutout image are masked). """ mask = (self._segment_img[self.slices] != self.label) return np.ma.masked_array(self._segment_img[self.slices], mask=mask)
python
def data_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout image of the segment using the minimal bounding box. The mask is `True` for pixels outside of the source segment (i.e. neighboring segments within the rectangular cutout image are masked). """ mask = (self._segment_img[self.slices] != self.label) return np.ma.masked_array(self._segment_img[self.slices], mask=mask)
[ "def", "data_ma", "(", "self", ")", ":", "mask", "=", "(", "self", ".", "_segment_img", "[", "self", ".", "slices", "]", "!=", "self", ".", "label", ")", "return", "np", ".", "ma", ".", "masked_array", "(", "self", ".", "_segment_img", "[", "self", ".", "slices", "]", ",", "mask", "=", "mask", ")" ]
A 2D `~numpy.ma.MaskedArray` cutout image of the segment using the minimal bounding box. The mask is `True` for pixels outside of the source segment (i.e. neighboring segments within the rectangular cutout image are masked).
[ "A", "2D", "~numpy", ".", "ma", ".", "MaskedArray", "cutout", "image", "of", "the", "segment", "using", "the", "minimal", "bounding", "box", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L85-L96
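A short sketch of how ``data_ma`` is typically reached; it relies on the ``segments`` list and ``get_index`` accessor documented further down in this file, and reuses the small label array from the doctests below.

from photutils import SegmentationImage

segm = SegmentationImage([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4],
                          [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5],
                          [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]])

# Segment object for label 5, then its masked 3x3 bounding-box cutout;
# pixels inside the box that belong to other labels or background are masked
seg5 = segm.segments[segm.get_index(5)]
cutout = seg5.data_ma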
10,586
astropy/photutils
photutils/segmentation/core.py
SegmentationImage._reset_lazy_properties
def _reset_lazy_properties(self): """Reset all lazy properties.""" for key, value in self.__class__.__dict__.items(): if isinstance(value, lazyproperty): self.__dict__.pop(key, None)
python
def _reset_lazy_properties(self): """Reset all lazy properties.""" for key, value in self.__class__.__dict__.items(): if isinstance(value, lazyproperty): self.__dict__.pop(key, None)
[ "def", "_reset_lazy_properties", "(", "self", ")", ":", "for", "key", ",", "value", "in", "self", ".", "__class__", ".", "__dict__", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "lazyproperty", ")", ":", "self", ".", "__dict__", ".", "pop", "(", "key", ",", "None", ")" ]
Reset all lazy properties.
[ "Reset", "all", "lazy", "properties", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L200-L205
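A minimal stand-alone sketch (not photutils code) of the pattern this helper relies on: astropy's ``lazyproperty`` caches its result in the instance ``__dict__`` under the property name, so popping that key forces recomputation the next time the attribute is accessed after the underlying data changes. The ``Catalog`` class and its attributes are hypothetical.

from astropy.utils import lazyproperty

class Catalog:
    def __init__(self, values):
        self._values = values

    @lazyproperty
    def total(self):
        # computed once, then cached in self.__dict__['total']
        return sum(self._values)

    def update(self, values):
        self._values = values
        # same idiom as SegmentationImage._reset_lazy_properties:
        # drop every cached lazyproperty so it is recomputed on next access
        for key, value in self.__class__.__dict__.items():
            if isinstance(value, lazyproperty):
                self.__dict__.pop(key, None)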
10,587
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.segments
def segments(self): """ A list of `Segment` objects. The list starts with the *non-zero* label. The returned list has a length equal to the number of labels and matches the order of the ``labels`` attribute. """ segments = [] for label, slc in zip(self.labels, self.slices): segments.append(Segment(self.data, label, slc, self.get_area(label))) return segments
python
def segments(self): """ A list of `Segment` objects. The list starts with the *non-zero* label. The returned list has a length equal to the number of labels and matches the order of the ``labels`` attribute. """ segments = [] for label, slc in zip(self.labels, self.slices): segments.append(Segment(self.data, label, slc, self.get_area(label))) return segments
[ "def", "segments", "(", "self", ")", ":", "segments", "=", "[", "]", "for", "label", ",", "slc", "in", "zip", "(", "self", ".", "labels", ",", "self", ".", "slices", ")", ":", "segments", ".", "append", "(", "Segment", "(", "self", ".", "data", ",", "label", ",", "slc", ",", "self", ".", "get_area", "(", "label", ")", ")", ")", "return", "segments" ]
A list of `Segment` objects. The list starts with the *non-zero* label. The returned list has a length equal to the number of labels and matches the order of the ``labels`` attribute.
[ "A", "list", "of", "Segment", "objects", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L258-L271
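A small iteration sketch over the ``segments`` list, reusing the label array from the doctests in this file; ``label`` and ``data_ma`` are the ``Segment`` attributes documented above.

from photutils import SegmentationImage

segm = SegmentationImage([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4],
                          [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5],
                          [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]])

# one Segment per non-zero label, ordered like segm.labels ([1, 3, 4, 5, 7])
for seg in segm.segments:
    print(seg.label, seg.data_ma.shape)   # 1 (1, 2), 3 (1, 2), 4 (2, 2), ...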
10,588
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.get_index
def get_index(self, label): """ Find the index of the input ``label``. Parameters ---------- label : int The label number to find. Returns ------- index : int The array index. Raises ------ ValueError If ``label`` is invalid. """ self.check_labels(label) return np.searchsorted(self.labels, label)
python
def get_index(self, label): """ Find the index of the input ``label``. Parameters ---------- label : int The label number to find. Returns ------- index : int The array index. Raises ------ ValueError If ``label`` is invalid. """ self.check_labels(label) return np.searchsorted(self.labels, label)
[ "def", "get_index", "(", "self", ",", "label", ")", ":", "self", ".", "check_labels", "(", "label", ")", "return", "np", ".", "searchsorted", "(", "self", ".", "labels", ",", "label", ")" ]
Find the index of the input ``label``. Parameters ---------- label : int The label number to find. Returns ------- index : int The array index. Raises ------ ValueError If ``label`` is invalid.
[ "Find", "the", "index", "of", "the", "input", "label", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L333-L354
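A minimal sketch using the doctest label array; ``segm.labels`` here is ``[1, 3, 4, 5, 7]``, so the index of label 4 is 2.

from photutils import SegmentationImage

segm = SegmentationImage([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4],
                          [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5],
                          [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]])

segm.get_index(4)    # -> 2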
10,589
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.get_indices
def get_indices(self, labels): """ Find the indices of the input ``labels``. Parameters ---------- labels : int, array-like (1D, int) The label number(s) to find. Returns ------- indices : int `~numpy.ndarray` An integer array of indices with the same shape as ``labels``. If ``labels`` is a scalar, then the returned index will also be a scalar. Raises ------ ValueError If any input ``labels`` are invalid. """ self.check_labels(labels) return np.searchsorted(self.labels, labels)
python
def get_indices(self, labels): """ Find the indices of the input ``labels``. Parameters ---------- labels : int, array-like (1D, int) The label number(s) to find. Returns ------- indices : int `~numpy.ndarray` An integer array of indices with the same shape as ``labels``. If ``labels`` is a scalar, then the returned index will also be a scalar. Raises ------ ValueError If any input ``labels`` are invalid. """ self.check_labels(labels) return np.searchsorted(self.labels, labels)
[ "def", "get_indices", "(", "self", ",", "labels", ")", ":", "self", ".", "check_labels", "(", "labels", ")", "return", "np", ".", "searchsorted", "(", "self", ".", "labels", ",", "labels", ")" ]
Find the indices of the input ``labels``. Parameters ---------- labels : int, array-like (1D, int) The label number(s) to find. Returns ------- indices : int `~numpy.ndarray` An integer array of indices with the same shape as ``labels``. If ``labels`` is a scalar, then the returned index will also be a scalar. Raises ------ ValueError If any input ``labels`` are invalid.
[ "Find", "the", "indices", "of", "the", "input", "labels", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L356-L379
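The array-valued counterpart on the same label array; the result preserves the order of the requested labels.

from photutils import SegmentationImage

segm = SegmentationImage([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4],
                          [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5],
                          [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]])

segm.get_indices([7, 3])    # -> array([4, 1])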
10,590
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.slices
def slices(self): """ A list of tuples, where each tuple contains two slices representing the minimal box that contains the labeled region. The list starts with the *non-zero* label. The returned list has a length equal to the number of labels and matches the order of the ``labels`` attribute. """ from scipy.ndimage import find_objects return [slc for slc in find_objects(self._data) if slc is not None]
python
def slices(self): """ A list of tuples, where each tuple contains two slices representing the minimal box that contains the labeled region. The list starts with the *non-zero* label. The returned list has a length equal to the number of labels and matches the order of the ``labels`` attribute. """ from scipy.ndimage import find_objects return [slc for slc in find_objects(self._data) if slc is not None]
[ "def", "slices", "(", "self", ")", ":", "from", "scipy", ".", "ndimage", "import", "find_objects", "return", "[", "slc", "for", "slc", "in", "find_objects", "(", "self", ".", "_data", ")", "if", "slc", "is", "not", "None", "]" ]
A list of tuples, where each tuple contains two slices representing the minimal box that contains the labeled region. The list starts with the *non-zero* label. The returned list has a length equal to the number of labels and matches the order of the ``labels`` attribute.
[ "A", "list", "of", "tuples", "where", "each", "tuple", "contains", "two", "slices", "representing", "the", "minimal", "box", "that", "contains", "the", "labeled", "region", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L382-L394
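A short sketch on the same label array; each entry is a (row, column) slice pair giving the minimal bounding box of one label.

from photutils import SegmentationImage

segm = SegmentationImage([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4],
                          [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5],
                          [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]])

len(segm.slices)    # -> 5, one bounding box per non-zero label
segm.slices[0]      # -> (slice(0, 1, None), slice(0, 2, None)) for label 1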
10,591
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.missing_labels
def missing_labels(self): """ A 1D `~numpy.ndarray` of the sorted non-zero labels that are missing in the consecutive sequence from zero to the maximum label number. """ return np.array(sorted(set(range(0, self.max_label + 1)). difference(np.insert(self.labels, 0, 0))))
python
def missing_labels(self): """ A 1D `~numpy.ndarray` of the sorted non-zero labels that are missing in the consecutive sequence from zero to the maximum label number. """ return np.array(sorted(set(range(0, self.max_label + 1)). difference(np.insert(self.labels, 0, 0))))
[ "def", "missing_labels", "(", "self", ")", ":", "return", "np", ".", "array", "(", "sorted", "(", "set", "(", "range", "(", "0", ",", "self", ".", "max_label", "+", "1", ")", ")", ".", "difference", "(", "np", ".", "insert", "(", "self", ".", "labels", ",", "0", ",", "0", ")", ")", ")", ")" ]
A 1D `~numpy.ndarray` of the sorted non-zero labels that are missing in the consecutive sequence from zero to the maximum label number.
[ "A", "1D", "~numpy", ".", "ndarray", "of", "the", "sorted", "non", "-", "zero", "labels", "that", "are", "missing", "in", "the", "consecutive", "sequence", "from", "zero", "to", "the", "maximum", "label", "number", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L466-L474
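On the same label array (labels 1, 3, 4, 5, 7 with maximum 7), the gaps in the sequence are 2 and 6.

from photutils import SegmentationImage

segm = SegmentationImage([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4],
                          [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5],
                          [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]])

segm.missing_labels    # -> array([2, 6])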
10,592
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.reassign_label
def reassign_label(self, label, new_label, relabel=False): """ Reassign a label number to a new number. If ``new_label`` is already present in the segmentation image, then it will be combined with the input ``label`` number. Parameters ---------- labels : int The label number to reassign. new_label : int The newly assigned label number. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_label(label=1, new_label=2) >>> segm.data array([[2, 2, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_label(label=1, new_label=4) >>> segm.data array([[4, 4, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_label(label=1, new_label=4, relabel=True) >>> segm.data array([[2, 2, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 1, 1, 0, 0], [4, 0, 0, 0, 0, 3], [4, 4, 0, 3, 3, 3], [4, 4, 0, 0, 3, 3]]) """ self.reassign_labels(label, new_label, relabel=relabel)
python
def reassign_label(self, label, new_label, relabel=False): """ Reassign a label number to a new number. If ``new_label`` is already present in the segmentation image, then it will be combined with the input ``label`` number. Parameters ---------- labels : int The label number to reassign. new_label : int The newly assigned label number. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_label(label=1, new_label=2) >>> segm.data array([[2, 2, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_label(label=1, new_label=4) >>> segm.data array([[4, 4, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_label(label=1, new_label=4, relabel=True) >>> segm.data array([[2, 2, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 1, 1, 0, 0], [4, 0, 0, 0, 0, 3], [4, 4, 0, 3, 3, 3], [4, 4, 0, 0, 3, 3]]) """ self.reassign_labels(label, new_label, relabel=relabel)
[ "def", "reassign_label", "(", "self", ",", "label", ",", "new_label", ",", "relabel", "=", "False", ")", ":", "self", ".", "reassign_labels", "(", "label", ",", "new_label", ",", "relabel", "=", "relabel", ")" ]
Reassign a label number to a new number. If ``new_label`` is already present in the segmentation image, then it will be combined with the input ``label`` number. Parameters ---------- labels : int The label number to reassign. new_label : int The newly assigned label number. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_label(label=1, new_label=2) >>> segm.data array([[2, 2, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_label(label=1, new_label=4) >>> segm.data array([[4, 4, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_label(label=1, new_label=4, relabel=True) >>> segm.data array([[2, 2, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 1, 1, 0, 0], [4, 0, 0, 0, 0, 3], [4, 4, 0, 3, 3, 3], [4, 4, 0, 0, 3, 3]])
[ "Reassign", "a", "label", "number", "to", "a", "new", "number", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L611-L680
10,593
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.reassign_labels
def reassign_labels(self, labels, new_label, relabel=False): """ Reassign one or more label numbers. Multiple input ``labels`` will all be reassigned to the same ``new_label`` number. If ``new_label`` is already present in the segmentation image, then it will be combined with the input ``labels``. Parameters ---------- labels : int, array-like (1D, int) The label numbers(s) to reassign. new_label : int The reassigned label number. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_labels(labels=[1, 7], new_label=2) >>> segm.data array([[2, 2, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [2, 0, 0, 0, 0, 5], [2, 2, 0, 5, 5, 5], [2, 2, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_labels(labels=[1, 7], new_label=4) >>> segm.data array([[4, 4, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [4, 0, 0, 0, 0, 5], [4, 4, 0, 5, 5, 5], [4, 4, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_labels(labels=[1, 7], new_label=2, relabel=True) >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [1, 0, 0, 0, 0, 4], [1, 1, 0, 4, 4, 4], [1, 1, 0, 0, 4, 4]]) """ self.check_labels(labels) labels = np.atleast_1d(labels) if len(labels) == 0: return idx = np.zeros(self.max_label + 1, dtype=int) idx[self.labels] = self.labels idx[labels] = new_label # calling the data setter resets all cached properties self.data = idx[self.data] if relabel: self.relabel_consecutive()
python
def reassign_labels(self, labels, new_label, relabel=False): """ Reassign one or more label numbers. Multiple input ``labels`` will all be reassigned to the same ``new_label`` number. If ``new_label`` is already present in the segmentation image, then it will be combined with the input ``labels``. Parameters ---------- labels : int, array-like (1D, int) The label numbers(s) to reassign. new_label : int The reassigned label number. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_labels(labels=[1, 7], new_label=2) >>> segm.data array([[2, 2, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [2, 0, 0, 0, 0, 5], [2, 2, 0, 5, 5, 5], [2, 2, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_labels(labels=[1, 7], new_label=4) >>> segm.data array([[4, 4, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [4, 0, 0, 0, 0, 5], [4, 4, 0, 5, 5, 5], [4, 4, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_labels(labels=[1, 7], new_label=2, relabel=True) >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [1, 0, 0, 0, 0, 4], [1, 1, 0, 4, 4, 4], [1, 1, 0, 0, 4, 4]]) """ self.check_labels(labels) labels = np.atleast_1d(labels) if len(labels) == 0: return idx = np.zeros(self.max_label + 1, dtype=int) idx[self.labels] = self.labels idx[labels] = new_label # calling the data setter resets all cached properties self.data = idx[self.data] if relabel: self.relabel_consecutive()
[ "def", "reassign_labels", "(", "self", ",", "labels", ",", "new_label", ",", "relabel", "=", "False", ")", ":", "self", ".", "check_labels", "(", "labels", ")", "labels", "=", "np", ".", "atleast_1d", "(", "labels", ")", "if", "len", "(", "labels", ")", "==", "0", ":", "return", "idx", "=", "np", ".", "zeros", "(", "self", ".", "max_label", "+", "1", ",", "dtype", "=", "int", ")", "idx", "[", "self", ".", "labels", "]", "=", "self", ".", "labels", "idx", "[", "labels", "]", "=", "new_label", "# calling the data setter resets all cached properties", "self", ".", "data", "=", "idx", "[", "self", ".", "data", "]", "if", "relabel", ":", "self", ".", "relabel_consecutive", "(", ")" ]
Reassign one or more label numbers. Multiple input ``labels`` will all be reassigned to the same ``new_label`` number. If ``new_label`` is already present in the segmentation image, then it will be combined with the input ``labels``. Parameters ---------- labels : int, array-like (1D, int) The label numbers(s) to reassign. new_label : int The reassigned label number. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_labels(labels=[1, 7], new_label=2) >>> segm.data array([[2, 2, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [2, 0, 0, 0, 0, 5], [2, 2, 0, 5, 5, 5], [2, 2, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_labels(labels=[1, 7], new_label=4) >>> segm.data array([[4, 4, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [4, 0, 0, 0, 0, 5], [4, 4, 0, 5, 5, 5], [4, 4, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.reassign_labels(labels=[1, 7], new_label=2, relabel=True) >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [1, 0, 0, 0, 0, 4], [1, 1, 0, 4, 4, 4], [1, 1, 0, 0, 4, 4]])
[ "Reassign", "one", "or", "more", "label", "numbers", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L682-L767
10,594
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.relabel_consecutive
def relabel_consecutive(self, start_label=1): """ Reassign the label numbers consecutively, such that there are no missing label numbers. Parameters ---------- start_label : int, optional The starting label number, which should be a positive integer. The default is 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.relabel_consecutive() >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [5, 0, 0, 0, 0, 4], [5, 5, 0, 4, 4, 4], [5, 5, 0, 0, 4, 4]]) """ if start_label <= 0: raise ValueError('start_label must be > 0.') if self.is_consecutive and (self.labels[0] == start_label): return new_labels = np.zeros(self.max_label + 1, dtype=np.int) new_labels[self.labels] = np.arange(self.nlabels) + start_label self.data = new_labels[self.data]
python
def relabel_consecutive(self, start_label=1): """ Reassign the label numbers consecutively, such that there are no missing label numbers. Parameters ---------- start_label : int, optional The starting label number, which should be a positive integer. The default is 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.relabel_consecutive() >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [5, 0, 0, 0, 0, 4], [5, 5, 0, 4, 4, 4], [5, 5, 0, 0, 4, 4]]) """ if start_label <= 0: raise ValueError('start_label must be > 0.') if self.is_consecutive and (self.labels[0] == start_label): return new_labels = np.zeros(self.max_label + 1, dtype=np.int) new_labels[self.labels] = np.arange(self.nlabels) + start_label self.data = new_labels[self.data]
[ "def", "relabel_consecutive", "(", "self", ",", "start_label", "=", "1", ")", ":", "if", "start_label", "<=", "0", ":", "raise", "ValueError", "(", "'start_label must be > 0.'", ")", "if", "self", ".", "is_consecutive", "and", "(", "self", ".", "labels", "[", "0", "]", "==", "start_label", ")", ":", "return", "new_labels", "=", "np", ".", "zeros", "(", "self", ".", "max_label", "+", "1", ",", "dtype", "=", "np", ".", "int", ")", "new_labels", "[", "self", ".", "labels", "]", "=", "np", ".", "arange", "(", "self", ".", "nlabels", ")", "+", "start_label", "self", ".", "data", "=", "new_labels", "[", "self", ".", "data", "]" ]
Reassign the label numbers consecutively, such that there are no missing label numbers. Parameters ---------- start_label : int, optional The starting label number, which should be a positive integer. The default is 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.relabel_consecutive() >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [5, 0, 0, 0, 0, 4], [5, 5, 0, 4, 4, 4], [5, 5, 0, 0, 4, 4]])
[ "Reassign", "the", "label", "numbers", "consecutively", "such", "that", "there", "are", "no", "missing", "label", "numbers", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L769-L807
10,595
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.keep_label
def keep_label(self, label, relabel=False): """ Keep only the specified label. Parameters ---------- label : int The label number to keep. relabel : bool, optional If `True`, then the single segment will be assigned a label value of 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_label(label=3) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_label(label=3, relabel=True) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) """ self.keep_labels(label, relabel=relabel)
python
def keep_label(self, label, relabel=False): """ Keep only the specified label. Parameters ---------- label : int The label number to keep. relabel : bool, optional If `True`, then the single segment will be assigned a label value of 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_label(label=3) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_label(label=3, relabel=True) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) """ self.keep_labels(label, relabel=relabel)
[ "def", "keep_label", "(", "self", ",", "label", ",", "relabel", "=", "False", ")", ":", "self", ".", "keep_labels", "(", "label", ",", "relabel", "=", "relabel", ")" ]
Keep only the specified label. Parameters ---------- label : int The label number to keep. relabel : bool, optional If `True`, then the single segment will be assigned a label value of 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_label(label=3) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_label(label=3, relabel=True) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]])
[ "Keep", "only", "the", "specified", "label", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L809-L856
10,596
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.keep_labels
def keep_labels(self, labels, relabel=False): """ Keep only the specified labels. Parameters ---------- labels : int, array-like (1D, int) The label number(s) to keep. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_labels(labels=[5, 3]) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 5], [0, 0, 0, 5, 5, 5], [0, 0, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_labels(labels=[5, 3], relabel=True) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 2], [0, 0, 0, 2, 2, 2], [0, 0, 0, 0, 2, 2]]) """ self.check_labels(labels) labels = np.atleast_1d(labels) labels_tmp = list(set(self.labels) - set(labels)) self.remove_labels(labels_tmp, relabel=relabel)
python
def keep_labels(self, labels, relabel=False): """ Keep only the specified labels. Parameters ---------- labels : int, array-like (1D, int) The label number(s) to keep. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_labels(labels=[5, 3]) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 5], [0, 0, 0, 5, 5, 5], [0, 0, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_labels(labels=[5, 3], relabel=True) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 2], [0, 0, 0, 2, 2, 2], [0, 0, 0, 0, 2, 2]]) """ self.check_labels(labels) labels = np.atleast_1d(labels) labels_tmp = list(set(self.labels) - set(labels)) self.remove_labels(labels_tmp, relabel=relabel)
[ "def", "keep_labels", "(", "self", ",", "labels", ",", "relabel", "=", "False", ")", ":", "self", ".", "check_labels", "(", "labels", ")", "labels", "=", "np", ".", "atleast_1d", "(", "labels", ")", "labels_tmp", "=", "list", "(", "set", "(", "self", ".", "labels", ")", "-", "set", "(", "labels", ")", ")", "self", ".", "remove_labels", "(", "labels_tmp", ",", "relabel", "=", "relabel", ")" ]
Keep only the specified labels. Parameters ---------- labels : int, array-like (1D, int) The label number(s) to keep. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_labels(labels=[5, 3]) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 5], [0, 0, 0, 5, 5, 5], [0, 0, 0, 0, 5, 5]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.keep_labels(labels=[5, 3], relabel=True) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 2], [0, 0, 0, 2, 2, 2], [0, 0, 0, 0, 2, 2]])
[ "Keep", "only", "the", "specified", "labels", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L858-L910
10,597
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.remove_label
def remove_label(self, label, relabel=False): """ Remove the label number. The removed label is assigned a value of zero (i.e., background). Parameters ---------- label : int The label number to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_label(label=5) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_label(label=5, relabel=True) >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [4, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0]]) """ self.remove_labels(label, relabel=relabel)
python
def remove_label(self, label, relabel=False): """ Remove the label number. The removed label is assigned a value of zero (i.e., background). Parameters ---------- label : int The label number to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_label(label=5) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_label(label=5, relabel=True) >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [4, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0]]) """ self.remove_labels(label, relabel=relabel)
[ "def", "remove_label", "(", "self", ",", "label", ",", "relabel", "=", "False", ")", ":", "self", ".", "remove_labels", "(", "label", ",", "relabel", "=", "relabel", ")" ]
Remove the label number. The removed label is assigned a value of zero (i.e., background). Parameters ---------- label : int The label number to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_label(label=5) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_label(label=5, relabel=True) >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [4, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0]])
[ "Remove", "the", "label", "number", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L912-L963
10,598
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.remove_labels
def remove_labels(self, labels, relabel=False): """ Remove one or more labels. Removed labels are assigned a value of zero (i.e., background). Parameters ---------- labels : int, array-like (1D, int) The label number(s) to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3]) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3], relabel=True) >>> segm.data array([[1, 1, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0, 0], [3, 0, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0]]) """ self.check_labels(labels) self.reassign_label(labels, new_label=0) if relabel: self.relabel_consecutive()
python
def remove_labels(self, labels, relabel=False): """ Remove one or more labels. Removed labels are assigned a value of zero (i.e., background). Parameters ---------- labels : int, array-like (1D, int) The label number(s) to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3]) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3], relabel=True) >>> segm.data array([[1, 1, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0, 0], [3, 0, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0]]) """ self.check_labels(labels) self.reassign_label(labels, new_label=0) if relabel: self.relabel_consecutive()
[ "def", "remove_labels", "(", "self", ",", "labels", ",", "relabel", "=", "False", ")", ":", "self", ".", "check_labels", "(", "labels", ")", "self", ".", "reassign_label", "(", "labels", ",", "new_label", "=", "0", ")", "if", "relabel", ":", "self", ".", "relabel_consecutive", "(", ")" ]
Remove one or more labels. Removed labels are assigned a value of zero (i.e., background). Parameters ---------- labels : int, array-like (1D, int) The label number(s) to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3]) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3], relabel=True) >>> segm.data array([[1, 1, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0, 0], [3, 0, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0]])
[ "Remove", "one", "or", "more", "labels", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L965-L1019
10,599
astropy/photutils
photutils/segmentation/core.py
SegmentationImage.remove_border_labels
def remove_border_labels(self, border_width, partial_overlap=True, relabel=False): """ Remove labeled segments near the image border. Labels within the defined border region will be removed. Parameters ---------- border_width : int The width of the border region in pixels. partial_overlap : bool, optional If this is set to `True` (the default), a segment that partially extends into the border region will be removed. Segments that are completely within the border region are always removed. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_border_labels(border_width=1) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_border_labels(border_width=1, ... partial_overlap=False) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) """ if border_width >= min(self.shape) / 2: raise ValueError('border_width must be smaller than half the ' 'image size in either dimension') border = np.zeros(self.shape, dtype=np.bool) border[:border_width, :] = True border[-border_width:, :] = True border[:, :border_width] = True border[:, -border_width:] = True self.remove_masked_labels(border, partial_overlap=partial_overlap, relabel=relabel)
python
def remove_border_labels(self, border_width, partial_overlap=True, relabel=False): """ Remove labeled segments near the image border. Labels within the defined border region will be removed. Parameters ---------- border_width : int The width of the border region in pixels. partial_overlap : bool, optional If this is set to `True` (the default), a segment that partially extends into the border region will be removed. Segments that are completely within the border region are always removed. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_border_labels(border_width=1) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_border_labels(border_width=1, ... partial_overlap=False) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]]) """ if border_width >= min(self.shape) / 2: raise ValueError('border_width must be smaller than half the ' 'image size in either dimension') border = np.zeros(self.shape, dtype=np.bool) border[:border_width, :] = True border[-border_width:, :] = True border[:, :border_width] = True border[:, -border_width:] = True self.remove_masked_labels(border, partial_overlap=partial_overlap, relabel=relabel)
[ "def", "remove_border_labels", "(", "self", ",", "border_width", ",", "partial_overlap", "=", "True", ",", "relabel", "=", "False", ")", ":", "if", "border_width", ">=", "min", "(", "self", ".", "shape", ")", "/", "2", ":", "raise", "ValueError", "(", "'border_width must be smaller than half the '", "'image size in either dimension'", ")", "border", "=", "np", ".", "zeros", "(", "self", ".", "shape", ",", "dtype", "=", "np", ".", "bool", ")", "border", "[", ":", "border_width", ",", ":", "]", "=", "True", "border", "[", "-", "border_width", ":", ",", ":", "]", "=", "True", "border", "[", ":", ",", ":", "border_width", "]", "=", "True", "border", "[", ":", ",", "-", "border_width", ":", "]", "=", "True", "self", ".", "remove_masked_labels", "(", "border", ",", "partial_overlap", "=", "partial_overlap", ",", "relabel", "=", "relabel", ")" ]
Remove labeled segments near the image border. Labels within the defined border region will be removed. Parameters ---------- border_width : int The width of the border region in pixels. partial_overlap : bool, optional If this is set to `True` (the default), a segment that partially extends into the border region will be removed. Segments that are completely within the border region are always removed. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_border_labels(border_width=1) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_border_labels(border_width=1, ... partial_overlap=False) >>> segm.data array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 5], [7, 7, 0, 5, 5, 5], [7, 7, 0, 0, 5, 5]])
[ "Remove", "labeled", "segments", "near", "the", "image", "border", "." ]
cc9bb4534ab76bac98cb5f374a348a2573d10401
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L1021-L1088