| column | type |
|---|---|
| id | int32 (0 to 252k) |
| repo | string, lengths 7 to 55 |
| path | string, lengths 4 to 127 |
| func_name | string, lengths 1 to 88 |
| original_string | string, lengths 75 to 19.8k |
| language | string, 1 class |
| code | string, lengths 75 to 19.8k |
| code_tokens | list |
| docstring | string, lengths 3 to 17.3k |
| docstring_tokens | list |
| sha | string, length 40 |
| url | string, lengths 87 to 242 |

id: 10,600
repo: astropy/photutils
path: photutils/segmentation/core.py
func_name: SegmentationImage.remove_masked_labels
language: python
original_string:
def remove_masked_labels(self, mask, partial_overlap=True,
relabel=False):
"""
Remove labeled segments located within a masked region.
Parameters
----------
mask : array_like (bool)
A boolean mask, with the same shape as the segmentation
image, where `True` values indicate masked pixels.
partial_overlap : bool, optional
If this is set to `True` (default), a segment that partially
extends into a masked region will also be removed. Segments
that are completely within a masked region are always
removed.
relabel : bool, optional
If `True`, then the segmentation image will be relabeled
such that the labels are in consecutive order starting from
1.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
        >>> import numpy as np
        >>> mask = np.zeros_like(segm.data, dtype=bool)
>>> mask[0, :] = True # mask the first row
>>> segm.remove_masked_labels(mask)
>>> segm.data
array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 3, 3, 0, 0],
[7, 0, 0, 0, 0, 5],
[7, 7, 0, 5, 5, 5],
[7, 7, 0, 0, 5, 5]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.remove_masked_labels(mask, partial_overlap=False)
>>> segm.data
array([[0, 0, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 3, 3, 0, 0],
[7, 0, 0, 0, 0, 5],
[7, 7, 0, 5, 5, 5],
[7, 7, 0, 0, 5, 5]])
"""
if mask.shape != self.shape:
raise ValueError('mask must have the same shape as the '
'segmentation image')
remove_labels = self._get_labels(self.data[mask])
if not partial_overlap:
interior_labels = self._get_labels(self.data[~mask])
remove_labels = list(set(remove_labels) - set(interior_labels))
self.remove_labels(remove_labels, relabel=relabel)
sha: cc9bb4534ab76bac98cb5f374a348a2573d10401
url: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L1090-L1155

id: 10,601
repo: astropy/photutils
path: photutils/segmentation/core.py
func_name: SegmentationImage.outline_segments
language: python
original_string:
def outline_segments(self, mask_background=False):
"""
Outline the labeled segments.
The "outlines" represent the pixels *just inside* the segments,
leaving the background pixels unmodified.
Parameters
----------
mask_background : bool, optional
Set to `True` to mask the background pixels (labels = 0) in
the returned image. This is useful for overplotting the
segment outlines on an image. The default is `False`.
Returns
-------
boundaries : 2D `~numpy.ndarray` or `~numpy.ma.MaskedArray`
            An image with the same shape as the segmentation image
containing only the outlines of the labeled segments. The
pixel values in the outlines correspond to the labels in the
segmentation image. If ``mask_background`` is `True`, then
a `~numpy.ma.MaskedArray` is returned.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[0, 0, 0, 0, 0, 0],
... [0, 2, 2, 2, 2, 0],
... [0, 2, 2, 2, 2, 0],
... [0, 2, 2, 2, 2, 0],
... [0, 2, 2, 2, 2, 0],
... [0, 0, 0, 0, 0, 0]])
>>> segm.outline_segments()
array([[0, 0, 0, 0, 0, 0],
[0, 2, 2, 2, 2, 0],
[0, 2, 0, 0, 2, 0],
[0, 2, 0, 0, 2, 0],
[0, 2, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0]])
"""
from scipy.ndimage import grey_erosion, grey_dilation
# mode='constant' ensures outline is included on the image borders
selem = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
eroded = grey_erosion(self.data, footprint=selem, mode='constant',
cval=0.)
dilated = grey_dilation(self.data, footprint=selem, mode='constant',
cval=0.)
outlines = ((dilated != eroded) & (self.data != 0)).astype(int)
outlines *= self.data
if mask_background:
outlines = np.ma.masked_where(outlines == 0, outlines)
return outlines
sha: cc9bb4534ab76bac98cb5f374a348a2573d10401
url: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/core.py#L1157-L1213

id: 10,602
repo: astropy/photutils
path: photutils/aperture/mask.py
func_name: ApertureMask._overlap_slices
language: python
original_string:
def _overlap_slices(self, shape):
"""
Calculate the slices for the overlapping part of the bounding
box and an array of the given shape.
Parameters
----------
shape : tuple of int
            The ``(ny, nx)`` shape of the array where the slices are to be
applied.
Returns
-------
slices_large : tuple of slices
A tuple of slice objects for each axis of the large array,
such that ``large_array[slices_large]`` extracts the region
of the large array that overlaps with the small array.
        slices_small : tuple of slices
A tuple of slice objects for each axis of the small array,
such that ``small_array[slices_small]`` extracts the region
of the small array that is inside the large array.
"""
if len(shape) != 2:
raise ValueError('input shape must have 2 elements.')
xmin = self.bbox.ixmin
xmax = self.bbox.ixmax
ymin = self.bbox.iymin
ymax = self.bbox.iymax
if xmin >= shape[1] or ymin >= shape[0] or xmax <= 0 or ymax <= 0:
# no overlap of the aperture with the data
return None, None
slices_large = (slice(max(ymin, 0), min(ymax, shape[0])),
slice(max(xmin, 0), min(xmax, shape[1])))
slices_small = (slice(max(-ymin, 0),
min(ymax - ymin, shape[0] - ymin)),
slice(max(-xmin, 0),
min(xmax - xmin, shape[1] - xmin)))
return slices_large, slices_small
sha: cc9bb4534ab76bac98cb5f374a348a2573d10401
url: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/mask.py#L49-L93

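`_overlap_slices` is private and reads its bounds from `self.bbox`, so as a minimal sketch the same slice arithmetic is reproduced below with the bounding-box edges passed in as plain integers (a hypothetical standalone helper, not part of the photutils API):

```python
# Hypothetical standalone version of the overlap logic above;
# ixmin/ixmax/iymin/iymax play the role of self.bbox in ApertureMask.
def overlap_slices(shape, ixmin, ixmax, iymin, iymax):
    if ixmin >= shape[1] or iymin >= shape[0] or ixmax <= 0 or iymax <= 0:
        return None, None  # no overlap at all
    slices_large = (slice(max(iymin, 0), min(iymax, shape[0])),
                    slice(max(ixmin, 0), min(ixmax, shape[1])))
    slices_small = (slice(max(-iymin, 0), min(iymax - iymin, shape[0] - iymin)),
                    slice(max(-ixmin, 0), min(ixmax - ixmin, shape[1] - ixmin)))
    return slices_large, slices_small

# A 5x5 box whose bounding box starts at (-2, -2) overlaps a 10x10 image
# only in a 3x3 corner:
large, small = overlap_slices((10, 10), -2, 3, -2, 3)
print(large)  # (slice(0, 3), slice(0, 3))
print(small)  # (slice(2, 5), slice(2, 5))
```

The two tuples always select equal-shaped regions, so assignments of the form `small_array[slices_small] = large_array[slices_large]` line up.
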
id: 10,603
repo: astropy/photutils
path: photutils/aperture/mask.py
func_name: ApertureMask.to_image
language: python
original_string:
def to_image(self, shape):
"""
Return an image of the mask in a 2D array of the given shape,
taking any edge effects into account.
Parameters
----------
shape : tuple of int
The ``(ny, nx)`` shape of the output array.
Returns
-------
result : `~numpy.ndarray`
A 2D array of the mask.
"""
if len(shape) != 2:
raise ValueError('input shape must have 2 elements.')
image = np.zeros(shape)
if self.bbox.ixmin < 0 or self.bbox.iymin < 0:
return self._to_image_partial_overlap(image)
try:
image[self.bbox.slices] = self.data
except ValueError: # partial or no overlap
image = self._to_image_partial_overlap(image)
return image
sha: cc9bb4534ab76bac98cb5f374a348a2573d10401
url: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/mask.py#L112-L141

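A short usage sketch for `to_image`, assuming the aperture API of this era, where `CircularAperture.to_mask` returns a list of `ApertureMask` objects (newer photutils releases return a single mask for a scalar position):

```python
from photutils import CircularAperture

aper = CircularAperture((4.5, 4.5), r=3.)
mask = aper.to_mask(method='exact')[0]   # [0]: to_mask returns a list here

# Embed the mask in a full-size image; edge effects are handled internally.
image = mask.to_image(shape=(10, 10))
print(image.shape)   # (10, 10)
print(image.sum())   # ~ pi * 3**2 ~ 28.27 for an 'exact' circular mask
```
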
id: 10,604
repo: astropy/photutils
path: photutils/aperture/mask.py
func_name: ApertureMask.cutout
language: python
original_string:
def cutout(self, data, fill_value=0., copy=False):
"""
Create a cutout from the input data over the mask bounding box,
taking any edge effects into account.
Parameters
----------
data : array_like
A 2D array on which to apply the aperture mask.
fill_value : float, optional
The value is used to fill pixels where the aperture mask
does not overlap with the input ``data``. The default is 0.
copy : bool, optional
            If `True` then the returned cutout array will always hold
a copy of the input ``data``. If `False` and the mask is
fully within the input ``data``, then the returned cutout
array will be a view into the input ``data``. In cases
where the mask partially overlaps or has no overlap with the
input ``data``, the returned cutout array will always hold a
copy of the input ``data`` (i.e. this keyword has no
effect).
Returns
-------
result : `~numpy.ndarray`
A 2D array cut out from the input ``data`` representing the
same cutout region as the aperture mask. If there is a
partial overlap of the aperture mask with the input data,
pixels outside of the data will be assigned to
``fill_value``. `None` is returned if there is no overlap
of the aperture with the input ``data``.
"""
data = np.asanyarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
partial_overlap = False
if self.bbox.ixmin < 0 or self.bbox.iymin < 0:
partial_overlap = True
if not partial_overlap:
# try this for speed -- the result may still be a partial
# overlap, in which case the next block will be triggered
if copy:
cutout = np.copy(data[self.bbox.slices])
else:
cutout = data[self.bbox.slices]
if partial_overlap or (cutout.shape != self.shape):
slices_large, slices_small = self._overlap_slices(data.shape)
if slices_small is None:
return None # no overlap
# cutout is a copy
cutout = np.zeros(self.shape, dtype=data.dtype)
cutout[:] = fill_value
cutout[slices_small] = data[slices_large]
if isinstance(data, u.Quantity):
cutout = u.Quantity(cutout, unit=data.unit)
return cutout
sha: cc9bb4534ab76bac98cb5f374a348a2573d10401
url: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/mask.py#L143-L208

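A sketch of `cutout` with a partially overlapping aperture, under the same `to_mask` assumption as the `to_image` example above:

```python
import numpy as np
from photutils import CircularAperture

data = np.arange(100.).reshape(10, 10)
aper = CircularAperture((0., 0.), r=3.)   # hangs off the lower-left corner
mask = aper.to_mask(method='exact')[0]

cut = mask.cutout(data, fill_value=np.nan)
print(cut.shape == mask.shape)   # True: cutout matches the mask bounding box
print(np.isnan(cut).any())       # True: out-of-image pixels got fill_value
```
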
id: 10,605
repo: astropy/photutils
path: photutils/aperture/mask.py
func_name: ApertureMask.multiply
language: python
original_string:
def multiply(self, data, fill_value=0.):
"""
Multiply the aperture mask with the input data, taking any edge
effects into account.
The result is a mask-weighted cutout from the data.
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The 2D array to multiply with the aperture mask.
fill_value : float, optional
The value is used to fill pixels where the aperture mask
does not overlap with the input ``data``. The default is 0.
Returns
-------
result : `~numpy.ndarray`
A 2D mask-weighted cutout from the input ``data``. If there
is a partial overlap of the aperture mask with the input
data, pixels outside of the data will be assigned to
            ``fill_value`` before being multiplied with the mask. `None`
is returned if there is no overlap of the aperture with the
input ``data``.
"""
cutout = self.cutout(data, fill_value=fill_value)
if cutout is None:
return None
else:
return cutout * self.data
sha: cc9bb4534ab76bac98cb5f374a348a2573d10401
url: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/mask.py#L210-L241

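And a sketch of `multiply` used as hand-rolled aperture photometry (same `to_mask` assumption; for real work `photutils.aperture_photometry` is the supported entry point):

```python
import numpy as np
from photutils import CircularAperture

data = np.ones((10, 10))
aper = CircularAperture((4.5, 4.5), r=3.)
mask = aper.to_mask(method='exact')[0]

weighted = mask.multiply(data)   # mask-weighted cutout
print(weighted.sum())            # ~ pi * 3**2 ~ 28.27: the aperture area
```
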
id: 10,606
repo: astropy/photutils
path: photutils/segmentation/deblend.py
func_name: deblend_sources
language: python
original_string:
def deblend_sources(data, segment_img, npixels, filter_kernel=None,
labels=None, nlevels=32, contrast=0.001,
mode='exponential', connectivity=8, relabel=True):
"""
Deblend overlapping sources labeled in a segmentation image.
Sources are deblended using a combination of multi-thresholding and
`watershed segmentation
<https://en.wikipedia.org/wiki/Watershed_(image_processing)>`_. In
order to deblend sources, they must be separated enough such that
there is a saddle between them.
Parameters
----------
data : array_like
The 2D array of the image.
segment_img : `~photutils.segmentation.SegmentationImage` or array_like (int)
A 2D segmentation image, either as a
`~photutils.segmentation.SegmentationImage` object or an
`~numpy.ndarray`, with the same shape as ``data`` where sources
are labeled by different positive integer values. A value of
zero is reserved for the background.
npixels : int
        The number of connected pixels, each above the detection threshold,
that an object must have to be detected. ``npixels`` must be a
positive integer.
filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional
The 2D array of the kernel used to filter the image before
thresholding. Filtering the image will smooth the noise and
maximize detectability of objects with a shape similar to the
kernel.
labels : int or array-like of int, optional
The label numbers to deblend. If `None` (default), then all
labels in the segmentation image will be deblended.
nlevels : int, optional
The number of multi-thresholding levels to use. Each source
will be re-thresholded at ``nlevels``, spaced exponentially or
linearly (see the ``mode`` keyword), between its minimum and
maximum values within the source segment.
contrast : float, optional
The fraction of the total (blended) source flux that a local
peak must have to be considered as a separate object.
``contrast`` must be between 0 and 1, inclusive. If ``contrast
= 0`` then every local peak will be made a separate object
(maximum deblending). If ``contrast = 1`` then no deblending
will occur. The default is 0.001, which will deblend sources
with a magnitude difference of about 7.5.
mode : {'exponential', 'linear'}, optional
The mode used in defining the spacing between the
multi-thresholding levels (see the ``nlevels`` keyword). The
default is 'exponential'.
connectivity : {8, 4}, optional
The type of pixel connectivity used in determining how pixels
are grouped into a detected source. The options are 8 (default)
or 4. 8-connected pixels touch along their edges or corners.
4-connected pixels touch along their edges. For reference,
SExtractor uses 8-connected pixels.
relabel : bool
If `True` (default), then the segmentation image will be
relabeled such that the labels are in consecutive order starting
from 1.
Returns
-------
segment_image : `~photutils.segmentation.SegmentationImage`
A 2D segmentation image, with the same shape as ``data``, where
sources are marked by different positive integer values. A
value of zero is reserved for the background.
See Also
--------
:func:`photutils.detect_sources`
"""
if not isinstance(segment_img, SegmentationImage):
segment_img = SegmentationImage(segment_img)
if segment_img.shape != data.shape:
raise ValueError('The data and segmentation image must have '
'the same shape')
if labels is None:
labels = segment_img.labels
labels = np.atleast_1d(labels)
segment_img.check_labels(labels)
data = filter_data(data, filter_kernel, mode='constant', fill_value=0.0)
last_label = segment_img.max_label
segm_deblended = deepcopy(segment_img)
for label in labels:
source_slice = segment_img.slices[segment_img.get_index(label)]
source_data = data[source_slice]
source_segm = SegmentationImage(np.copy(
segment_img.data[source_slice]))
source_segm.keep_labels(label) # include only one label
source_deblended = _deblend_source(
source_data, source_segm, npixels, nlevels=nlevels,
contrast=contrast, mode=mode, connectivity=connectivity)
if not np.array_equal(source_deblended.data.astype(bool),
source_segm.data.astype(bool)):
raise ValueError('Deblending failed for source "{0}". Please '
'ensure you used the same pixel connectivity '
'in detect_sources and deblend_sources. If '
'this issue persists, then please inform the '
'developers.'.format(label))
if source_deblended.nlabels > 1:
# replace the original source with the deblended source
source_mask = (source_deblended.data > 0)
segm_tmp = segm_deblended.data
segm_tmp[source_slice][source_mask] = (
source_deblended.data[source_mask] + last_label)
segm_deblended.data = segm_tmp # needed to call data setter
last_label += source_deblended.nlabels
if relabel:
segm_deblended.relabel_consecutive()
return segm_deblended
sha: cc9bb4534ab76bac98cb5f374a348a2573d10401
url: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/deblend.py#L18-L147

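A runnable sketch of the detect-then-deblend flow on two synthetic, overlapping Gaussians (the threshold and contrast values are illustrative guesses, not recommendations):

```python
import numpy as np
from photutils import detect_sources, deblend_sources

# Two Gaussian blobs (sigma ~ 2 px) whose centers sit 6 px apart, so the
# blended source has a saddle between the peaks.
y, x = np.mgrid[0:50, 0:50]
data = 100. * (np.exp(-((x - 22)**2 + (y - 25)**2) / 8.)
               + np.exp(-((x - 28)**2 + (y - 25)**2) / 8.))

segm = detect_sources(data, threshold=5., npixels=5)   # one blended label
segm_db = deblend_sources(data, segm, npixels=5, contrast=0.001)
print(segm.nlabels, segm_db.nlabels)   # expect 1 -> 2 for this configuration
```
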
id: 10,607
repo: astropy/photutils
path: photutils/utils/_moments.py
func_name: _moments_central
language: python
original_string:
def _moments_central(data, center=None, order=1):
"""
Calculate the central image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
center : tuple of two floats or `None`, optional
        The ``(x, y)`` center position. If `None`, it will be calculated as
the "center of mass" of the input ``data``.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The central image moments.
"""
data = np.asarray(data).astype(float)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if center is None:
from ..centroids import centroid_com
center = centroid_com(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
ypowers = (indices[0] - center[1]) ** np.arange(order + 1)
xpowers = np.transpose(indices[1] - center[0]) ** np.arange(order + 1)
return np.dot(np.dot(np.transpose(ypowers), data), xpowers)
sha: cc9bb4534ab76bac98cb5f374a348a2573d10401
url: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/_moments.py#L9-L44

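`_moments_central` is private, so here is a standalone copy of the same computation with a quick sanity check (`np.ogrid` is spelled with explicit slices for clarity):

```python
import numpy as np

def moments_central(data, center, order=1):
    data = np.asarray(data, dtype=float)
    yidx, xidx = np.ogrid[0:data.shape[0], 0:data.shape[1]]
    ypowers = (yidx - center[1]) ** np.arange(order + 1)              # (ny, order+1)
    xpowers = np.transpose(xidx - center[0]) ** np.arange(order + 1)  # (nx, order+1)
    return np.dot(np.dot(np.transpose(ypowers), data), xpowers)

img = np.zeros((5, 5))
img[2, 2] = 3.                                    # single bright pixel at (2, 2)
m = moments_central(img, center=(2, 2), order=2)
print(m[0, 0])            # 3.0: the zeroth moment is the total flux
print(m[1, 0], m[0, 1])   # 0.0 0.0: first central moments vanish at the centroid
```

The entry `m[i, j]` holds `sum(data * (y - yc)**i * (x - xc)**j)`.
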
id: 10,608
repo: astropy/photutils
path: photutils/isophote/harmonics.py
func_name: first_and_second_harmonic_function
language: python
original_string:
def first_and_second_harmonic_function(phi, c):
"""
Compute the harmonic function value used to calculate the
corrections for ellipse fitting.
This function includes simultaneously both the first and second
order harmonics:
.. math::
f(phi) = c[0] + c[1]*\\sin(phi) + c[2]*\\cos(phi) +
c[3]*\\sin(2*phi) + c[4]*\\cos(2*phi)
Parameters
----------
phi : float or `~numpy.ndarray`
The angle(s) along the elliptical path, going towards the positive
y axis, starting coincident with the position angle. That is, the
angles are defined from the semimajor axis that lies in
the positive x quadrant.
c : `~numpy.ndarray` of shape (5,)
Array containing the five harmonic coefficients.
Returns
-------
result : float or `~numpy.ndarray`
The function value(s) at the given input angle(s).
"""
return (c[0] + c[1]*np.sin(phi) + c[2]*np.cos(phi) + c[3]*np.sin(2*phi) +
c[4]*np.cos(2*phi))
sha: cc9bb4534ab76bac98cb5f374a348a2573d10401
url: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/harmonics.py#L23-L53

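A quick numeric check of the expansion, using a standalone copy of the function and made-up coefficients:

```python
import numpy as np

def harmonic(phi, c):
    # f(phi) = c0 + c1*sin(phi) + c2*cos(phi) + c3*sin(2*phi) + c4*cos(2*phi)
    return (c[0] + c[1]*np.sin(phi) + c[2]*np.cos(phi)
            + c[3]*np.sin(2*phi) + c[4]*np.cos(2*phi))

c = np.array([10., 1., 0.5, 0.2, 0.1])
print(harmonic(0.0, c))          # 10 + 0.5 + 0.1 = 10.6
print(harmonic(np.pi / 2, c))    # 10 + 1 - 0.1 = 10.9  (the cos(pi) term is -1)
phi = np.linspace(0., 2. * np.pi, 5)
print(harmonic(phi, c).shape)    # (5,): works elementwise on arrays too
```
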
id: 10,609
repo: astropy/photutils
path: photutils/psf/matching/windows.py
func_name: _radial_distance
language: python
original_string:
def _radial_distance(shape):
"""
Return an array where each value is the Euclidean distance from the
array center.
Parameters
----------
shape : tuple of int
The size of the output array along each axis.
Returns
-------
result : `~numpy.ndarray`
        An array containing the Euclidean radial distances from the
array center.
"""
if len(shape) != 2:
raise ValueError('shape must have only 2 elements')
position = (np.asarray(shape) - 1) / 2.
x = np.arange(shape[1]) - position[1]
y = np.arange(shape[0]) - position[0]
xx, yy = np.meshgrid(x, y)
return np.sqrt(xx**2 + yy**2)
sha: cc9bb4534ab76bac98cb5f374a348a2573d10401
url: https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/matching/windows.py#L13-L36

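A standalone check of the centered distance grid (the helper is private, so it is copied here rather than imported):

```python
import numpy as np

def radial_distance(shape):
    # Distances are measured from the array center, which falls between
    # pixels for even-sized axes.
    position = (np.asarray(shape) - 1) / 2.
    x = np.arange(shape[1]) - position[1]
    y = np.arange(shape[0]) - position[0]
    xx, yy = np.meshgrid(x, y)
    return np.sqrt(xx**2 + yy**2)

r = radial_distance((3, 3))
print(r[1, 1])   # 0.0 at the exact center
print(r[0, 0])   # sqrt(2) ~ 1.414 at a corner
```
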
id: 10,610
repo: astropy/photutils
path: photutils/datasets/load.py
func_name: load_spitzer_image
language: python
original_string:
def load_spitzer_image(show_progress=False): # pragma: no cover
"""
Load a 4.5 micron Spitzer image.
The catalog for this image is returned by
:func:`load_spitzer_catalog`.
Parameters
----------
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
hdu : `~astropy.io.fits.ImageHDU`
The 4.5 micron Spitzer image in a FITS image HDU.
See Also
--------
load_spitzer_catalog
Examples
--------
.. plot::
:include-source:
        import matplotlib.pyplot as plt
        from photutils import datasets
hdu = datasets.load_spitzer_image()
plt.imshow(hdu.data, origin='lower', vmax=50)
"""
path = get_path('spitzer_example_image.fits', location='remote',
show_progress=show_progress)
hdu = fits.open(path)[0]
return hdu
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/load.py#L73-L109
|
10,611
|
astropy/photutils
|
photutils/datasets/load.py
|
load_spitzer_catalog
|
def load_spitzer_catalog(show_progress=False): # pragma: no cover
"""
Load a 4.5 micron Spitzer catalog.
The image from which this catalog was derived is returned by
:func:`load_spitzer_image`.
Parameters
----------
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
catalog : `~astropy.table.Table`
The catalog of sources.
See Also
--------
load_spitzer_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
catalog = datasets.load_spitzer_catalog()
plt.scatter(catalog['l'], catalog['b'])
plt.xlabel('Galactic l')
plt.ylabel('Galactic b')
plt.xlim(18.39, 18.05)
plt.ylim(0.13, 0.30)
"""
path = get_path('spitzer_example_catalog.xml', location='remote',
show_progress=show_progress)
table = Table.read(path)
return table
|
python
|
def load_spitzer_catalog(show_progress=False): # pragma: no cover
"""
Load a 4.5 micron Spitzer catalog.
The image from which this catalog was derived is returned by
:func:`load_spitzer_image`.
Parameters
----------
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
catalog : `~astropy.table.Table`
The catalog of sources.
See Also
--------
load_spitzer_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
catalog = datasets.load_spitzer_catalog()
plt.scatter(catalog['l'], catalog['b'])
plt.xlabel('Galactic l')
plt.ylabel('Galactic b')
plt.xlim(18.39, 18.05)
plt.ylim(0.13, 0.30)
"""
path = get_path('spitzer_example_catalog.xml', location='remote',
show_progress=show_progress)
table = Table.read(path)
return table
|
[
"def",
"load_spitzer_catalog",
"(",
"show_progress",
"=",
"False",
")",
":",
"# pragma: no cover",
"path",
"=",
"get_path",
"(",
"'spitzer_example_catalog.xml'",
",",
"location",
"=",
"'remote'",
",",
"show_progress",
"=",
"show_progress",
")",
"table",
"=",
"Table",
".",
"read",
"(",
"path",
")",
"return",
"table"
] |
Load a 4.5 micron Spitzer catalog.
The image from which this catalog was derived is returned by
:func:`load_spitzer_image`.
Parameters
----------
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
catalog : `~astropy.table.Table`
The catalog of sources.
See Also
--------
load_spitzer_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
catalog = datasets.load_spitzer_catalog()
plt.scatter(catalog['l'], catalog['b'])
plt.xlabel('Galactic l')
plt.ylabel('Galactic b')
plt.xlim(18.39, 18.05)
plt.ylim(0.13, 0.30)
|
[
"Load",
"a",
"4",
".",
"5",
"micron",
"Spitzer",
"catalog",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/load.py#L112-L152
|
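The two Spitzer loaders are meant to be used together: the catalog returned by load_spitzer_catalog lists the sources in the image returned by load_spitzer_image. A minimal sketch, assuming network access for the remote downloads:

from photutils import datasets

hdu = datasets.load_spitzer_image()        # 4.5 micron image (FITS HDU)
catalog = datasets.load_spitzer_catalog()  # matching source table

print(hdu.data.shape)            # image dimensions
print(catalog.colnames)          # available catalog columns
print(len(catalog), 'sources')   # number of catalog rows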
10,612
|
astropy/photutils
|
photutils/datasets/load.py
|
load_irac_psf
|
def load_irac_psf(channel, show_progress=False): # pragma: no cover
"""
Load a Spitzer IRAC PSF image.
Parameters
----------
channel : int (1-4)
The IRAC channel number:
* Channel 1: 3.6 microns
* Channel 2: 4.5 microns
* Channel 3: 5.8 microns
* Channel 4: 8.0 microns
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
hdu : `~astropy.io.fits.ImageHDU`
The IRAC PSF in a FITS image HDU.
Examples
--------
.. plot::
:include-source:
from astropy.visualization import LogStretch, ImageNormalize
from photutils.datasets import load_irac_psf
hdu1 = load_irac_psf(1)
hdu2 = load_irac_psf(2)
hdu3 = load_irac_psf(3)
hdu4 = load_irac_psf(4)
norm = ImageNormalize(hdu1.data, stretch=LogStretch())
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
ax1.imshow(hdu1.data, origin='lower', interpolation='nearest',
norm=norm)
ax1.set_title('IRAC Ch1 PSF')
ax2.imshow(hdu2.data, origin='lower', interpolation='nearest',
norm=norm)
ax2.set_title('IRAC Ch2 PSF')
ax3.imshow(hdu3.data, origin='lower', interpolation='nearest',
norm=norm)
ax3.set_title('IRAC Ch3 PSF')
ax4.imshow(hdu4.data, origin='lower', interpolation='nearest',
norm=norm)
ax4.set_title('IRAC Ch4 PSF')
plt.tight_layout()
plt.show()
"""
channel = int(channel)
if channel < 1 or channel > 4:
raise ValueError('channel must be 1, 2, 3, or 4')
fn = 'irac_ch{0}_flight.fits'.format(channel)
path = get_path(fn, location='remote', show_progress=show_progress)
hdu = fits.open(path)[0]
return hdu
|
python
|
def load_irac_psf(channel, show_progress=False): # pragma: no cover
"""
Load a Spitzer IRAC PSF image.
Parameters
----------
channel : int (1-4)
The IRAC channel number:
* Channel 1: 3.6 microns
* Channel 2: 4.5 microns
* Channel 3: 5.8 microns
* Channel 4: 8.0 microns
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
hdu : `~astropy.io.fits.ImageHDU`
The IRAC PSF in a FITS image HDU.
Examples
--------
.. plot::
:include-source:
from astropy.visualization import LogStretch, ImageNormalize
from photutils.datasets import load_irac_psf
hdu1 = load_irac_psf(1)
hdu2 = load_irac_psf(2)
hdu3 = load_irac_psf(3)
hdu4 = load_irac_psf(4)
norm = ImageNormalize(hdu1.data, stretch=LogStretch())
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
ax1.imshow(hdu1.data, origin='lower', interpolation='nearest',
norm=norm)
ax1.set_title('IRAC Ch1 PSF')
ax2.imshow(hdu2.data, origin='lower', interpolation='nearest',
norm=norm)
ax2.set_title('IRAC Ch2 PSF')
ax3.imshow(hdu3.data, origin='lower', interpolation='nearest',
norm=norm)
ax3.set_title('IRAC Ch3 PSF')
ax4.imshow(hdu4.data, origin='lower', interpolation='nearest',
norm=norm)
ax4.set_title('IRAC Ch4 PSF')
plt.tight_layout()
plt.show()
"""
channel = int(channel)
if channel < 1 or channel > 4:
raise ValueError('channel must be 1, 2, 3, or 4')
fn = 'irac_ch{0}_flight.fits'.format(channel)
path = get_path(fn, location='remote', show_progress=show_progress)
hdu = fits.open(path)[0]
return hdu
|
[
"def",
"load_irac_psf",
"(",
"channel",
",",
"show_progress",
"=",
"False",
")",
":",
"# pragma: no cover",
"channel",
"=",
"int",
"(",
"channel",
")",
"if",
"channel",
"<",
"1",
"or",
"channel",
">",
"4",
":",
"raise",
"ValueError",
"(",
"'channel must be 1, 2, 3, or 4'",
")",
"fn",
"=",
"'irac_ch{0}_flight.fits'",
".",
"format",
"(",
"channel",
")",
"path",
"=",
"get_path",
"(",
"fn",
",",
"location",
"=",
"'remote'",
",",
"show_progress",
"=",
"show_progress",
")",
"hdu",
"=",
"fits",
".",
"open",
"(",
"path",
")",
"[",
"0",
"]",
"return",
"hdu"
] |
Load a Spitzer IRAC PSF image.
Parameters
----------
channel : int (1-4)
The IRAC channel number:
* Channel 1: 3.6 microns
* Channel 2: 4.5 microns
* Channel 3: 5.8 microns
* Channel 4: 8.0 microns
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
hdu : `~astropy.io.fits.ImageHDU`
The IRAC PSF in a FITS image HDU.
Examples
--------
.. plot::
:include-source:
from astropy.visualization import LogStretch, ImageNormalize
from photutils.datasets import load_irac_psf
hdu1 = load_irac_psf(1)
hdu2 = load_irac_psf(2)
hdu3 = load_irac_psf(3)
hdu4 = load_irac_psf(4)
norm = ImageNormalize(hdu1.data, stretch=LogStretch())
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
ax1.imshow(hdu1.data, origin='lower', interpolation='nearest',
norm=norm)
ax1.set_title('IRAC Ch1 PSF')
ax2.imshow(hdu2.data, origin='lower', interpolation='nearest',
norm=norm)
ax2.set_title('IRAC Ch2 PSF')
ax3.imshow(hdu3.data, origin='lower', interpolation='nearest',
norm=norm)
ax3.set_title('IRAC Ch3 PSF')
ax4.imshow(hdu4.data, origin='lower', interpolation='nearest',
norm=norm)
ax4.set_title('IRAC Ch4 PSF')
plt.tight_layout()
plt.show()
|
[
"Load",
"a",
"Spitzer",
"IRAC",
"PSF",
"image",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/load.py#L155-L217
|
10,613
|
astropy/photutils
|
photutils/isophote/ellipse.py
|
Ellipse.fit_image
|
def fit_image(self, sma0=None, minsma=0., maxsma=None, step=0.1,
conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT,
maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG,
maxgerr=DEFAULT_MAXGERR, sclip=3., nclip=0,
integrmode=BILINEAR, linear=False, maxrit=None):
# This parameter list is quite large and should in principle be
# simplified by re-distributing these controls to somewhere else.
# We keep this design though because it better mimics the flat
# architecture used in the original STSDAS task `ellipse`.
"""
Fit multiple isophotes to the image array.
This method loops over each value of the semimajor axis (sma)
length (constructed from the input parameters), fitting a single
isophote at each sma. The entire set of isophotes is returned
in an `~photutils.isophote.IsophoteList` instance.
Parameters
----------
sma0 : float, optional
The starting value for the semimajor axis length (pixels).
This value must not be the minimum or maximum semimajor axis
length, but something in between. The algorithm can't start
from the very center of the galaxy image because the
modelling of elliptical isophotes on that region is poor and
it will diverge very easily if not tied to other previously
fit isophotes. It can't start from the maximum value either
because the maximum is not known beforehand, depending on
signal-to-noise. The ``sma0`` value should be selected such
that the corresponding isophote has a good signal-to-noise
ratio and a clearly defined geometry. If set to `None` (the
default), one of two actions will be taken: if a
`~photutils.isophote.EllipseGeometry` instance was input to
the `~photutils.isophote.Ellipse` constructor, its ``sma``
value will be used. Otherwise, a default value of 10. will
be used.
minsma : float, optional
The minimum value for the semimajor axis length (pixels).
The default is 0.
maxsma : float or `None`, optional
The maximum value for the semimajor axis length (pixels).
When set to `None` (default), the algorithm will increase
        the semimajor axis until one of several conditions causes
        it to stop and revert to fitting ellipses with sma <
``sma0``.
step : float, optional
The step value used to grow/shrink the semimajor axis length
(pixels if ``linear=True``, or a relative value if
``linear=False``). See the ``linear`` parameter. The
default is 0.1.
conver : float, optional
The main convergence criterion. Iterations stop when the
largest harmonic amplitude becomes smaller (in absolute
value) than ``conver`` times the harmonic fit rms. The
default is 0.05.
minit : int, optional
The minimum number of iterations to perform. A minimum of 10
(the default) iterations guarantees that, on average, 2
iterations will be available for fitting each independent
parameter (the four harmonic amplitudes and the intensity
level). For the first isophote, the minimum number of
iterations is 2 * ``minit`` to ensure that, even departing
from not-so-good initial values, the algorithm has a better
chance to converge to a sensible solution.
maxit : int, optional
The maximum number of iterations to perform. The default is
50.
fflag : float, optional
The acceptable fraction of flagged data points in the
sample. If the actual fraction of valid data points is
smaller than this, the iterations will stop and the current
`~photutils.isophote.Isophote` will be returned. Flagged
data points are points that either lie outside the image
frame, are masked, or were rejected by sigma-clipping. The
default is 0.7.
maxgerr : float, optional
The maximum acceptable relative error in the local radial
        intensity gradient. This is the main control for preventing
        ellipses from growing into regions of too low signal-to-noise
        ratio. `Busko (1996; ASPC
101, 139)
<http://adsabs.harvard.edu/abs/1996ASPC..101..139B>`_ showed
that the fitting precision relates to that relative error.
The usual behavior of the gradient relative error is to
increase with semimajor axis, being larger in outer, fainter
regions of a galaxy image. In the current implementation,
the ``maxgerr`` criterion is triggered only when two
consecutive isophotes exceed the value specified by the
parameter. This prevents premature stopping caused by
contamination such as stars and HII regions.
A number of actions may happen when the gradient error
exceeds ``maxgerr`` (or becomes non-significant and is set
to `None`). If the maximum semimajor axis specified by
``maxsma`` is set to `None`, semimajor axis growth is
stopped and the algorithm proceeds inwards to the galaxy
center. If ``maxsma`` is set to some finite value, and this
value is larger than the current semimajor axis length, the
algorithm enters non-iterative mode and proceeds outwards
until reaching ``maxsma``. The default is 0.5.
sclip : float, optional
The sigma-clip sigma value. The default is 3.0.
nclip : int, optional
        The number of sigma-clip iterations. The default is 0,
which means sigma-clipping is skipped.
integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
The area integration mode. The default is 'bilinear'.
linear : bool, optional
The semimajor axis growing/shrinking mode. If `False`
(default), the geometric growing mode is chosen, thus the
semimajor axis length is increased by a factor of (1. +
``step``), and the process is repeated until either the
semimajor axis value reaches the value of parameter
``maxsma``, or the last fitted ellipse has more than a given
fraction of its sampled points flagged out (see ``fflag``).
The process then resumes from the first fitted ellipse (at
``sma0``) inwards, in steps of (1./(1. + ``step``)), until
the semimajor axis length reaches the value ``minsma``. In
case of linear growing, the increment or decrement value is
given directly by ``step`` in pixels. If ``maxsma`` is set
to `None`, the semimajor axis will grow until a low
signal-to-noise criterion is met. See ``maxgerr``.
maxrit : float or `None`, optional
The maximum value of semimajor axis to perform an actual
fit. Whenever the current semimajor axis length is larger
than ``maxrit``, the isophotes will be extracted using the
current geometry, without being fitted. This non-iterative
mode may be useful for sampling regions of very low surface
brightness, where the algorithm may become unstable and
unable to recover reliable geometry information.
Non-iterative mode can also be entered automatically
whenever the ellipticity exceeds 1.0 or the ellipse center
crosses the image boundaries. If `None` (default), then no
maximum value is used.
Returns
-------
result : `~photutils.isophote.IsophoteList` instance
A list-like object of `~photutils.isophote.Isophote`
instances, sorted by increasing semimajor axis length.
"""
# multiple fitted isophotes will be stored here
isophote_list = []
# get starting sma from appropriate source: keyword parameter,
# internal EllipseGeometry instance, or fixed default value.
if not sma0:
if self._geometry:
sma = self._geometry.sma
else:
sma = 10.
else:
sma = sma0
# first, go from initial sma outwards until
# hitting one of several stopping criteria.
noiter = False
first_isophote = True
while True:
# first isophote runs longer
minit_a = 2 * minit if first_isophote else minit
first_isophote = False
isophote = self.fit_isophote(sma, step, conver, minit_a, maxit,
fflag, maxgerr, sclip, nclip,
integrmode, linear, maxrit,
noniterate=noiter,
isophote_list=isophote_list)
# check for failed fit.
if (isophote.stop_code < 0 or isophote.stop_code == 1):
# in case the fit failed right at the outset, return an
# empty list. This is the usual case when the user
                # provides initial guesses that are too far off to enable
# the fitting algorithm to find any meaningful solution.
if len(isophote_list) == 1:
warnings.warn('No meaningful fit was possible.',
AstropyUserWarning)
return IsophoteList([])
self._fix_last_isophote(isophote_list, -1)
# get last isophote from the actual list, since the last
# `isophote` instance in this context may no longer be OK.
isophote = isophote_list[-1]
# if two consecutive isophotes failed to fit,
# shut off iterative mode. Or, bail out and
# change to go inwards.
if len(isophote_list) > 2:
if ((isophote.stop_code == 5 and
isophote_list[-2].stop_code == 5)
or isophote.stop_code == 1):
if maxsma and maxsma > isophote.sma:
# if a maximum sma value was provided by
# user, and the current sma is smaller than
# maxsma, keep growing sma in non-iterative
# mode until reaching it.
noiter = True
else:
# if no maximum sma, stop growing and change
# to go inwards.
break
# reset variable from the actual list, since the last
# `isophote` instance may no longer be OK.
isophote = isophote_list[-1]
# update sma. If exceeded user-defined
# maximum, bail out from this loop.
sma = isophote.sample.geometry.update_sma(step)
if maxsma and sma >= maxsma:
break
# reset sma so as to go inwards.
first_isophote = isophote_list[0]
sma, step = first_isophote.sample.geometry.reset_sma(step)
# now, go from initial sma inwards towards center.
while True:
isophote = self.fit_isophote(sma, step, conver, minit, maxit,
fflag, maxgerr, sclip, nclip,
integrmode, linear, maxrit,
going_inwards=True,
isophote_list=isophote_list)
# if abnormal condition, fix isophote but keep going.
if isophote.stop_code < 0:
self._fix_last_isophote(isophote_list, 0)
# reset variable from the actual list, since the last
# `isophote` instance may no longer be OK.
isophote = isophote_list[-1]
# figure out next sma; if exceeded user-defined
# minimum, or too small, bail out from this loop
sma = isophote.sample.geometry.update_sma(step)
if sma <= max(minsma, 0.5):
break
# if user asked for minsma=0, extract special isophote there
if minsma == 0.0:
isophote = self.fit_isophote(0.0, isophote_list=isophote_list)
# sort list of isophotes according to sma
isophote_list.sort()
return IsophoteList(isophote_list)
|
python
|
def fit_image(self, sma0=None, minsma=0., maxsma=None, step=0.1,
conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT,
maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG,
maxgerr=DEFAULT_MAXGERR, sclip=3., nclip=0,
integrmode=BILINEAR, linear=False, maxrit=None):
# This parameter list is quite large and should in principle be
# simplified by re-distributing these controls to somewhere else.
# We keep this design though because it better mimics the flat
# architecture used in the original STSDAS task `ellipse`.
"""
Fit multiple isophotes to the image array.
This method loops over each value of the semimajor axis (sma)
length (constructed from the input parameters), fitting a single
isophote at each sma. The entire set of isophotes is returned
in an `~photutils.isophote.IsophoteList` instance.
Parameters
----------
sma0 : float, optional
The starting value for the semimajor axis length (pixels).
This value must not be the minimum or maximum semimajor axis
length, but something in between. The algorithm can't start
from the very center of the galaxy image because the
modelling of elliptical isophotes on that region is poor and
it will diverge very easily if not tied to other previously
fit isophotes. It can't start from the maximum value either
because the maximum is not known beforehand, depending on
signal-to-noise. The ``sma0`` value should be selected such
that the corresponding isophote has a good signal-to-noise
ratio and a clearly defined geometry. If set to `None` (the
default), one of two actions will be taken: if a
`~photutils.isophote.EllipseGeometry` instance was input to
the `~photutils.isophote.Ellipse` constructor, its ``sma``
value will be used. Otherwise, a default value of 10. will
be used.
minsma : float, optional
The minimum value for the semimajor axis length (pixels).
The default is 0.
maxsma : float or `None`, optional
The maximum value for the semimajor axis length (pixels).
When set to `None` (default), the algorithm will increase
        the semimajor axis until one of several conditions causes
        it to stop and revert to fitting ellipses with sma <
``sma0``.
step : float, optional
The step value used to grow/shrink the semimajor axis length
(pixels if ``linear=True``, or a relative value if
``linear=False``). See the ``linear`` parameter. The
default is 0.1.
conver : float, optional
The main convergence criterion. Iterations stop when the
largest harmonic amplitude becomes smaller (in absolute
value) than ``conver`` times the harmonic fit rms. The
default is 0.05.
minit : int, optional
The minimum number of iterations to perform. A minimum of 10
(the default) iterations guarantees that, on average, 2
iterations will be available for fitting each independent
parameter (the four harmonic amplitudes and the intensity
level). For the first isophote, the minimum number of
iterations is 2 * ``minit`` to ensure that, even departing
from not-so-good initial values, the algorithm has a better
chance to converge to a sensible solution.
maxit : int, optional
The maximum number of iterations to perform. The default is
50.
fflag : float, optional
The acceptable fraction of flagged data points in the
sample. If the actual fraction of valid data points is
smaller than this, the iterations will stop and the current
`~photutils.isophote.Isophote` will be returned. Flagged
data points are points that either lie outside the image
frame, are masked, or were rejected by sigma-clipping. The
default is 0.7.
maxgerr : float, optional
The maximum acceptable relative error in the local radial
        intensity gradient. This is the main control for preventing
        ellipses from growing into regions of too low signal-to-noise
        ratio. `Busko (1996; ASPC
101, 139)
<http://adsabs.harvard.edu/abs/1996ASPC..101..139B>`_ showed
that the fitting precision relates to that relative error.
The usual behavior of the gradient relative error is to
increase with semimajor axis, being larger in outer, fainter
regions of a galaxy image. In the current implementation,
the ``maxgerr`` criterion is triggered only when two
consecutive isophotes exceed the value specified by the
parameter. This prevents premature stopping caused by
contamination such as stars and HII regions.
A number of actions may happen when the gradient error
exceeds ``maxgerr`` (or becomes non-significant and is set
to `None`). If the maximum semimajor axis specified by
``maxsma`` is set to `None`, semimajor axis growth is
stopped and the algorithm proceeds inwards to the galaxy
center. If ``maxsma`` is set to some finite value, and this
value is larger than the current semimajor axis length, the
algorithm enters non-iterative mode and proceeds outwards
until reaching ``maxsma``. The default is 0.5.
sclip : float, optional
The sigma-clip sigma value. The default is 3.0.
nclip : int, optional
        The number of sigma-clip iterations. The default is 0,
which means sigma-clipping is skipped.
integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
The area integration mode. The default is 'bilinear'.
linear : bool, optional
The semimajor axis growing/shrinking mode. If `False`
(default), the geometric growing mode is chosen, thus the
semimajor axis length is increased by a factor of (1. +
``step``), and the process is repeated until either the
semimajor axis value reaches the value of parameter
``maxsma``, or the last fitted ellipse has more than a given
fraction of its sampled points flagged out (see ``fflag``).
The process then resumes from the first fitted ellipse (at
``sma0``) inwards, in steps of (1./(1. + ``step``)), until
the semimajor axis length reaches the value ``minsma``. In
case of linear growing, the increment or decrement value is
given directly by ``step`` in pixels. If ``maxsma`` is set
to `None`, the semimajor axis will grow until a low
signal-to-noise criterion is met. See ``maxgerr``.
maxrit : float or `None`, optional
The maximum value of semimajor axis to perform an actual
fit. Whenever the current semimajor axis length is larger
than ``maxrit``, the isophotes will be extracted using the
current geometry, without being fitted. This non-iterative
mode may be useful for sampling regions of very low surface
brightness, where the algorithm may become unstable and
unable to recover reliable geometry information.
Non-iterative mode can also be entered automatically
whenever the ellipticity exceeds 1.0 or the ellipse center
crosses the image boundaries. If `None` (default), then no
maximum value is used.
Returns
-------
result : `~photutils.isophote.IsophoteList` instance
A list-like object of `~photutils.isophote.Isophote`
instances, sorted by increasing semimajor axis length.
"""
# multiple fitted isophotes will be stored here
isophote_list = []
# get starting sma from appropriate source: keyword parameter,
# internal EllipseGeometry instance, or fixed default value.
if not sma0:
if self._geometry:
sma = self._geometry.sma
else:
sma = 10.
else:
sma = sma0
# first, go from initial sma outwards until
# hitting one of several stopping criteria.
noiter = False
first_isophote = True
while True:
# first isophote runs longer
minit_a = 2 * minit if first_isophote else minit
first_isophote = False
isophote = self.fit_isophote(sma, step, conver, minit_a, maxit,
fflag, maxgerr, sclip, nclip,
integrmode, linear, maxrit,
noniterate=noiter,
isophote_list=isophote_list)
# check for failed fit.
if (isophote.stop_code < 0 or isophote.stop_code == 1):
# in case the fit failed right at the outset, return an
# empty list. This is the usual case when the user
                # provides initial guesses that are too far off to enable
# the fitting algorithm to find any meaningful solution.
if len(isophote_list) == 1:
warnings.warn('No meaningful fit was possible.',
AstropyUserWarning)
return IsophoteList([])
self._fix_last_isophote(isophote_list, -1)
# get last isophote from the actual list, since the last
# `isophote` instance in this context may no longer be OK.
isophote = isophote_list[-1]
# if two consecutive isophotes failed to fit,
# shut off iterative mode. Or, bail out and
# change to go inwards.
if len(isophote_list) > 2:
if ((isophote.stop_code == 5 and
isophote_list[-2].stop_code == 5)
or isophote.stop_code == 1):
if maxsma and maxsma > isophote.sma:
# if a maximum sma value was provided by
# user, and the current sma is smaller than
# maxsma, keep growing sma in non-iterative
# mode until reaching it.
noiter = True
else:
# if no maximum sma, stop growing and change
# to go inwards.
break
# reset variable from the actual list, since the last
# `isophote` instance may no longer be OK.
isophote = isophote_list[-1]
# update sma. If exceeded user-defined
# maximum, bail out from this loop.
sma = isophote.sample.geometry.update_sma(step)
if maxsma and sma >= maxsma:
break
# reset sma so as to go inwards.
first_isophote = isophote_list[0]
sma, step = first_isophote.sample.geometry.reset_sma(step)
# now, go from initial sma inwards towards center.
while True:
isophote = self.fit_isophote(sma, step, conver, minit, maxit,
fflag, maxgerr, sclip, nclip,
integrmode, linear, maxrit,
going_inwards=True,
isophote_list=isophote_list)
# if abnormal condition, fix isophote but keep going.
if isophote.stop_code < 0:
self._fix_last_isophote(isophote_list, 0)
# reset variable from the actual list, since the last
# `isophote` instance may no longer be OK.
isophote = isophote_list[-1]
# figure out next sma; if exceeded user-defined
# minimum, or too small, bail out from this loop
sma = isophote.sample.geometry.update_sma(step)
if sma <= max(minsma, 0.5):
break
# if user asked for minsma=0, extract special isophote there
if minsma == 0.0:
isophote = self.fit_isophote(0.0, isophote_list=isophote_list)
# sort list of isophotes according to sma
isophote_list.sort()
return IsophoteList(isophote_list)
|
[
"def",
"fit_image",
"(",
"self",
",",
"sma0",
"=",
"None",
",",
"minsma",
"=",
"0.",
",",
"maxsma",
"=",
"None",
",",
"step",
"=",
"0.1",
",",
"conver",
"=",
"DEFAULT_CONVERGENCE",
",",
"minit",
"=",
"DEFAULT_MINIT",
",",
"maxit",
"=",
"DEFAULT_MAXIT",
",",
"fflag",
"=",
"DEFAULT_FFLAG",
",",
"maxgerr",
"=",
"DEFAULT_MAXGERR",
",",
"sclip",
"=",
"3.",
",",
"nclip",
"=",
"0",
",",
"integrmode",
"=",
"BILINEAR",
",",
"linear",
"=",
"False",
",",
"maxrit",
"=",
"None",
")",
":",
"# This parameter list is quite large and should in principle be",
"# simplified by re-distributing these controls to somewhere else.",
"# We keep this design though because it better mimics the flat",
"# architecture used in the original STSDAS task `ellipse`.",
"# multiple fitted isophotes will be stored here",
"isophote_list",
"=",
"[",
"]",
"# get starting sma from appropriate source: keyword parameter,",
"# internal EllipseGeometry instance, or fixed default value.",
"if",
"not",
"sma0",
":",
"if",
"self",
".",
"_geometry",
":",
"sma",
"=",
"self",
".",
"_geometry",
".",
"sma",
"else",
":",
"sma",
"=",
"10.",
"else",
":",
"sma",
"=",
"sma0",
"# first, go from initial sma outwards until",
"# hitting one of several stopping criteria.",
"noiter",
"=",
"False",
"first_isophote",
"=",
"True",
"while",
"True",
":",
"# first isophote runs longer",
"minit_a",
"=",
"2",
"*",
"minit",
"if",
"first_isophote",
"else",
"minit",
"first_isophote",
"=",
"False",
"isophote",
"=",
"self",
".",
"fit_isophote",
"(",
"sma",
",",
"step",
",",
"conver",
",",
"minit_a",
",",
"maxit",
",",
"fflag",
",",
"maxgerr",
",",
"sclip",
",",
"nclip",
",",
"integrmode",
",",
"linear",
",",
"maxrit",
",",
"noniterate",
"=",
"noiter",
",",
"isophote_list",
"=",
"isophote_list",
")",
"# check for failed fit.",
"if",
"(",
"isophote",
".",
"stop_code",
"<",
"0",
"or",
"isophote",
".",
"stop_code",
"==",
"1",
")",
":",
"# in case the fit failed right at the outset, return an",
"# empty list. This is the usual case when the user",
"# provides initial guesses that are too way off to enable",
"# the fitting algorithm to find any meaningful solution.",
"if",
"len",
"(",
"isophote_list",
")",
"==",
"1",
":",
"warnings",
".",
"warn",
"(",
"'No meaningful fit was possible.'",
",",
"AstropyUserWarning",
")",
"return",
"IsophoteList",
"(",
"[",
"]",
")",
"self",
".",
"_fix_last_isophote",
"(",
"isophote_list",
",",
"-",
"1",
")",
"# get last isophote from the actual list, since the last",
"# `isophote` instance in this context may no longer be OK.",
"isophote",
"=",
"isophote_list",
"[",
"-",
"1",
"]",
"# if two consecutive isophotes failed to fit,",
"# shut off iterative mode. Or, bail out and",
"# change to go inwards.",
"if",
"len",
"(",
"isophote_list",
")",
">",
"2",
":",
"if",
"(",
"(",
"isophote",
".",
"stop_code",
"==",
"5",
"and",
"isophote_list",
"[",
"-",
"2",
"]",
".",
"stop_code",
"==",
"5",
")",
"or",
"isophote",
".",
"stop_code",
"==",
"1",
")",
":",
"if",
"maxsma",
"and",
"maxsma",
">",
"isophote",
".",
"sma",
":",
"# if a maximum sma value was provided by",
"# user, and the current sma is smaller than",
"# maxsma, keep growing sma in non-iterative",
"# mode until reaching it.",
"noiter",
"=",
"True",
"else",
":",
"# if no maximum sma, stop growing and change",
"# to go inwards.",
"break",
"# reset variable from the actual list, since the last",
"# `isophote` instance may no longer be OK.",
"isophote",
"=",
"isophote_list",
"[",
"-",
"1",
"]",
"# update sma. If exceeded user-defined",
"# maximum, bail out from this loop.",
"sma",
"=",
"isophote",
".",
"sample",
".",
"geometry",
".",
"update_sma",
"(",
"step",
")",
"if",
"maxsma",
"and",
"sma",
">=",
"maxsma",
":",
"break",
"# reset sma so as to go inwards.",
"first_isophote",
"=",
"isophote_list",
"[",
"0",
"]",
"sma",
",",
"step",
"=",
"first_isophote",
".",
"sample",
".",
"geometry",
".",
"reset_sma",
"(",
"step",
")",
"# now, go from initial sma inwards towards center.",
"while",
"True",
":",
"isophote",
"=",
"self",
".",
"fit_isophote",
"(",
"sma",
",",
"step",
",",
"conver",
",",
"minit",
",",
"maxit",
",",
"fflag",
",",
"maxgerr",
",",
"sclip",
",",
"nclip",
",",
"integrmode",
",",
"linear",
",",
"maxrit",
",",
"going_inwards",
"=",
"True",
",",
"isophote_list",
"=",
"isophote_list",
")",
"# if abnormal condition, fix isophote but keep going.",
"if",
"isophote",
".",
"stop_code",
"<",
"0",
":",
"self",
".",
"_fix_last_isophote",
"(",
"isophote_list",
",",
"0",
")",
"# reset variable from the actual list, since the last",
"# `isophote` instance may no longer be OK.",
"isophote",
"=",
"isophote_list",
"[",
"-",
"1",
"]",
"# figure out next sma; if exceeded user-defined",
"# minimum, or too small, bail out from this loop",
"sma",
"=",
"isophote",
".",
"sample",
".",
"geometry",
".",
"update_sma",
"(",
"step",
")",
"if",
"sma",
"<=",
"max",
"(",
"minsma",
",",
"0.5",
")",
":",
"break",
"# if user asked for minsma=0, extract special isophote there",
"if",
"minsma",
"==",
"0.0",
":",
"isophote",
"=",
"self",
".",
"fit_isophote",
"(",
"0.0",
",",
"isophote_list",
"=",
"isophote_list",
")",
"# sort list of isophotes according to sma",
"isophote_list",
".",
"sort",
"(",
")",
"return",
"IsophoteList",
"(",
"isophote_list",
")"
] |
Fit multiple isophotes to the image array.
This method loops over each value of the semimajor axis (sma)
length (constructed from the input parameters), fitting a single
isophote at each sma. The entire set of isophotes is returned
in an `~photutils.isophote.IsophoteList` instance.
Parameters
----------
sma0 : float, optional
The starting value for the semimajor axis length (pixels).
This value must not be the minimum or maximum semimajor axis
length, but something in between. The algorithm can't start
from the very center of the galaxy image because the
modelling of elliptical isophotes on that region is poor and
it will diverge very easily if not tied to other previously
fit isophotes. It can't start from the maximum value either
because the maximum is not known beforehand, depending on
signal-to-noise. The ``sma0`` value should be selected such
that the corresponding isophote has a good signal-to-noise
ratio and a clearly defined geometry. If set to `None` (the
default), one of two actions will be taken: if a
`~photutils.isophote.EllipseGeometry` instance was input to
the `~photutils.isophote.Ellipse` constructor, its ``sma``
value will be used. Otherwise, a default value of 10. will
be used.
minsma : float, optional
The minimum value for the semimajor axis length (pixels).
The default is 0.
maxsma : float or `None`, optional
The maximum value for the semimajor axis length (pixels).
When set to `None` (default), the algorithm will increase
        the semimajor axis until one of several conditions causes
        it to stop and revert to fitting ellipses with sma <
``sma0``.
step : float, optional
The step value used to grow/shrink the semimajor axis length
(pixels if ``linear=True``, or a relative value if
``linear=False``). See the ``linear`` parameter. The
default is 0.1.
conver : float, optional
The main convergence criterion. Iterations stop when the
largest harmonic amplitude becomes smaller (in absolute
value) than ``conver`` times the harmonic fit rms. The
default is 0.05.
minit : int, optional
The minimum number of iterations to perform. A minimum of 10
(the default) iterations guarantees that, on average, 2
iterations will be available for fitting each independent
parameter (the four harmonic amplitudes and the intensity
level). For the first isophote, the minimum number of
iterations is 2 * ``minit`` to ensure that, even departing
from not-so-good initial values, the algorithm has a better
chance to converge to a sensible solution.
maxit : int, optional
The maximum number of iterations to perform. The default is
50.
fflag : float, optional
The acceptable fraction of flagged data points in the
sample. If the actual fraction of valid data points is
smaller than this, the iterations will stop and the current
`~photutils.isophote.Isophote` will be returned. Flagged
data points are points that either lie outside the image
frame, are masked, or were rejected by sigma-clipping. The
default is 0.7.
maxgerr : float, optional
The maximum acceptable relative error in the local radial
        intensity gradient. This is the main control for preventing
        ellipses from growing into regions of too low signal-to-noise
        ratio. `Busko (1996; ASPC
101, 139)
<http://adsabs.harvard.edu/abs/1996ASPC..101..139B>`_ showed
that the fitting precision relates to that relative error.
The usual behavior of the gradient relative error is to
increase with semimajor axis, being larger in outer, fainter
regions of a galaxy image. In the current implementation,
the ``maxgerr`` criterion is triggered only when two
consecutive isophotes exceed the value specified by the
parameter. This prevents premature stopping caused by
contamination such as stars and HII regions.
A number of actions may happen when the gradient error
exceeds ``maxgerr`` (or becomes non-significant and is set
to `None`). If the maximum semimajor axis specified by
``maxsma`` is set to `None`, semimajor axis growth is
stopped and the algorithm proceeds inwards to the galaxy
center. If ``maxsma`` is set to some finite value, and this
value is larger than the current semimajor axis length, the
algorithm enters non-iterative mode and proceeds outwards
until reaching ``maxsma``. The default is 0.5.
sclip : float, optional
The sigma-clip sigma value. The default is 3.0.
nclip : int, optional
        The number of sigma-clip iterations. The default is 0,
which means sigma-clipping is skipped.
integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
The area integration mode. The default is 'bilinear'.
linear : bool, optional
The semimajor axis growing/shrinking mode. If `False`
(default), the geometric growing mode is chosen, thus the
semimajor axis length is increased by a factor of (1. +
``step``), and the process is repeated until either the
semimajor axis value reaches the value of parameter
``maxsma``, or the last fitted ellipse has more than a given
fraction of its sampled points flagged out (see ``fflag``).
The process then resumes from the first fitted ellipse (at
``sma0``) inwards, in steps of (1./(1. + ``step``)), until
the semimajor axis length reaches the value ``minsma``. In
case of linear growing, the increment or decrement value is
given directly by ``step`` in pixels. If ``maxsma`` is set
to `None`, the semimajor axis will grow until a low
signal-to-noise criterion is met. See ``maxgerr``.
maxrit : float or `None`, optional
The maximum value of semimajor axis to perform an actual
fit. Whenever the current semimajor axis length is larger
than ``maxrit``, the isophotes will be extracted using the
current geometry, without being fitted. This non-iterative
mode may be useful for sampling regions of very low surface
brightness, where the algorithm may become unstable and
unable to recover reliable geometry information.
Non-iterative mode can also be entered automatically
whenever the ellipticity exceeds 1.0 or the ellipse center
crosses the image boundaries. If `None` (default), then no
maximum value is used.
Returns
-------
result : `~photutils.isophote.IsophoteList` instance
A list-like object of `~photutils.isophote.Isophote`
instances, sorted by increasing semimajor axis length.
|
[
"Fit",
"multiple",
"isophotes",
"to",
"the",
"image",
"array",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/ellipse.py#L199-L449
|
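An end-to-end sketch of fit_image on a synthetic image; the elliptical Gaussian source, image size, and starting guesses below are illustrative assumptions, not values from the photutils documentation:

import numpy as np
from photutils.isophote import Ellipse, EllipseGeometry

# synthetic galaxy-like image: an elliptical Gaussian on a flat pedestal
y, x = np.mgrid[0:101, 0:101]
data = 100. * np.exp(-((x - 50.) / 12.)**2 - ((y - 50.) / 8.)**2) + 1.

# initial guesses: center, sma (pixels), ellipticity, PA (radians)
geometry = EllipseGeometry(x0=50., y0=50., sma=15., eps=0.3,
                           pa=20. * np.pi / 180.)
ellipse = Ellipse(data, geometry)

# fit outwards from sma0=15 up to maxsma=40, then inwards to the center
isolist = ellipse.fit_image(sma0=15., maxsma=40.)
print(isolist.sma)   # fitted semimajor axis lengths, sorted increasing
print(isolist.eps)   # fitted ellipticities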
10,614
|
astropy/photutils
|
photutils/isophote/ellipse.py
|
Ellipse.fit_isophote
|
def fit_isophote(self, sma, step=0.1, conver=DEFAULT_CONVERGENCE,
minit=DEFAULT_MINIT, maxit=DEFAULT_MAXIT,
fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR,
sclip=3., nclip=0, integrmode=BILINEAR,
linear=False, maxrit=None, noniterate=False,
going_inwards=False, isophote_list=None):
"""
Fit a single isophote with a given semimajor axis length.
The ``step`` and ``linear`` parameters are not used to actually
grow or shrink the current fitting semimajor axis length. They
are necessary so the sampling algorithm can know where to start
the gradient computation and also how to compute the elliptical
sector areas (when area integration mode is selected).
Parameters
----------
sma : float
The semimajor axis length (pixels).
step : float, optional
The step value used to grow/shrink the semimajor axis length
(pixels if ``linear=True``, or a relative value if
``linear=False``). See the ``linear`` parameter. The
default is 0.1.
conver : float, optional
The main convergence criterion. Iterations stop when the
largest harmonic amplitude becomes smaller (in absolute
value) than ``conver`` times the harmonic fit rms. The
default is 0.05.
minit : int, optional
The minimum number of iterations to perform. A minimum of 10
(the default) iterations guarantees that, on average, 2
iterations will be available for fitting each independent
parameter (the four harmonic amplitudes and the intensity
level). For the first isophote, the minimum number of
iterations is 2 * ``minit`` to ensure that, even departing
from not-so-good initial values, the algorithm has a better
chance to converge to a sensible solution.
maxit : int, optional
The maximum number of iterations to perform. The default is
50.
fflag : float, optional
The acceptable fraction of flagged data points in the
sample. If the actual fraction of valid data points is
smaller than this, the iterations will stop and the current
`~photutils.isophote.Isophote` will be returned. Flagged
data points are points that either lie outside the image
frame, are masked, or were rejected by sigma-clipping. The
default is 0.7.
maxgerr : float, optional
The maximum acceptable relative error in the local radial
intensity gradient. When fitting a single isophote by
        itself, this parameter doesn't have any effect on the outcome.
sclip : float, optional
The sigma-clip sigma value. The default is 3.0.
nclip : int, optional
        The number of sigma-clip iterations. The default is 0,
which means sigma-clipping is skipped.
integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
The area integration mode. The default is 'bilinear'.
linear : bool, optional
The semimajor axis growing/shrinking mode. When fitting
just one isophote, this parameter is used only by the code
        that defines the details of how elliptical arc segments
("sectors") are extracted from the image when using area
extraction modes (see the ``integrmode`` parameter).
maxrit : float or `None`, optional
The maximum value of semimajor axis to perform an actual
fit. Whenever the current semimajor axis length is larger
than ``maxrit``, the isophotes will be extracted using the
current geometry, without being fitted. This non-iterative
mode may be useful for sampling regions of very low surface
brightness, where the algorithm may become unstable and
unable to recover reliable geometry information.
Non-iterative mode can also be entered automatically
whenever the ellipticity exceeds 1.0 or the ellipse center
crosses the image boundaries. If `None` (default), then no
maximum value is used.
noniterate : bool, optional
Whether the fitting algorithm should be bypassed and an
isophote should be extracted with the geometry taken
directly from the most recent `~photutils.isophote.Isophote`
instance stored in the ``isophote_list`` parameter. This
parameter is mainly used when running the method in a loop
over different values of semimajor axis length, and we want
to change from iterative to non-iterative mode somewhere
along the sequence of isophotes. When set to `True`, this
parameter overrides the behavior associated with parameter
``maxrit``. The default is `False`.
going_inwards : bool, optional
Parameter to define the sense of SMA growth. When fitting
just one isophote, this parameter is used only by the code
that defines the details of how elliptical arc segments
("sectors") are extracted from the image, when using area
extraction modes (see the ``integrmode`` parameter). The
default is `False`.
isophote_list : list or `None`, optional
If not `None` (the default), the fitted
`~photutils.isophote.Isophote` instance is appended to this
list. It must be created and managed by the caller.
Returns
-------
result : `~photutils.isophote.Isophote` instance
The fitted isophote. The fitted isophote is also appended to
        the list passed in via the ``isophote_list`` parameter.
"""
geometry = self._geometry
# if available, geometry from last fitted isophote will be
# used as initial guess for next isophote.
if isophote_list is not None and len(isophote_list) > 0:
geometry = isophote_list[-1].sample.geometry
# do the fit
if noniterate or (maxrit and sma > maxrit):
isophote = self._non_iterative(sma, step, linear, geometry,
sclip, nclip, integrmode)
else:
isophote = self._iterative(sma, step, linear, geometry, sclip,
nclip, integrmode, conver, minit,
maxit, fflag, maxgerr, going_inwards)
# store result in list
if isophote_list is not None and isophote.valid:
isophote_list.append(isophote)
return isophote
|
python
|
def fit_isophote(self, sma, step=0.1, conver=DEFAULT_CONVERGENCE,
minit=DEFAULT_MINIT, maxit=DEFAULT_MAXIT,
fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR,
sclip=3., nclip=0, integrmode=BILINEAR,
linear=False, maxrit=None, noniterate=False,
going_inwards=False, isophote_list=None):
"""
Fit a single isophote with a given semimajor axis length.
The ``step`` and ``linear`` parameters are not used to actually
grow or shrink the current fitting semimajor axis length. They
are necessary so the sampling algorithm can know where to start
the gradient computation and also how to compute the elliptical
sector areas (when area integration mode is selected).
Parameters
----------
sma : float
The semimajor axis length (pixels).
step : float, optional
The step value used to grow/shrink the semimajor axis length
(pixels if ``linear=True``, or a relative value if
``linear=False``). See the ``linear`` parameter. The
default is 0.1.
conver : float, optional
The main convergence criterion. Iterations stop when the
largest harmonic amplitude becomes smaller (in absolute
value) than ``conver`` times the harmonic fit rms. The
default is 0.05.
minit : int, optional
The minimum number of iterations to perform. A minimum of 10
(the default) iterations guarantees that, on average, 2
iterations will be available for fitting each independent
parameter (the four harmonic amplitudes and the intensity
level). For the first isophote, the minimum number of
iterations is 2 * ``minit`` to ensure that, even departing
from not-so-good initial values, the algorithm has a better
chance to converge to a sensible solution.
maxit : int, optional
The maximum number of iterations to perform. The default is
50.
fflag : float, optional
The acceptable fraction of flagged data points in the
sample. If the actual fraction of valid data points is
smaller than this, the iterations will stop and the current
`~photutils.isophote.Isophote` will be returned. Flagged
data points are points that either lie outside the image
frame, are masked, or were rejected by sigma-clipping. The
default is 0.7.
maxgerr : float, optional
The maximum acceptable relative error in the local radial
intensity gradient. When fitting a single isophote by
        itself, this parameter doesn't have any effect on the outcome.
sclip : float, optional
The sigma-clip sigma value. The default is 3.0.
nclip : int, optional
        The number of sigma-clip iterations. The default is 0,
which means sigma-clipping is skipped.
integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
The area integration mode. The default is 'bilinear'.
linear : bool, optional
The semimajor axis growing/shrinking mode. When fitting
just one isophote, this parameter is used only by the code
        that defines the details of how elliptical arc segments
("sectors") are extracted from the image when using area
extraction modes (see the ``integrmode`` parameter).
maxrit : float or `None`, optional
The maximum value of semimajor axis to perform an actual
fit. Whenever the current semimajor axis length is larger
than ``maxrit``, the isophotes will be extracted using the
current geometry, without being fitted. This non-iterative
mode may be useful for sampling regions of very low surface
brightness, where the algorithm may become unstable and
unable to recover reliable geometry information.
Non-iterative mode can also be entered automatically
whenever the ellipticity exceeds 1.0 or the ellipse center
crosses the image boundaries. If `None` (default), then no
maximum value is used.
noniterate : bool, optional
Whether the fitting algorithm should be bypassed and an
isophote should be extracted with the geometry taken
directly from the most recent `~photutils.isophote.Isophote`
instance stored in the ``isophote_list`` parameter. This
parameter is mainly used when running the method in a loop
over different values of semimajor axis length, and we want
to change from iterative to non-iterative mode somewhere
along the sequence of isophotes. When set to `True`, this
parameter overrides the behavior associated with parameter
``maxrit``. The default is `False`.
going_inwards : bool, optional
Parameter to define the sense of SMA growth. When fitting
just one isophote, this parameter is used only by the code
that defines the details of how elliptical arc segments
("sectors") are extracted from the image, when using area
extraction modes (see the ``integrmode`` parameter). The
default is `False`.
isophote_list : list or `None`, optional
If not `None` (the default), the fitted
`~photutils.isophote.Isophote` instance is appended to this
list. It must be created and managed by the caller.
Returns
-------
result : `~photutils.isophote.Isophote` instance
The fitted isophote. The fitted isophote is also appended to
        the list passed in via the ``isophote_list`` parameter.
"""
geometry = self._geometry
# if available, geometry from last fitted isophote will be
# used as initial guess for next isophote.
if isophote_list is not None and len(isophote_list) > 0:
geometry = isophote_list[-1].sample.geometry
# do the fit
if noniterate or (maxrit and sma > maxrit):
isophote = self._non_iterative(sma, step, linear, geometry,
sclip, nclip, integrmode)
else:
isophote = self._iterative(sma, step, linear, geometry, sclip,
nclip, integrmode, conver, minit,
maxit, fflag, maxgerr, going_inwards)
# store result in list
if isophote_list is not None and isophote.valid:
isophote_list.append(isophote)
return isophote
|
[
"def",
"fit_isophote",
"(",
"self",
",",
"sma",
",",
"step",
"=",
"0.1",
",",
"conver",
"=",
"DEFAULT_CONVERGENCE",
",",
"minit",
"=",
"DEFAULT_MINIT",
",",
"maxit",
"=",
"DEFAULT_MAXIT",
",",
"fflag",
"=",
"DEFAULT_FFLAG",
",",
"maxgerr",
"=",
"DEFAULT_MAXGERR",
",",
"sclip",
"=",
"3.",
",",
"nclip",
"=",
"0",
",",
"integrmode",
"=",
"BILINEAR",
",",
"linear",
"=",
"False",
",",
"maxrit",
"=",
"None",
",",
"noniterate",
"=",
"False",
",",
"going_inwards",
"=",
"False",
",",
"isophote_list",
"=",
"None",
")",
":",
"geometry",
"=",
"self",
".",
"_geometry",
"# if available, geometry from last fitted isophote will be",
"# used as initial guess for next isophote.",
"if",
"isophote_list",
"is",
"not",
"None",
"and",
"len",
"(",
"isophote_list",
")",
">",
"0",
":",
"geometry",
"=",
"isophote_list",
"[",
"-",
"1",
"]",
".",
"sample",
".",
"geometry",
"# do the fit",
"if",
"noniterate",
"or",
"(",
"maxrit",
"and",
"sma",
">",
"maxrit",
")",
":",
"isophote",
"=",
"self",
".",
"_non_iterative",
"(",
"sma",
",",
"step",
",",
"linear",
",",
"geometry",
",",
"sclip",
",",
"nclip",
",",
"integrmode",
")",
"else",
":",
"isophote",
"=",
"self",
".",
"_iterative",
"(",
"sma",
",",
"step",
",",
"linear",
",",
"geometry",
",",
"sclip",
",",
"nclip",
",",
"integrmode",
",",
"conver",
",",
"minit",
",",
"maxit",
",",
"fflag",
",",
"maxgerr",
",",
"going_inwards",
")",
"# store result in list",
"if",
"isophote_list",
"is",
"not",
"None",
"and",
"isophote",
".",
"valid",
":",
"isophote_list",
".",
"append",
"(",
"isophote",
")",
"return",
"isophote"
] |
Fit a single isophote with a given semimajor axis length.
The ``step`` and ``linear`` parameters are not used to actually
grow or shrink the current fitting semimajor axis length. They
are necessary so the sampling algorithm can know where to start
the gradient computation and also how to compute the elliptical
sector areas (when area integration mode is selected).
Parameters
----------
sma : float
The semimajor axis length (pixels).
step : float, optional
The step value used to grow/shrink the semimajor axis length
(pixels if ``linear=True``, or a relative value if
``linear=False``). See the ``linear`` parameter. The
default is 0.1.
conver : float, optional
The main convergence criterion. Iterations stop when the
largest harmonic amplitude becomes smaller (in absolute
value) than ``conver`` times the harmonic fit rms. The
default is 0.05.
minit : int, optional
The minimum number of iterations to perform. A minimum of 10
(the default) iterations guarantees that, on average, 2
iterations will be available for fitting each independent
parameter (the four harmonic amplitudes and the intensity
level). For the first isophote, the minimum number of
iterations is 2 * ``minit`` to ensure that, even departing
from not-so-good initial values, the algorithm has a better
chance to converge to a sensible solution.
maxit : int, optional
The maximum number of iterations to perform. The default is
50.
fflag : float, optional
The acceptable fraction of flagged data points in the
sample. If the actual fraction of valid data points is
smaller than this, the iterations will stop and the current
`~photutils.isophote.Isophote` will be returned. Flagged
data points are points that either lie outside the image
frame, are masked, or were rejected by sigma-clipping. The
default is 0.7.
maxgerr : float, optional
The maximum acceptable relative error in the local radial
intensity gradient. When fitting a single isophote by
        itself, this parameter doesn't have any effect on the outcome.
sclip : float, optional
The sigma-clip sigma value. The default is 3.0.
nclip : int, optional
        The number of sigma-clip iterations. The default is 0,
which means sigma-clipping is skipped.
integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
The area integration mode. The default is 'bilinear'.
linear : bool, optional
The semimajor axis growing/shrinking mode. When fitting
just one isophote, this parameter is used only by the code
        that defines the details of how elliptical arc segments
("sectors") are extracted from the image when using area
extraction modes (see the ``integrmode`` parameter).
maxrit : float or `None`, optional
The maximum value of semimajor axis to perform an actual
fit. Whenever the current semimajor axis length is larger
than ``maxrit``, the isophotes will be extracted using the
current geometry, without being fitted. This non-iterative
mode may be useful for sampling regions of very low surface
brightness, where the algorithm may become unstable and
unable to recover reliable geometry information.
Non-iterative mode can also be entered automatically
whenever the ellipticity exceeds 1.0 or the ellipse center
crosses the image boundaries. If `None` (default), then no
maximum value is used.
noniterate : bool, optional
Whether the fitting algorithm should be bypassed and an
isophote should be extracted with the geometry taken
directly from the most recent `~photutils.isophote.Isophote`
instance stored in the ``isophote_list`` parameter. This
parameter is mainly used when running the method in a loop
over different values of semimajor axis length, and we want
to change from iterative to non-iterative mode somewhere
along the sequence of isophotes. When set to `True`, this
parameter overrides the behavior associated with parameter
``maxrit``. The default is `False`.
going_inwards : bool, optional
Parameter to define the sense of SMA growth. When fitting
just one isophote, this parameter is used only by the code
that defines the details of how elliptical arc segments
("sectors") are extracted from the image, when using area
extraction modes (see the ``integrmode`` parameter). The
default is `False`.
isophote_list : list or `None`, optional
If not `None` (the default), the fitted
`~photutils.isophote.Isophote` instance is appended to this
list. It must be created and managed by the caller.
Returns
-------
result : `~photutils.isophote.Isophote` instance
The fitted isophote. The fitted isophote is also appended to
the list input via the ``isophote_list`` parameter.
|
[
"Fit",
"a",
"single",
"isophote",
"with",
"a",
"given",
"semimajor",
"axis",
"length",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/ellipse.py#L451-L579
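A minimal usage sketch for the ``fit_isophote`` parameters documented above; the synthetic image and the ``EllipseGeometry`` starting values are illustrative assumptions, not taken from this record.

import numpy as np
from astropy.modeling.models import Gaussian2D
from photutils.isophote import Ellipse, EllipseGeometry

# Build a smooth elliptical "galaxy" so the fit has something to converge on.
y, x = np.mgrid[0:128, 0:128]
data = Gaussian2D(100., 64., 64., 15., 10., theta=0.5)(x, y)

# Initial guess for the isophote geometry: center, semimajor axis length,
# ellipticity, and position angle (radians).
geometry = EllipseGeometry(x0=64., y0=64., sma=20., eps=0.3, pa=0.5)
ellipse = Ellipse(data, geometry)

# Fit a single isophote at sma=20 pixels, collecting the result into a
# caller-managed list via the ``isophote_list`` parameter.
isophotes = []
iso = ellipse.fit_isophote(20., conver=0.05, minit=10, maxit=50,
                           isophote_list=isophotes)
print(iso.sma, iso.eps, iso.intens)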
|
10,615
|
astropy/photutils
|
photutils/aperture/circle.py
|
CircularAperture.to_sky
|
def to_sky(self, wcs, mode='all'):
"""
Convert the aperture to a `SkyCircularAperture` object defined
in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyCircularAperture` object
A `SkyCircularAperture` object.
"""
sky_params = self._to_sky_params(wcs, mode=mode)
return SkyCircularAperture(**sky_params)
|
python
|
def to_sky(self, wcs, mode='all'):
"""
Convert the aperture to a `SkyCircularAperture` object defined
in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyCircularAperture` object
A `SkyCircularAperture` object.
"""
sky_params = self._to_sky_params(wcs, mode=mode)
return SkyCircularAperture(**sky_params)
|
[
"def",
"to_sky",
"(",
"self",
",",
"wcs",
",",
"mode",
"=",
"'all'",
")",
":",
"sky_params",
"=",
"self",
".",
"_to_sky_params",
"(",
"wcs",
",",
"mode",
"=",
"mode",
")",
"return",
"SkyCircularAperture",
"(",
"*",
"*",
"sky_params",
")"
] |
Convert the aperture to a `SkyCircularAperture` object defined
in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyCircularAperture` object
A `SkyCircularAperture` object.
|
[
"Convert",
"the",
"aperture",
"to",
"a",
"SkyCircularAperture",
"object",
"defined",
"in",
"celestial",
"coordinates",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/circle.py#L157-L179
|
10,616
|
astropy/photutils
|
photutils/aperture/circle.py
|
CircularAnnulus.to_sky
|
def to_sky(self, wcs, mode='all'):
"""
Convert the aperture to a `SkyCircularAnnulus` object defined
in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyCircularAnnulus` object
A `SkyCircularAnnulus` object.
"""
sky_params = self._to_sky_params(wcs, mode=mode)
return SkyCircularAnnulus(**sky_params)
|
python
|
def to_sky(self, wcs, mode='all'):
"""
Convert the aperture to a `SkyCircularAnnulus` object defined
in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyCircularAnnulus` object
A `SkyCircularAnnulus` object.
"""
sky_params = self._to_sky_params(wcs, mode=mode)
return SkyCircularAnnulus(**sky_params)
|
[
"def",
"to_sky",
"(",
"self",
",",
"wcs",
",",
"mode",
"=",
"'all'",
")",
":",
"sky_params",
"=",
"self",
".",
"_to_sky_params",
"(",
"wcs",
",",
"mode",
"=",
"mode",
")",
"return",
"SkyCircularAnnulus",
"(",
"*",
"*",
"sky_params",
")"
] |
Convert the aperture to a `SkyCircularAnnulus` object defined
in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyCircularAnnulus` object
A `SkyCircularAnnulus` object.
|
[
"Convert",
"the",
"aperture",
"to",
"a",
"SkyCircularAnnulus",
"object",
"defined",
"in",
"celestial",
"coordinates",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/circle.py#L254-L276
|
10,617
|
astropy/photutils
|
photutils/aperture/circle.py
|
SkyCircularAperture.to_pixel
|
def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to a `CircularAperture` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `CircularAperture` object
A `CircularAperture` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return CircularAperture(**pixel_params)
|
python
|
def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to a `CircularAperture` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `CircularAperture` object
A `CircularAperture` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return CircularAperture(**pixel_params)
|
[
"def",
"to_pixel",
"(",
"self",
",",
"wcs",
",",
"mode",
"=",
"'all'",
")",
":",
"pixel_params",
"=",
"self",
".",
"_to_pixel_params",
"(",
"wcs",
",",
"mode",
"=",
"mode",
")",
"return",
"CircularAperture",
"(",
"*",
"*",
"pixel_params",
")"
] |
Convert the aperture to a `CircularAperture` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `CircularAperture` object
A `CircularAperture` object.
|
[
"Convert",
"the",
"aperture",
"to",
"a",
"CircularAperture",
"object",
"defined",
"in",
"pixel",
"coordinates",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/circle.py#L303-L325
|
10,618
|
astropy/photutils
|
photutils/aperture/circle.py
|
SkyCircularAnnulus.to_pixel
|
def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to a `CircularAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `CircularAnnulus` object
A `CircularAnnulus` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return CircularAnnulus(**pixel_params)
|
python
|
def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to a `CircularAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `CircularAnnulus` object
A `CircularAnnulus` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return CircularAnnulus(**pixel_params)
|
[
"def",
"to_pixel",
"(",
"self",
",",
"wcs",
",",
"mode",
"=",
"'all'",
")",
":",
"pixel_params",
"=",
"self",
".",
"_to_pixel_params",
"(",
"wcs",
",",
"mode",
"=",
"mode",
")",
"return",
"CircularAnnulus",
"(",
"*",
"*",
"pixel_params",
")"
] |
Convert the aperture to a `CircularAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
(``'all'``; default) or only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `CircularAnnulus` object
A `CircularAnnulus` object.
|
[
"Convert",
"the",
"aperture",
"to",
"a",
"CircularAnnulus",
"object",
"defined",
"in",
"pixel",
"coordinates",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/circle.py#L364-L386
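A round-trip sketch for the four conversion methods above; the TAN-projection WCS built here is a made-up example for illustration, not part of these records.

from astropy.wcs import WCS
from photutils import CircularAperture

# A simple celestial WCS: TAN projection, ~1 arcsec per pixel.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [50., 50.]
wcs.wcs.crval = [150., 2.]
wcs.wcs.cdelt = [-1. / 3600., 1. / 3600.]

aper_pix = CircularAperture((42., 57.), r=4.)
aper_sky = aper_pix.to_sky(wcs)     # -> SkyCircularAperture
aper_back = aper_sky.to_pixel(wcs)  # -> back to a CircularAperture
print(aper_sky.r)                   # radius as an angular Quantity (~4 arcsec)
print(aper_back.positions, aper_back.r)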
|
10,619
|
astropy/photutils
|
photutils/datasets/make.py
|
apply_poisson_noise
|
def apply_poisson_noise(data, random_state=None):
"""
Apply Poisson noise to an array, where the value of each element in
the input array represents the expected number of counts.
Each pixel in the output array is generated by drawing a random
sample from a Poisson distribution whose expectation value is given
by the pixel value in the input array.
Parameters
----------
data : array-like
The array on which to apply Poisson noise. Every pixel in the
array must have a non-negative value (i.e. counts).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
result : `~numpy.ndarray`
The data array after applying Poisson noise.
See Also
--------
make_noise_image
Examples
--------
.. plot::
:include-source:
from photutils.datasets import make_4gaussians_image
from photutils.datasets import apply_poisson_noise
data1 = make_4gaussians_image(noise=False)
data2 = apply_poisson_noise(data1, random_state=12345)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.imshow(data1, origin='lower', interpolation='nearest')
ax1.set_title('Original image')
ax2.imshow(data2, origin='lower', interpolation='nearest')
ax2.set_title('Original image with Poisson noise applied')
"""
data = np.asanyarray(data)
if np.any(data < 0):
raise ValueError('data must not contain any negative values')
prng = check_random_state(random_state)
return prng.poisson(data)
|
python
|
def apply_poisson_noise(data, random_state=None):
"""
Apply Poisson noise to an array, where the value of each element in
the input array represents the expected number of counts.
Each pixel in the output array is generated by drawing a random
sample from a Poisson distribution whose expectation value is given
by the pixel value in the input array.
Parameters
----------
data : array-like
The array on which to apply Poisson noise. Every pixel in the
array must have a non-negative value (i.e. counts).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
result : `~numpy.ndarray`
The data array after applying Poisson noise.
See Also
--------
make_noise_image
Examples
--------
.. plot::
:include-source:
from photutils.datasets import make_4gaussians_image
from photutils.datasets import apply_poisson_noise
data1 = make_4gaussians_image(noise=False)
data2 = apply_poisson_noise(data1, random_state=12345)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.imshow(data1, origin='lower', interpolation='nearest')
ax1.set_title('Original image')
ax2.imshow(data2, origin='lower', interpolation='nearest')
ax2.set_title('Original image with Poisson noise applied')
"""
data = np.asanyarray(data)
if np.any(data < 0):
raise ValueError('data must not contain any negative values')
prng = check_random_state(random_state)
return prng.poisson(data)
|
[
"def",
"apply_poisson_noise",
"(",
"data",
",",
"random_state",
"=",
"None",
")",
":",
"data",
"=",
"np",
".",
"asanyarray",
"(",
"data",
")",
"if",
"np",
".",
"any",
"(",
"data",
"<",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'data must not contain any negative values'",
")",
"prng",
"=",
"check_random_state",
"(",
"random_state",
")",
"return",
"prng",
".",
"poisson",
"(",
"data",
")"
] |
Apply Poisson noise to an array, where the value of each element in
the input array represents the expected number of counts.
Each pixel in the output array is generated by drawing a random
sample from a Poisson distribution whose expectation value is given
by the pixel value in the input array.
Parameters
----------
data : array-like
The array on which to apply Poisson noise. Every pixel in the
array must have a non-negative value (i.e. counts).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
result : `~numpy.ndarray`
The data array after applying Poisson noise.
See Also
--------
make_noise_image
Examples
--------
.. plot::
:include-source:
from photutils.datasets import make_4gaussians_image
from photutils.datasets import apply_poisson_noise
data1 = make_4gaussians_image(noise=False)
data2 = apply_poisson_noise(data1, random_state=12345)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.imshow(data1, origin='lower', interpolation='nearest')
ax1.set_title('Original image')
ax2.imshow(data2, origin='lower', interpolation='nearest')
ax2.set_title('Original image with Poisson noise applied')
|
[
"Apply",
"Poisson",
"noise",
"to",
"an",
"array",
"where",
"the",
"value",
"of",
"each",
"element",
"in",
"the",
"input",
"array",
"represents",
"the",
"expected",
"number",
"of",
"counts",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/make.py#L26-L78
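A short sketch of ``apply_poisson_noise``; the uniform 10-count input image is an illustrative assumption.

import numpy as np
from photutils.datasets import apply_poisson_noise

expected = np.full((100, 100), 10.)  # 10 expected counts per pixel
noisy = apply_poisson_noise(expected, random_state=12345)
# Poisson statistics: mean and variance both approach the expectation value.
print(noisy.mean(), noisy.var())     # both roughly 10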
|
10,620
|
astropy/photutils
|
photutils/datasets/make.py
|
make_noise_image
|
def make_noise_image(shape, type='gaussian', mean=None, stddev=None,
random_state=None):
"""
Make a noise image containing Gaussian or Poisson noise.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
type : {'gaussian', 'poisson'}
The distribution used to generate the random noise:
* ``'gaussian'``: Gaussian distributed noise.
* ``'poisson'``: Poisson distributed noise.
mean : float
The mean of the random distribution. Required for both Gaussian
and Poisson noise; there is no default value.
stddev : float, optional
The standard deviation of the Gaussian noise to add to the
output image. Required for Gaussian noise and ignored for
Poisson noise (the variance of the Poisson distribution is equal
to its mean).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Separate function calls with the same noise parameters and
``random_state`` will generate the identical noise image.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing random noise.
See Also
--------
apply_poisson_noise
Examples
--------
.. plot::
:include-source:
# make Gaussian and Poisson noise images
from photutils.datasets import make_noise_image
shape = (100, 100)
image1 = make_noise_image(shape, type='gaussian', mean=0., stddev=5.)
image2 = make_noise_image(shape, type='poisson', mean=5.)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(image1, origin='lower', interpolation='nearest')
ax1.set_title('Gaussian noise ($\\mu=0$, $\\sigma=5.$)')
ax2.imshow(image2, origin='lower', interpolation='nearest')
ax2.set_title('Poisson noise ($\\mu=5$)')
"""
if mean is None:
raise ValueError('"mean" must be input')
prng = check_random_state(random_state)
if type == 'gaussian':
if stddev is None:
raise ValueError('"stddev" must be input for Gaussian noise')
image = prng.normal(loc=mean, scale=stddev, size=shape)
elif type == 'poisson':
image = prng.poisson(lam=mean, size=shape)
else:
raise ValueError('Invalid type: {0}. Use one of '
'{"gaussian", "poisson"}.'.format(type))
return image
|
python
|
def make_noise_image(shape, type='gaussian', mean=None, stddev=None,
random_state=None):
"""
Make a noise image containing Gaussian or Poisson noise.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
type : {'gaussian', 'poisson'}
The distribution used to generate the random noise:
* ``'gaussian'``: Gaussian distributed noise.
* ``'poisson'``: Poisson distributed noise.
mean : float
The mean of the random distribution. Required for both Gaussian
and Poisson noise; there is no default value.
stddev : float, optional
The standard deviation of the Gaussian noise to add to the
output image. Required for Gaussian noise and ignored for
Poisson noise (the variance of the Poisson distribution is equal
to its mean).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Separate function calls with the same noise parameters and
``random_state`` will generate the identical noise image.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing random noise.
See Also
--------
apply_poisson_noise
Examples
--------
.. plot::
:include-source:
# make Gaussian and Poisson noise images
from photutils.datasets import make_noise_image
shape = (100, 100)
image1 = make_noise_image(shape, type='gaussian', mean=0., stddev=5.)
image2 = make_noise_image(shape, type='poisson', mean=5.)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(image1, origin='lower', interpolation='nearest')
ax1.set_title('Gaussian noise ($\\mu=0$, $\\sigma=5.$)')
ax2.imshow(image2, origin='lower', interpolation='nearest')
ax2.set_title('Poisson noise ($\\mu=5$)')
"""
if mean is None:
raise ValueError('"mean" must be input')
prng = check_random_state(random_state)
if type == 'gaussian':
if stddev is None:
raise ValueError('"stddev" must be input for Gaussian noise')
image = prng.normal(loc=mean, scale=stddev, size=shape)
elif type == 'poisson':
image = prng.poisson(lam=mean, size=shape)
else:
raise ValueError('Invalid type: {0}. Use one of '
'{"gaussian", "poisson"}.'.format(type))
return image
|
[
"def",
"make_noise_image",
"(",
"shape",
",",
"type",
"=",
"'gaussian'",
",",
"mean",
"=",
"None",
",",
"stddev",
"=",
"None",
",",
"random_state",
"=",
"None",
")",
":",
"if",
"mean",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'\"mean\" must be input'",
")",
"prng",
"=",
"check_random_state",
"(",
"random_state",
")",
"if",
"type",
"==",
"'gaussian'",
":",
"if",
"stddev",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'\"stddev\" must be input for Gaussian noise'",
")",
"image",
"=",
"prng",
".",
"normal",
"(",
"loc",
"=",
"mean",
",",
"scale",
"=",
"stddev",
",",
"size",
"=",
"shape",
")",
"elif",
"type",
"==",
"'poisson'",
":",
"image",
"=",
"prng",
".",
"poisson",
"(",
"lam",
"=",
"mean",
",",
"size",
"=",
"shape",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid type: {0}. Use one of '",
"'{\"gaussian\", \"poisson\"}.'",
".",
"format",
"(",
"type",
")",
")",
"return",
"image"
] |
Make a noise image containing Gaussian or Poisson noise.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
type : {'gaussian', 'poisson'}
The distribution used to generate the random noise:
* ``'gaussian'``: Gaussian distributed noise.
* ``'poisson'``: Poisson distributed noise.
mean : float
The mean of the random distribution. Required for both Gaussian
and Poisson noise; there is no default value.
stddev : float, optional
The standard deviation of the Gaussian noise to add to the
output image. Required for Gaussian noise and ignored for
Poisson noise (the variance of the Poisson distribution is equal
to its mean).
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Separate function calls with the same noise parameters and
``random_state`` will generate the identical noise image.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing random noise.
See Also
--------
apply_poisson_noise
Examples
--------
.. plot::
:include-source:
# make Gaussian and Poisson noise images
from photutils.datasets import make_noise_image
shape = (100, 100)
image1 = make_noise_image(shape, type='gaussian', mean=0., stddev=5.)
image2 = make_noise_image(shape, type='poisson', mean=5.)
# plot the images
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(image1, origin='lower', interpolation='nearest')
ax1.set_title('Gaussian noise ($\\mu=0$, $\\sigma=5.$)')
ax2.imshow(image2, origin='lower', interpolation='nearest')
ax2.set_title('Poisson noise ($\\mu=5$)')
|
[
"Make",
"a",
"noise",
"image",
"containing",
"Gaussian",
"or",
"Poisson",
"noise",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/make.py#L81-L156
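A short sketch of the two noise types documented above; for ``'poisson'`` the mean fixes both the location and the variance, so ``stddev`` is not used.

from photutils.datasets import make_noise_image

gauss = make_noise_image((100, 100), type='gaussian', mean=0., stddev=2.,
                         random_state=12345)
poiss = make_noise_image((100, 100), type='poisson', mean=5.,
                         random_state=12345)
print(gauss.std())  # roughly 2
print(poiss.var())  # roughly 5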
|
10,621
|
astropy/photutils
|
photutils/datasets/make.py
|
make_random_models_table
|
def make_random_models_table(n_sources, param_ranges, random_state=None):
"""
Make a `~astropy.table.Table` containing randomly generated
parameters for an Astropy model to simulate a set of sources.
Each row of the table corresponds to a source whose parameters are
defined by the column names. The parameters are drawn from a
uniform distribution over the specified input ranges.
The output table can be input into :func:`make_model_sources_image`
to create an image containing the model sources.
Parameters
----------
n_sources : int
The number of random model sources to generate.
param_ranges : dict
The lower and upper boundaries for each of the model parameters
as a `dict` mapping the parameter name to its ``(lower, upper)``
bounds.
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
table : `~astropy.table.Table`
A table of parameters for the randomly generated sources. Each
row of the table corresponds to a source whose model parameters
are defined by the column names. The column names will be the
keys of the dictionary ``param_ranges``.
See Also
--------
make_random_gaussians_table, make_model_sources_image
Notes
-----
To generate identical parameter values from separate function calls,
``param_ranges`` must be input as an `~collections.OrderedDict` with
the same parameter ranges and ``random_state`` must be the same.
Examples
--------
>>> from collections import OrderedDict
>>> from photutils.datasets import make_random_models_table
>>> import numpy as np
>>> n_sources = 5
>>> param_ranges = [('amplitude', [500, 1000]),
... ('x_mean', [0, 500]),
... ('y_mean', [0, 300]),
... ('x_stddev', [1, 5]),
... ('y_stddev', [1, 5]),
... ('theta', [0, np.pi])]
>>> param_ranges = OrderedDict(param_ranges)
>>> sources = make_random_models_table(n_sources, param_ranges,
... random_state=12345)
>>> for col in sources.colnames:
... sources[col].info.format = '%.8g' # for consistent table output
>>> print(sources)
amplitude x_mean y_mean x_stddev y_stddev theta
--------- --------- --------- --------- --------- ----------
964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859
658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889
591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615
602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422
783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298
"""
prng = check_random_state(random_state)
sources = Table()
for param_name, (lower, upper) in param_ranges.items():
# Generate a column for every item in param_ranges, even if it
# is not in the model (e.g. flux). However, such columns will
# be ignored when rendering the image.
sources[param_name] = prng.uniform(lower, upper, n_sources)
return sources
|
python
|
def make_random_models_table(n_sources, param_ranges, random_state=None):
"""
Make a `~astropy.table.Table` containing randomly generated
parameters for an Astropy model to simulate a set of sources.
Each row of the table corresponds to a source whose parameters are
defined by the column names. The parameters are drawn from a
uniform distribution over the specified input ranges.
The output table can be input into :func:`make_model_sources_image`
to create an image containing the model sources.
Parameters
----------
n_sources : int
The number of random model sources to generate.
param_ranges : dict
The lower and upper boundaries for each of the model parameters
as a `dict` mapping the parameter name to its ``(lower, upper)``
bounds.
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
table : `~astropy.table.Table`
A table of parameters for the randomly generated sources. Each
row of the table corresponds to a source whose model parameters
are defined by the column names. The column names will be the
keys of the dictionary ``param_ranges``.
See Also
--------
make_random_gaussians_table, make_model_sources_image
Notes
-----
To generate identical parameter values from separate function calls,
``param_ranges`` must be input as an `~collections.OrderedDict` with
the same parameter ranges and ``random_state`` must be the same.
Examples
--------
>>> from collections import OrderedDict
>>> from photutils.datasets import make_random_models_table
>>> import numpy as np
>>> n_sources = 5
>>> param_ranges = [('amplitude', [500, 1000]),
... ('x_mean', [0, 500]),
... ('y_mean', [0, 300]),
... ('x_stddev', [1, 5]),
... ('y_stddev', [1, 5]),
... ('theta', [0, np.pi])]
>>> param_ranges = OrderedDict(param_ranges)
>>> sources = make_random_models_table(n_sources, param_ranges,
... random_state=12345)
>>> for col in sources.colnames:
... sources[col].info.format = '%.8g' # for consistent table output
>>> print(sources)
amplitude x_mean y_mean x_stddev y_stddev theta
--------- --------- --------- --------- --------- ----------
964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859
658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889
591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615
602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422
783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298
"""
prng = check_random_state(random_state)
sources = Table()
for param_name, (lower, upper) in param_ranges.items():
# Generate a column for every item in param_ranges, even if it
# is not in the model (e.g. flux). However, such columns will
# be ignored when rendering the image.
sources[param_name] = prng.uniform(lower, upper, n_sources)
return sources
|
[
"def",
"make_random_models_table",
"(",
"n_sources",
",",
"param_ranges",
",",
"random_state",
"=",
"None",
")",
":",
"prng",
"=",
"check_random_state",
"(",
"random_state",
")",
"sources",
"=",
"Table",
"(",
")",
"for",
"param_name",
",",
"(",
"lower",
",",
"upper",
")",
"in",
"param_ranges",
".",
"items",
"(",
")",
":",
"# Generate a column for every item in param_ranges, even if it",
"# is not in the model (e.g. flux). However, such columns will",
"# be ignored when rendering the image.",
"sources",
"[",
"param_name",
"]",
"=",
"prng",
".",
"uniform",
"(",
"lower",
",",
"upper",
",",
"n_sources",
")",
"return",
"sources"
] |
Make a `~astropy.table.Table` containing randomly generated
parameters for an Astropy model to simulate a set of sources.
Each row of the table corresponds to a source whose parameters are
defined by the column names. The parameters are drawn from a
uniform distribution over the specified input ranges.
The output table can be input into :func:`make_model_sources_image`
to create an image containing the model sources.
Parameters
----------
n_sources : int
The number of random model sources to generate.
param_ranges : dict
The lower and upper boundaries for each of the model parameters
as a `dict` mapping the parameter name to its ``(lower, upper)``
bounds.
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
table : `~astropy.table.Table`
A table of parameters for the randomly generated sources. Each
row of the table corresponds to a source whose model parameters
are defined by the column names. The column names will be the
keys of the dictionary ``param_ranges``.
See Also
--------
make_random_gaussians_table, make_model_sources_image
Notes
-----
To generate identical parameter values from separate function calls,
``param_ranges`` must be input as an `~collections.OrderedDict` with
the same parameter ranges and ``random_state`` must be the same.
Examples
--------
>>> from collections import OrderedDict
>>> from photutils.datasets import make_random_models_table
>>> import numpy as np
>>> n_sources = 5
>>> param_ranges = [('amplitude', [500, 1000]),
... ('x_mean', [0, 500]),
... ('y_mean', [0, 300]),
... ('x_stddev', [1, 5]),
... ('y_stddev', [1, 5]),
... ('theta', [0, np.pi])]
>>> param_ranges = OrderedDict(param_ranges)
>>> sources = make_random_models_table(n_sources, param_ranges,
... random_state=12345)
>>> for col in sources.colnames:
... sources[col].info.format = '%.8g' # for consistent table output
>>> print(sources)
amplitude x_mean y_mean x_stddev y_stddev theta
--------- --------- --------- --------- --------- ----------
964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859
658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889
591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615
602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422
783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298
|
[
"Make",
"a",
"~astropy",
".",
"table",
".",
"Table",
"containing",
"randomly",
"generated",
"parameters",
"for",
"an",
"Astropy",
"model",
"to",
"simulate",
"a",
"set",
"of",
"sources",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/make.py#L159-L237
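A sketch of the reproducibility point from the Notes above: with an ``OrderedDict`` (fixed column order) and the same ``random_state``, repeated calls draw identical parameters. The ranges here are illustrative.

from collections import OrderedDict
from photutils.datasets import make_random_models_table

param_ranges = OrderedDict([('amplitude', [1, 10]),
                            ('x_0', [0, 100]),
                            ('y_0', [0, 100])])
t1 = make_random_models_table(3, param_ranges, random_state=1)
t2 = make_random_models_table(3, param_ranges, random_state=1)
assert (t1['x_0'] == t2['x_0']).all()  # identical draws for identical input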
|
10,622
|
astropy/photutils
|
photutils/datasets/make.py
|
make_random_gaussians_table
|
def make_random_gaussians_table(n_sources, param_ranges, random_state=None):
"""
Make a `~astropy.table.Table` containing randomly generated
parameters for 2D Gaussian sources.
Each row of the table corresponds to a Gaussian source whose
parameters are defined by the column names. The parameters are
drawn from a uniform distribution over the specified input ranges.
The output table can be input into
:func:`make_gaussian_sources_image` to create an image containing
the 2D Gaussian sources.
Parameters
----------
n_sources : int
The number of random Gaussian sources to generate.
param_ranges : dict
The lower and upper boundaries for each of the
`~astropy.modeling.functional_models.Gaussian2D` parameters as a
`dict` mapping the parameter name to its ``(lower, upper)``
bounds. The dictionary keys must be valid
`~astropy.modeling.functional_models.Gaussian2D` parameter names
or ``'flux'``. If ``'flux'`` is specified, but not
``'amplitude'`` then the 2D Gaussian amplitudes will be
calculated and placed in the output table. If both ``'flux'``
and ``'amplitude'`` are specified, then ``'flux'`` will be
ignored. Model parameters not defined in ``param_ranges`` will
be set to the default value.
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
table : `~astropy.table.Table`
A table of parameters for the randomly generated Gaussian
sources. Each row of the table corresponds to a Gaussian source
whose parameters are defined by the column names.
See Also
--------
make_random_models_table, make_gaussian_sources_image
Notes
-----
To generate identical parameter values from separate function calls,
``param_ranges`` must be input as an `~collections.OrderedDict` with
the same parameter ranges and ``random_state`` must be the same.
Examples
--------
>>> from collections import OrderedDict
>>> from photutils.datasets import make_random_gaussians_table
>>> import numpy as np
>>> n_sources = 5
>>> param_ranges = [('amplitude', [500, 1000]),
... ('x_mean', [0, 500]),
... ('y_mean', [0, 300]),
... ('x_stddev', [1, 5]),
... ('y_stddev', [1, 5]),
... ('theta', [0, np.pi])]
>>> param_ranges = OrderedDict(param_ranges)
>>> sources = make_random_gaussians_table(n_sources, param_ranges,
... random_state=12345)
>>> for col in sources.colnames:
... sources[col].info.format = '%.8g' # for consistent table output
>>> print(sources)
amplitude x_mean y_mean x_stddev y_stddev theta
--------- --------- --------- --------- --------- ----------
964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859
658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889
591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615
602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422
783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298
To specify the flux range instead of the amplitude range:
>>> param_ranges = [('flux', [500, 1000]),
... ('x_mean', [0, 500]),
... ('y_mean', [0, 300]),
... ('x_stddev', [1, 5]),
... ('y_stddev', [1, 5]),
... ('theta', [0, np.pi])]
>>> param_ranges = OrderedDict(param_ranges)
>>> sources = make_random_gaussians_table(n_sources, param_ranges,
... random_state=12345)
>>> for col in sources.colnames:
... sources[col].info.format = '%.8g' # for consistent table output
>>> print(sources)
flux x_mean y_mean x_stddev y_stddev theta amplitude
--------- --------- --------- --------- --------- ---------- ---------
964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859 11.863685
658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889 6.3854388
591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615 7.3122209
602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422 8.5691781
783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298 11.611707
Note that in this case the output table contains both a flux and
amplitude column. The flux column will be ignored when generating
an image of the models using :func:`make_gaussian_sources_image`.
"""
sources = make_random_models_table(n_sources, param_ranges,
random_state=random_state)
# convert Gaussian2D flux to amplitude
if 'flux' in param_ranges and 'amplitude' not in param_ranges:
model = Gaussian2D(x_stddev=1, y_stddev=1)
if 'x_stddev' in sources.colnames:
xstd = sources['x_stddev']
else:
xstd = model.x_stddev.value # default
if 'y_stddev' in sources.colnames:
ystd = sources['y_stddev']
else:
ystd = model.y_stddev.value # default
sources = sources.copy()
sources['amplitude'] = sources['flux'] / (2. * np.pi * xstd * ystd)
return sources
|
python
|
def make_random_gaussians_table(n_sources, param_ranges, random_state=None):
"""
Make a `~astropy.table.Table` containing randomly generated
parameters for 2D Gaussian sources.
Each row of the table corresponds to a Gaussian source whose
parameters are defined by the column names. The parameters are
drawn from a uniform distribution over the specified input ranges.
The output table can be input into
:func:`make_gaussian_sources_image` to create an image containing
the 2D Gaussian sources.
Parameters
----------
n_sources : int
The number of random Gaussian sources to generate.
param_ranges : dict
The lower and upper boundaries for each of the
`~astropy.modeling.functional_models.Gaussian2D` parameters as a
`dict` mapping the parameter name to its ``(lower, upper)``
bounds. The dictionary keys must be valid
`~astropy.modeling.functional_models.Gaussian2D` parameter names
or ``'flux'``. If ``'flux'`` is specified, but not
``'amplitude'`` then the 2D Gaussian amplitudes will be
calculated and placed in the output table. If both ``'flux'``
and ``'amplitude'`` are specified, then ``'flux'`` will be
ignored. Model parameters not defined in ``param_ranges`` will
be set to the default value.
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
table : `~astropy.table.Table`
A table of parameters for the randomly generated Gaussian
sources. Each row of the table corresponds to a Gaussian source
whose parameters are defined by the column names.
See Also
--------
make_random_models_table, make_gaussian_sources_image
Notes
-----
To generate identical parameter values from separate function calls,
``param_ranges`` must be input as an `~collections.OrderedDict` with
the same parameter ranges and ``random_state`` must be the same.
Examples
--------
>>> from collections import OrderedDict
>>> from photutils.datasets import make_random_gaussians_table
>>> import numpy as np
>>> n_sources = 5
>>> param_ranges = [('amplitude', [500, 1000]),
... ('x_mean', [0, 500]),
... ('y_mean', [0, 300]),
... ('x_stddev', [1, 5]),
... ('y_stddev', [1, 5]),
... ('theta', [0, np.pi])]
>>> param_ranges = OrderedDict(param_ranges)
>>> sources = make_random_gaussians_table(n_sources, param_ranges,
... random_state=12345)
>>> for col in sources.colnames:
... sources[col].info.format = '%.8g' # for consistent table output
>>> print(sources)
amplitude x_mean y_mean x_stddev y_stddev theta
--------- --------- --------- --------- --------- ----------
964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859
658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889
591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615
602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422
783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298
To specify the flux range instead of the amplitude range:
>>> param_ranges = [('flux', [500, 1000]),
... ('x_mean', [0, 500]),
... ('y_mean', [0, 300]),
... ('x_stddev', [1, 5]),
... ('y_stddev', [1, 5]),
... ('theta', [0, np.pi])]
>>> param_ranges = OrderedDict(param_ranges)
>>> sources = make_random_gaussians_table(n_sources, param_ranges,
... random_state=12345)
>>> for col in sources.colnames:
... sources[col].info.format = '%.8g' # for consistent table output
>>> print(sources)
flux x_mean y_mean x_stddev y_stddev theta amplitude
--------- --------- --------- --------- --------- ---------- ---------
964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859 11.863685
658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889 6.3854388
591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615 7.3122209
602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422 8.5691781
783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298 11.611707
Note that in this case the output table contains both a flux and
amplitude column. The flux column will be ignored when generating
an image of the models using :func:`make_gaussian_sources_image`.
"""
sources = make_random_models_table(n_sources, param_ranges,
random_state=random_state)
# convert Gaussian2D flux to amplitude
if 'flux' in param_ranges and 'amplitude' not in param_ranges:
model = Gaussian2D(x_stddev=1, y_stddev=1)
if 'x_stddev' in sources.colnames:
xstd = sources['x_stddev']
else:
xstd = model.x_stddev.value # default
if 'y_stddev' in sources.colnames:
ystd = sources['y_stddev']
else:
ystd = model.y_stddev.value # default
sources = sources.copy()
sources['amplitude'] = sources['flux'] / (2. * np.pi * xstd * ystd)
return sources
|
[
"def",
"make_random_gaussians_table",
"(",
"n_sources",
",",
"param_ranges",
",",
"random_state",
"=",
"None",
")",
":",
"sources",
"=",
"make_random_models_table",
"(",
"n_sources",
",",
"param_ranges",
",",
"random_state",
"=",
"random_state",
")",
"# convert Gaussian2D flux to amplitude",
"if",
"'flux'",
"in",
"param_ranges",
"and",
"'amplitude'",
"not",
"in",
"param_ranges",
":",
"model",
"=",
"Gaussian2D",
"(",
"x_stddev",
"=",
"1",
",",
"y_stddev",
"=",
"1",
")",
"if",
"'x_stddev'",
"in",
"sources",
".",
"colnames",
":",
"xstd",
"=",
"sources",
"[",
"'x_stddev'",
"]",
"else",
":",
"xstd",
"=",
"model",
".",
"x_stddev",
".",
"value",
"# default",
"if",
"'y_stddev'",
"in",
"sources",
".",
"colnames",
":",
"ystd",
"=",
"sources",
"[",
"'y_stddev'",
"]",
"else",
":",
"ystd",
"=",
"model",
".",
"y_stddev",
".",
"value",
"# default",
"sources",
"=",
"sources",
".",
"copy",
"(",
")",
"sources",
"[",
"'amplitude'",
"]",
"=",
"sources",
"[",
"'flux'",
"]",
"/",
"(",
"2.",
"*",
"np",
".",
"pi",
"*",
"xstd",
"*",
"ystd",
")",
"return",
"sources"
] |
Make a `~astropy.table.Table` containing randomly generated
parameters for 2D Gaussian sources.
Each row of the table corresponds to a Gaussian source whose
parameters are defined by the column names. The parameters are
drawn from a uniform distribution over the specified input ranges.
The output table can be input into
:func:`make_gaussian_sources_image` to create an image containing
the 2D Gaussian sources.
Parameters
----------
n_sources : int
The number of random Gaussian sources to generate.
param_ranges : dict
The lower and upper boundaries for each of the
`~astropy.modeling.functional_models.Gaussian2D` parameters as a
`dict` mapping the parameter name to its ``(lower, upper)``
bounds. The dictionary keys must be valid
`~astropy.modeling.functional_models.Gaussian2D` parameter names
or ``'flux'``. If ``'flux'`` is specified, but not
``'amplitude'`` then the 2D Gaussian amplitudes will be
calculated and placed in the output table. If both ``'flux'``
and ``'amplitude'`` are specified, then ``'flux'`` will be
ignored. Model parameters not defined in ``param_ranges`` will
be set to the default value.
random_state : int or `~numpy.random.RandomState`, optional
Pseudo-random number generator state used for random sampling.
Returns
-------
table : `~astropy.table.Table`
A table of parameters for the randomly generated Gaussian
sources. Each row of the table corresponds to a Gaussian source
whose parameters are defined by the column names.
See Also
--------
make_random_models_table, make_gaussian_sources_image
Notes
-----
To generate identical parameter values from separate function calls,
``param_ranges`` must be input as an `~collections.OrderedDict` with
the same parameter ranges and ``random_state`` must be the same.
Examples
--------
>>> from collections import OrderedDict
>>> from photutils.datasets import make_random_gaussians_table
>>> import numpy as np
>>> n_sources = 5
>>> param_ranges = [('amplitude', [500, 1000]),
... ('x_mean', [0, 500]),
... ('y_mean', [0, 300]),
... ('x_stddev', [1, 5]),
... ('y_stddev', [1, 5]),
... ('theta', [0, np.pi])]
>>> param_ranges = OrderedDict(param_ranges)
>>> sources = make_random_gaussians_table(n_sources, param_ranges,
... random_state=12345)
>>> for col in sources.colnames:
... sources[col].info.format = '%.8g' # for consistent table output
>>> print(sources)
amplitude x_mean y_mean x_stddev y_stddev theta
--------- --------- --------- --------- --------- ----------
964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859
658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889
591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615
602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422
783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298
To specify the flux range instead of the amplitude range:
>>> param_ranges = [('flux', [500, 1000]),
... ('x_mean', [0, 500]),
... ('y_mean', [0, 300]),
... ('x_stddev', [1, 5]),
... ('y_stddev', [1, 5]),
... ('theta', [0, np.pi])]
>>> param_ranges = OrderedDict(param_ranges)
>>> sources = make_random_gaussians_table(n_sources, param_ranges,
... random_state=12345)
>>> for col in sources.colnames:
... sources[col].info.format = '%.8g' # for consistent table output
>>> print(sources)
flux x_mean y_mean x_stddev y_stddev theta amplitude
--------- --------- --------- --------- --------- ---------- ---------
964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859 11.863685
658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889 6.3854388
591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615 7.3122209
602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422 8.5691781
783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298 11.611707
Note that in this case the output table contains both a flux and
amplitude column. The flux column will be ignored when generating
an image of the models using :func:`make_gaussian_sources_image`.
|
[
"Make",
"a",
"~astropy",
".",
"table",
".",
"Table",
"containing",
"randomly",
"generated",
"parameters",
"for",
"2D",
"Gaussian",
"sources",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/make.py#L240-L363
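A sketch verifying the flux-to-amplitude conversion performed in the function body above (amplitude = flux / (2 * pi * x_stddev * y_stddev)); the parameter ranges are illustrative.

import numpy as np
from collections import OrderedDict
from photutils.datasets import make_random_gaussians_table

param_ranges = OrderedDict([('flux', [500, 1000]),
                            ('x_stddev', [1, 5]),
                            ('y_stddev', [1, 5])])
sources = make_random_gaussians_table(3, param_ranges, random_state=12345)
expected = sources['flux'] / (2. * np.pi * sources['x_stddev']
                              * sources['y_stddev'])
assert np.allclose(sources['amplitude'], expected)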
|
10,623
|
astropy/photutils
|
photutils/datasets/make.py
|
make_model_sources_image
|
def make_model_sources_image(shape, model, source_table, oversample=1):
"""
Make an image containing sources generated from a user-specified
model.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
model : 2D astropy.modeling.models object
The model to be used for rendering the sources.
source_table : `~astropy.table.Table`
Table of parameters for the sources. Each row of the table
corresponds to a source whose model parameters are defined by
the column names, which must match the model parameter names.
Column names that do not match model parameters will be ignored.
Model parameters not defined in the table will be set to the
``model`` default value.
oversample : float, optional
The sampling factor used to discretize the models on a pixel
grid. If the value is 1.0 (the default), then the models will
be discretized by taking the value at the center of the pixel
bin. Note that this method will not preserve the total flux of
very small sources. Otherwise, the models will be discretized
by taking the average over an oversampled grid. The pixels will
be oversampled by the ``oversample`` factor.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing model sources.
See Also
--------
make_random_models_table, make_gaussian_sources_image
Examples
--------
.. plot::
:include-source:
from collections import OrderedDict
from astropy.modeling.models import Moffat2D
from photutils.datasets import (make_random_models_table,
make_model_sources_image)
model = Moffat2D()
n_sources = 10
shape = (100, 100)
param_ranges = [('amplitude', [100, 200]),
('x_0', [0, shape[1]]),
('y_0', [0, shape[0]]),
('gamma', [5, 10]),
('alpha', [1, 2])]
param_ranges = OrderedDict(param_ranges)
sources = make_random_models_table(n_sources, param_ranges,
random_state=12345)
data = make_model_sources_image(shape, model, sources)
import matplotlib.pyplot as plt
plt.imshow(data)
"""
image = np.zeros(shape, dtype=np.float64)
y, x = np.indices(shape)
params_to_set = []
for param in source_table.colnames:
if param in model.param_names:
params_to_set.append(param)
# Save the initial parameter values so we can set them back when
# done with the loop. It's best not to copy a model, because some
# models (e.g. PSF models) may have substantial amounts of data in
# them.
init_params = {param: getattr(model, param) for param in params_to_set}
try:
for i, source in enumerate(source_table):
for param in params_to_set:
setattr(model, param, source[param])
if oversample == 1:
image += model(x, y)
else:
image += discretize_model(model, (0, shape[1]),
(0, shape[0]), mode='oversample',
factor=oversample)
finally:
for param, value in init_params.items():
setattr(model, param, value)
return image
|
python
|
def make_model_sources_image(shape, model, source_table, oversample=1):
"""
Make an image containing sources generated from a user-specified
model.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
model : 2D astropy.modeling.models object
The model to be used for rendering the sources.
source_table : `~astropy.table.Table`
Table of parameters for the sources. Each row of the table
corresponds to a source whose model parameters are defined by
the column names, which must match the model parameter names.
Column names that do not match model parameters will be ignored.
Model parameters not defined in the table will be set to the
``model`` default value.
oversample : float, optional
The sampling factor used to discretize the models on a pixel
grid. If the value is 1.0 (the default), then the models will
be discretized by taking the value at the center of the pixel
bin. Note that this method will not preserve the total flux of
very small sources. Otherwise, the models will be discretized
by taking the average over an oversampled grid. The pixels will
be oversampled by the ``oversample`` factor.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing model sources.
See Also
--------
make_random_models_table, make_gaussian_sources_image
Examples
--------
.. plot::
:include-source:
from collections import OrderedDict
from astropy.modeling.models import Moffat2D
from photutils.datasets import (make_random_models_table,
make_model_sources_image)
model = Moffat2D()
n_sources = 10
shape = (100, 100)
param_ranges = [('amplitude', [100, 200]),
('x_0', [0, shape[1]]),
('y_0', [0, shape[0]]),
('gamma', [5, 10]),
('alpha', [1, 2])]
param_ranges = OrderedDict(param_ranges)
sources = make_random_models_table(n_sources, param_ranges,
random_state=12345)
data = make_model_sources_image(shape, model, sources)
import matplotlib.pyplot as plt
plt.imshow(data)
"""
image = np.zeros(shape, dtype=np.float64)
y, x = np.indices(shape)
params_to_set = []
for param in source_table.colnames:
if param in model.param_names:
params_to_set.append(param)
# Save the initial parameter values so we can set them back when
# done with the loop. It's best not to copy a model, because some
# models (e.g. PSF models) may have substantial amounts of data in
# them.
init_params = {param: getattr(model, param) for param in params_to_set}
try:
for i, source in enumerate(source_table):
for param in params_to_set:
setattr(model, param, source[param])
if oversample == 1:
image += model(x, y)
else:
image += discretize_model(model, (0, shape[1]),
(0, shape[0]), mode='oversample',
factor=oversample)
finally:
for param, value in init_params.items():
setattr(model, param, value)
return image
|
[
"def",
"make_model_sources_image",
"(",
"shape",
",",
"model",
",",
"source_table",
",",
"oversample",
"=",
"1",
")",
":",
"image",
"=",
"np",
".",
"zeros",
"(",
"shape",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"y",
",",
"x",
"=",
"np",
".",
"indices",
"(",
"shape",
")",
"params_to_set",
"=",
"[",
"]",
"for",
"param",
"in",
"source_table",
".",
"colnames",
":",
"if",
"param",
"in",
"model",
".",
"param_names",
":",
"params_to_set",
".",
"append",
"(",
"param",
")",
"# Save the initial parameter values so we can set them back when",
"# done with the loop. It's best not to copy a model, because some",
"# models (e.g. PSF models) may have substantial amounts of data in",
"# them.",
"init_params",
"=",
"{",
"param",
":",
"getattr",
"(",
"model",
",",
"param",
")",
"for",
"param",
"in",
"params_to_set",
"}",
"try",
":",
"for",
"i",
",",
"source",
"in",
"enumerate",
"(",
"source_table",
")",
":",
"for",
"param",
"in",
"params_to_set",
":",
"setattr",
"(",
"model",
",",
"param",
",",
"source",
"[",
"param",
"]",
")",
"if",
"oversample",
"==",
"1",
":",
"image",
"+=",
"model",
"(",
"x",
",",
"y",
")",
"else",
":",
"image",
"+=",
"discretize_model",
"(",
"model",
",",
"(",
"0",
",",
"shape",
"[",
"1",
"]",
")",
",",
"(",
"0",
",",
"shape",
"[",
"0",
"]",
")",
",",
"mode",
"=",
"'oversample'",
",",
"factor",
"=",
"oversample",
")",
"finally",
":",
"for",
"param",
",",
"value",
"in",
"init_params",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"model",
",",
"param",
",",
"value",
")",
"return",
"image"
] |
Make an image containing sources generated from a user-specified
model.
Parameters
----------
shape : 2-tuple of int
The shape of the output 2D image.
model : 2D astropy.modeling.models object
The model to be used for rendering the sources.
source_table : `~astropy.table.Table`
Table of parameters for the sources. Each row of the table
corresponds to a source whose model parameters are defined by
the column names, which must match the model parameter names.
Column names that do not match model parameters will be ignored.
Model parameters not defined in the table will be set to the
``model`` default value.
oversample : float, optional
The sampling factor used to discretize the models on a pixel
grid. If the value is 1.0 (the default), then the models will
be discretized by taking the value at the center of the pixel
bin. Note that this method will not preserve the total flux of
very small sources. Otherwise, the models will be discretized
by taking the average over an oversampled grid. The pixels will
be oversampled by the ``oversample`` factor.
Returns
-------
image : 2D `~numpy.ndarray`
Image containing model sources.
See Also
--------
make_random_models_table, make_gaussian_sources_image
Examples
--------
.. plot::
:include-source:
from collections import OrderedDict
from astropy.modeling.models import Moffat2D
from photutils.datasets import (make_random_models_table,
make_model_sources_image)
model = Moffat2D()
n_sources = 10
shape = (100, 100)
param_ranges = [('amplitude', [100, 200]),
('x_0', [0, shape[1]]),
('y_0', [0, shape[0]]),
('gamma', [5, 10]),
('alpha', [1, 2])]
param_ranges = OrderedDict(param_ranges)
sources = make_random_models_table(n_sources, param_ranges,
random_state=12345)
data = make_model_sources_image(shape, model, sources)
import matplotlib.pyplot as plt
plt.imshow(data)
|
[
"Make",
"an",
"image",
"containing",
"sources",
"generated",
"from",
"a",
"user",
"-",
"specified",
"model",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/make.py#L366-L460
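A sketch of rendering sources from a hand-built table rather than a random one; per the docstring above, the column names must match the model parameter names (here ``Moffat2D``), and the values are illustrative.

from astropy.table import Table
from astropy.modeling.models import Moffat2D
from photutils.datasets import make_model_sources_image

sources = Table()
sources['amplitude'] = [100., 150.]
sources['x_0'] = [30., 70.]
sources['y_0'] = [40., 60.]
sources['gamma'] = [5., 8.]
sources['alpha'] = [1.5, 2.0]
image = make_model_sources_image((100, 100), Moffat2D(), sources)
print(image.shape, image.max())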
|
10,624
|
astropy/photutils
|
photutils/datasets/make.py
|
make_4gaussians_image
|
def make_4gaussians_image(noise=True):
"""
Make an example image containing four 2D Gaussians plus a constant
background.
The background has a mean of 5.
If ``noise`` is `True`, then Gaussian noise with a mean of 0 and a
standard deviation of 5 is added to the output image.
Parameters
----------
noise : bool, optional
Whether to include noise in the output image (default is
`True`).
Returns
-------
image : 2D `~numpy.ndarray`
Image containing four 2D Gaussian sources.
See Also
--------
make_100gaussians_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
image = datasets.make_4gaussians_image()
import matplotlib.pyplot as plt
plt.imshow(image, origin='lower', interpolation='nearest')
"""
table = Table()
table['amplitude'] = [50, 70, 150, 210]
table['x_mean'] = [160, 25, 150, 90]
table['y_mean'] = [70, 40, 25, 60]
table['x_stddev'] = [15.2, 5.1, 3., 8.1]
table['y_stddev'] = [2.6, 2.5, 3., 4.7]
table['theta'] = np.array([145., 20., 0., 60.]) * np.pi / 180.
shape = (100, 200)
data = make_gaussian_sources_image(shape, table) + 5.
if noise:
data += make_noise_image(shape, type='gaussian', mean=0.,
stddev=5., random_state=12345)
return data
|
python
|
def make_4gaussians_image(noise=True):
"""
Make an example image containing four 2D Gaussians plus a constant
background.
The background has a mean of 5.
If ``noise`` is `True`, then Gaussian noise with a mean of 0 and a
standard deviation of 5 is added to the output image.
Parameters
----------
noise : bool, optional
Whether to include noise in the output image (default is
`True`).
Returns
-------
image : 2D `~numpy.ndarray`
Image containing four 2D Gaussian sources.
See Also
--------
make_100gaussians_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
image = datasets.make_4gaussians_image()
import matplotlib.pyplot as plt
plt.imshow(image, origin='lower', interpolation='nearest')
"""
table = Table()
table['amplitude'] = [50, 70, 150, 210]
table['x_mean'] = [160, 25, 150, 90]
table['y_mean'] = [70, 40, 25, 60]
table['x_stddev'] = [15.2, 5.1, 3., 8.1]
table['y_stddev'] = [2.6, 2.5, 3., 4.7]
table['theta'] = np.array([145., 20., 0., 60.]) * np.pi / 180.
shape = (100, 200)
data = make_gaussian_sources_image(shape, table) + 5.
if noise:
data += make_noise_image(shape, type='gaussian', mean=0.,
stddev=5., random_state=12345)
return data
|
[
"def",
"make_4gaussians_image",
"(",
"noise",
"=",
"True",
")",
":",
"table",
"=",
"Table",
"(",
")",
"table",
"[",
"'amplitude'",
"]",
"=",
"[",
"50",
",",
"70",
",",
"150",
",",
"210",
"]",
"table",
"[",
"'x_mean'",
"]",
"=",
"[",
"160",
",",
"25",
",",
"150",
",",
"90",
"]",
"table",
"[",
"'y_mean'",
"]",
"=",
"[",
"70",
",",
"40",
",",
"25",
",",
"60",
"]",
"table",
"[",
"'x_stddev'",
"]",
"=",
"[",
"15.2",
",",
"5.1",
",",
"3.",
",",
"8.1",
"]",
"table",
"[",
"'y_stddev'",
"]",
"=",
"[",
"2.6",
",",
"2.5",
",",
"3.",
",",
"4.7",
"]",
"table",
"[",
"'theta'",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"145.",
",",
"20.",
",",
"0.",
",",
"60.",
"]",
")",
"*",
"np",
".",
"pi",
"/",
"180.",
"shape",
"=",
"(",
"100",
",",
"200",
")",
"data",
"=",
"make_gaussian_sources_image",
"(",
"shape",
",",
"table",
")",
"+",
"5.",
"if",
"noise",
":",
"data",
"+=",
"make_noise_image",
"(",
"shape",
",",
"type",
"=",
"'gaussian'",
",",
"mean",
"=",
"0.",
",",
"stddev",
"=",
"5.",
",",
"random_state",
"=",
"12345",
")",
"return",
"data"
] |
Make an example image containing four 2D Gaussians plus a constant
background.
The background has a mean of 5.
If ``noise`` is `True`, then Gaussian noise with a mean of 0 and a
standard deviation of 5 is added to the output image.
Parameters
----------
noise : bool, optional
Whether to include noise in the output image (default is
`True`).
Returns
-------
image : 2D `~numpy.ndarray`
Image containing four 2D Gaussian sources.
See Also
--------
make_100gaussians_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
image = datasets.make_4gaussians_image()
plt.imshow(image, origin='lower', interpolation='nearest')
|
[
"Make",
"an",
"example",
"image",
"containing",
"four",
"2D",
"Gaussians",
"plus",
"a",
"constant",
"background",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/make.py#L638-L688
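A minimal non-plotting sketch (assuming the API at this commit): the noiseless image has the ``(100, 200)`` shape and the constant background of 5 set in the code above:

>>> from photutils import datasets
>>> image = datasets.make_4gaussians_image(noise=False)
>>> image.shape
(100, 200)
>>> bool(image.min() >= 5.0)  # constant background plus positive sources
True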
|
10,625
|
astropy/photutils
|
photutils/datasets/make.py
|
make_100gaussians_image
|
def make_100gaussians_image(noise=True):
"""
Make an example image containing 100 2D Gaussians plus a constant
background.
The background has a mean of 5.
If ``noise`` is `True`, then Gaussian noise with a mean of 0 and a
standard deviation of 2 is added to the output image.
Parameters
----------
noise : bool, optional
Whether to include noise in the output image (default is
`True`).
Returns
-------
image : 2D `~numpy.ndarray`
Image containing 100 2D Gaussian sources.
See Also
--------
make_4gaussians_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
image = datasets.make_100gaussians_image()
plt.imshow(image, origin='lower', interpolation='nearest')
"""
n_sources = 100
flux_range = [500, 1000]
xmean_range = [0, 500]
ymean_range = [0, 300]
xstddev_range = [1, 5]
ystddev_range = [1, 5]
params = OrderedDict([('flux', flux_range),
('x_mean', xmean_range),
('y_mean', ymean_range),
('x_stddev', xstddev_range),
('y_stddev', ystddev_range),
('theta', [0, 2*np.pi])])
sources = make_random_gaussians_table(n_sources, params,
random_state=12345)
shape = (300, 500)
data = make_gaussian_sources_image(shape, sources) + 5.
if noise:
data += make_noise_image(shape, type='gaussian', mean=0.,
stddev=2., random_state=12345)
return data
|
python
|
def make_100gaussians_image(noise=True):
"""
Make an example image containing 100 2D Gaussians plus a constant
background.
The background has a mean of 5.
If ``noise`` is `True`, then Gaussian noise with a mean of 0 and a
standard deviation of 2 is added to the output image.
Parameters
----------
noise : bool, optional
Whether to include noise in the output image (default is
`True`).
Returns
-------
image : 2D `~numpy.ndarray`
Image containing 100 2D Gaussian sources.
See Also
--------
make_4gaussians_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
image = datasets.make_100gaussians_image()
plt.imshow(image, origin='lower', interpolation='nearest')
"""
n_sources = 100
flux_range = [500, 1000]
xmean_range = [0, 500]
ymean_range = [0, 300]
xstddev_range = [1, 5]
ystddev_range = [1, 5]
params = OrderedDict([('flux', flux_range),
('x_mean', xmean_range),
('y_mean', ymean_range),
('x_stddev', xstddev_range),
('y_stddev', ystddev_range),
('theta', [0, 2*np.pi])])
sources = make_random_gaussians_table(n_sources, params,
random_state=12345)
shape = (300, 500)
data = make_gaussian_sources_image(shape, sources) + 5.
if noise:
data += make_noise_image(shape, type='gaussian', mean=0.,
stddev=2., random_state=12345)
return data
|
[
"def",
"make_100gaussians_image",
"(",
"noise",
"=",
"True",
")",
":",
"n_sources",
"=",
"100",
"flux_range",
"=",
"[",
"500",
",",
"1000",
"]",
"xmean_range",
"=",
"[",
"0",
",",
"500",
"]",
"ymean_range",
"=",
"[",
"0",
",",
"300",
"]",
"xstddev_range",
"=",
"[",
"1",
",",
"5",
"]",
"ystddev_range",
"=",
"[",
"1",
",",
"5",
"]",
"params",
"=",
"OrderedDict",
"(",
"[",
"(",
"'flux'",
",",
"flux_range",
")",
",",
"(",
"'x_mean'",
",",
"xmean_range",
")",
",",
"(",
"'y_mean'",
",",
"ymean_range",
")",
",",
"(",
"'x_stddev'",
",",
"xstddev_range",
")",
",",
"(",
"'y_stddev'",
",",
"ystddev_range",
")",
",",
"(",
"'theta'",
",",
"[",
"0",
",",
"2",
"*",
"np",
".",
"pi",
"]",
")",
"]",
")",
"sources",
"=",
"make_random_gaussians_table",
"(",
"n_sources",
",",
"params",
",",
"random_state",
"=",
"12345",
")",
"shape",
"=",
"(",
"300",
",",
"500",
")",
"data",
"=",
"make_gaussian_sources_image",
"(",
"shape",
",",
"sources",
")",
"+",
"5.",
"if",
"noise",
":",
"data",
"+=",
"make_noise_image",
"(",
"shape",
",",
"type",
"=",
"'gaussian'",
",",
"mean",
"=",
"0.",
",",
"stddev",
"=",
"2.",
",",
"random_state",
"=",
"12345",
")",
"return",
"data"
] |
Make an example image containing 100 2D Gaussians plus a constant
background.
The background has a mean of 5.
If ``noise`` is `True`, then Gaussian noise with a mean of 0 and a
standard deviation of 2 is added to the output image.
Parameters
----------
noise : bool, optional
Whether to include noise in the output image (default is
`True`).
Returns
-------
image : 2D `~numpy.ndarray`
Image containing 100 2D Gaussian sources.
See Also
--------
make_4gaussians_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
image = datasets.make_100gaussians_image()
plt.imshow(image, origin='lower', interpolation='nearest')
|
[
"Make",
"an",
"example",
"image",
"containing",
"100",
"2D",
"Gaussians",
"plus",
"a",
"constant",
"background",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/make.py#L691-L749
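The same kind of non-plotting sketch for the 100-source image (assuming the API at this commit); the shape follows ``shape = (300, 500)`` above:

>>> from photutils import datasets
>>> image = datasets.make_100gaussians_image(noise=False)
>>> image.shape
(300, 500)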
|
10,626
|
astropy/photutils
|
photutils/datasets/make.py
|
make_wcs
|
def make_wcs(shape, galactic=False):
"""
Create a simple celestial WCS object in either the ICRS or Galactic
coordinate frame.
Parameters
----------
shape : 2-tuple of int
The shape of the 2D array to be used with the output
`~astropy.wcs.WCS` object.
galactic : bool, optional
If `True`, then the output WCS will be in the Galactic
coordinate frame. If `False` (default), then the output WCS
will be in the ICRS coordinate frame.
Returns
-------
wcs : `~astropy.wcs.WCS` object
The world coordinate system (WCS) transformation.
See Also
--------
make_imagehdu
Examples
--------
>>> from photutils.datasets import make_wcs
>>> shape = (100, 100)
>>> wcs = make_wcs(shape)
>>> print(wcs.wcs.crpix) # doctest: +FLOAT_CMP
[50. 50.]
>>> print(wcs.wcs.crval) # doctest: +FLOAT_CMP
[197.8925 -1.36555556]
"""
wcs = WCS(naxis=2)
rho = np.pi / 3.
scale = 0.1 / 3600.
if astropy_version < '3.1':
wcs._naxis1 = shape[1] # nx
wcs._naxis2 = shape[0] # ny
else:
wcs.pixel_shape = shape
wcs.wcs.crpix = [shape[1] / 2, shape[0] / 2] # 1-indexed (x, y)
wcs.wcs.crval = [197.8925, -1.36555556]
wcs.wcs.cunit = ['deg', 'deg']
wcs.wcs.cd = [[-scale * np.cos(rho), scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)]]
if not galactic:
wcs.wcs.radesys = 'ICRS'
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
else:
wcs.wcs.ctype = ['GLON-CAR', 'GLAT-CAR']
return wcs
|
python
|
def make_wcs(shape, galactic=False):
"""
Create a simple celestial WCS object in either the ICRS or Galactic
coordinate frame.
Parameters
----------
shape : 2-tuple of int
The shape of the 2D array to be used with the output
`~astropy.wcs.WCS` object.
galactic : bool, optional
If `True`, then the output WCS will be in the Galactic
coordinate frame. If `False` (default), then the output WCS
will be in the ICRS coordinate frame.
Returns
-------
wcs : `~astropy.wcs.WCS` object
The world coordinate system (WCS) transformation.
See Also
--------
make_imagehdu
Examples
--------
>>> from photutils.datasets import make_wcs
>>> shape = (100, 100)
>>> wcs = make_wcs(shape)
>>> print(wcs.wcs.crpix) # doctest: +FLOAT_CMP
[50. 50.]
>>> print(wcs.wcs.crval) # doctest: +FLOAT_CMP
[197.8925 -1.36555556]
"""
wcs = WCS(naxis=2)
rho = np.pi / 3.
scale = 0.1 / 3600.
if astropy_version < '3.1':
wcs._naxis1 = shape[1] # nx
wcs._naxis2 = shape[0] # ny
else:
wcs.pixel_shape = shape
wcs.wcs.crpix = [shape[1] / 2, shape[0] / 2] # 1-indexed (x, y)
wcs.wcs.crval = [197.8925, -1.36555556]
wcs.wcs.cunit = ['deg', 'deg']
wcs.wcs.cd = [[-scale * np.cos(rho), scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)]]
if not galactic:
wcs.wcs.radesys = 'ICRS'
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
else:
wcs.wcs.ctype = ['GLON-CAR', 'GLAT-CAR']
return wcs
|
[
"def",
"make_wcs",
"(",
"shape",
",",
"galactic",
"=",
"False",
")",
":",
"wcs",
"=",
"WCS",
"(",
"naxis",
"=",
"2",
")",
"rho",
"=",
"np",
".",
"pi",
"/",
"3.",
"scale",
"=",
"0.1",
"/",
"3600.",
"if",
"astropy_version",
"<",
"'3.1'",
":",
"wcs",
".",
"_naxis1",
"=",
"shape",
"[",
"1",
"]",
"# nx",
"wcs",
".",
"_naxis2",
"=",
"shape",
"[",
"0",
"]",
"# ny",
"else",
":",
"wcs",
".",
"pixel_shape",
"=",
"shape",
"wcs",
".",
"wcs",
".",
"crpix",
"=",
"[",
"shape",
"[",
"1",
"]",
"/",
"2",
",",
"shape",
"[",
"0",
"]",
"/",
"2",
"]",
"# 1-indexed (x, y)",
"wcs",
".",
"wcs",
".",
"crval",
"=",
"[",
"197.8925",
",",
"-",
"1.36555556",
"]",
"wcs",
".",
"wcs",
".",
"cunit",
"=",
"[",
"'deg'",
",",
"'deg'",
"]",
"wcs",
".",
"wcs",
".",
"cd",
"=",
"[",
"[",
"-",
"scale",
"*",
"np",
".",
"cos",
"(",
"rho",
")",
",",
"scale",
"*",
"np",
".",
"sin",
"(",
"rho",
")",
"]",
",",
"[",
"scale",
"*",
"np",
".",
"sin",
"(",
"rho",
")",
",",
"scale",
"*",
"np",
".",
"cos",
"(",
"rho",
")",
"]",
"]",
"if",
"not",
"galactic",
":",
"wcs",
".",
"wcs",
".",
"radesys",
"=",
"'ICRS'",
"wcs",
".",
"wcs",
".",
"ctype",
"=",
"[",
"'RA---TAN'",
",",
"'DEC--TAN'",
"]",
"else",
":",
"wcs",
".",
"wcs",
".",
"ctype",
"=",
"[",
"'GLON-CAR'",
",",
"'GLAT-CAR'",
"]",
"return",
"wcs"
] |
Create a simple celestial WCS object in either the ICRS or Galactic
coordinate frame.
Parameters
----------
shape : 2-tuple of int
The shape of the 2D array to be used with the output
`~astropy.wcs.WCS` object.
galactic : bool, optional
If `True`, then the output WCS will be in the Galactic
coordinate frame. If `False` (default), then the output WCS
will be in the ICRS coordinate frame.
Returns
-------
wcs : `~astropy.wcs.WCS` object
The world coordinate system (WCS) transformation.
See Also
--------
make_imagehdu
Examples
--------
>>> from photutils.datasets import make_wcs
>>> shape = (100, 100)
>>> wcs = make_wcs(shape)
>>> print(wcs.wcs.crpix) # doctest: +FLOAT_CMP
[50. 50.]
>>> print(wcs.wcs.crval) # doctest: +FLOAT_CMP
[197.8925 -1.36555556]
|
[
"Create",
"a",
"simple",
"celestial",
"WCS",
"object",
"in",
"either",
"the",
"ICRS",
"or",
"Galactic",
"coordinate",
"frame",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/make.py#L752-L809
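A sketch of a pixel-to-sky conversion with the returned WCS, using `~astropy.wcs.WCS.wcs_pix2world` (a long-standing astropy method); with the 1-based origin, the reference pixel maps exactly onto ``crval``:

>>> from photutils.datasets import make_wcs
>>> wcs = make_wcs((100, 100))
>>> ra, dec = wcs.wcs_pix2world(50., 50., 1)
>>> print(float(ra), float(dec))  # doctest: +FLOAT_CMP
197.8925 -1.36555556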
|
10,627
|
astropy/photutils
|
photutils/datasets/make.py
|
make_imagehdu
|
def make_imagehdu(data, wcs=None):
"""
Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D
image.
Parameters
----------
data : 2D array-like
The input 2D data.
wcs : `~astropy.wcs.WCS`, optional
The world coordinate system (WCS) transformation to include in
the output FITS header.
Returns
-------
image_hdu : `~astropy.io.fits.ImageHDU`
The FITS `~astropy.io.fits.ImageHDU`.
See Also
--------
make_wcs
Examples
--------
>>> from photutils.datasets import make_imagehdu, make_wcs
>>> shape = (100, 100)
>>> data = np.ones(shape)
>>> wcs = make_wcs(shape)
>>> hdu = make_imagehdu(data, wcs=wcs)
>>> print(hdu.data.shape)
(100, 100)
"""
data = np.asanyarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array')
if wcs is not None:
header = wcs.to_header()
else:
header = None
return fits.ImageHDU(data, header=header)
|
python
|
def make_imagehdu(data, wcs=None):
"""
Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D
image.
Parameters
----------
data : 2D array-like
The input 2D data.
wcs : `~astropy.wcs.WCS`, optional
The world coordinate system (WCS) transformation to include in
the output FITS header.
Returns
-------
image_hdu : `~astropy.io.fits.ImageHDU`
The FITS `~astropy.io.fits.ImageHDU`.
See Also
--------
make_wcs
Examples
--------
>>> from photutils.datasets import make_imagehdu, make_wcs
>>> shape = (100, 100)
>>> data = np.ones(shape)
>>> wcs = make_wcs(shape)
>>> hdu = make_imagehdu(data, wcs=wcs)
>>> print(hdu.data.shape)
(100, 100)
"""
data = np.asanyarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array')
if wcs is not None:
header = wcs.to_header()
else:
header = None
return fits.ImageHDU(data, header=header)
|
[
"def",
"make_imagehdu",
"(",
"data",
",",
"wcs",
"=",
"None",
")",
":",
"data",
"=",
"np",
".",
"asanyarray",
"(",
"data",
")",
"if",
"data",
".",
"ndim",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'data must be a 2D array'",
")",
"if",
"wcs",
"is",
"not",
"None",
":",
"header",
"=",
"wcs",
".",
"to_header",
"(",
")",
"else",
":",
"header",
"=",
"None",
"return",
"fits",
".",
"ImageHDU",
"(",
"data",
",",
"header",
"=",
"header",
")"
] |
Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D
image.
Parameters
----------
data : 2D array-like
The input 2D data.
wcs : `~astropy.wcs.WCS`, optional
The world coordinate system (WCS) transformation to include in
the output FITS header.
Returns
-------
image_hdu : `~astropy.io.fits.ImageHDU`
The FITS `~astropy.io.fits.ImageHDU`.
See Also
--------
make_wcs
Examples
--------
>>> from photutils.datasets import make_imagehdu, make_wcs
>>> shape = (100, 100)
>>> data = np.ones(shape)
>>> wcs = make_wcs(shape)
>>> hdu = make_imagehdu(data, wcs=wcs)
>>> print(hdu.data.shape)
(100, 100)
|
[
"Create",
"a",
"FITS",
"~astropy",
".",
"io",
".",
"fits",
".",
"ImageHDU",
"containing",
"the",
"input",
"2D",
"image",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/make.py#L812-L855
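A sketch showing that the WCS really lands in the output header (standard `~astropy.wcs.WCS.to_header` behavior); the commented line is the usual way to persist the HDU:

>>> import numpy as np
>>> from photutils.datasets import make_imagehdu, make_wcs
>>> shape = (100, 100)
>>> hdu = make_imagehdu(np.ones(shape), wcs=make_wcs(shape))
>>> hdu.header['CTYPE1']
'RA---TAN'
>>> # hdu.writeto('example.fits')  # write to disk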
|
10,628
|
astropy/photutils
|
photutils/centroids/core.py
|
centroid_com
|
def centroid_com(data, mask=None):
"""
Calculate the centroid of an n-dimensional array as its "center of
mass" determined from moments.
Invalid values (e.g. NaNs or infs) in the ``data`` array are
automatically masked.
Parameters
----------
data : array_like
The input n-dimensional array.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The coordinates of the centroid in pixel order (e.g. ``(x, y)``
or ``(x, y, z)``), not numpy axis order.
"""
    data = data.astype(float)
if mask is not None and mask is not np.ma.nomask:
mask = np.asarray(mask, dtype=bool)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data[mask] = 0.
badidx = ~np.isfinite(data)
if np.any(badidx):
        warnings.warn('Input data contains invalid values (e.g. NaNs or infs), '
'which were automatically masked.', AstropyUserWarning)
data[badidx] = 0.
total = np.sum(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
# note the output array is reversed to give (x, y) order
return np.array([np.sum(indices[axis] * data) / total
for axis in range(data.ndim)])[::-1]
|
python
|
def centroid_com(data, mask=None):
"""
Calculate the centroid of an n-dimensional array as its "center of
mass" determined from moments.
Invalid values (e.g. NaNs or infs) in the ``data`` array are
automatically masked.
Parameters
----------
data : array_like
The input n-dimensional array.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The coordinates of the centroid in pixel order (e.g. ``(x, y)``
or ``(x, y, z)``), not numpy axis order.
"""
    data = data.astype(float)
if mask is not None and mask is not np.ma.nomask:
mask = np.asarray(mask, dtype=bool)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data[mask] = 0.
badidx = ~np.isfinite(data)
if np.any(badidx):
        warnings.warn('Input data contains invalid values (e.g. NaNs or infs), '
'which were automatically masked.', AstropyUserWarning)
data[badidx] = 0.
total = np.sum(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
# note the output array is reversed to give (x, y) order
return np.array([np.sum(indices[axis] * data) / total
for axis in range(data.ndim)])[::-1]
|
[
"def",
"centroid_com",
"(",
"data",
",",
"mask",
"=",
"None",
")",
":",
"data",
"=",
"data",
".",
"astype",
"(",
"np",
".",
"float",
")",
"if",
"mask",
"is",
"not",
"None",
"and",
"mask",
"is",
"not",
"np",
".",
"ma",
".",
"nomask",
":",
"mask",
"=",
"np",
".",
"asarray",
"(",
"mask",
",",
"dtype",
"=",
"bool",
")",
"if",
"data",
".",
"shape",
"!=",
"mask",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'data and mask must have the same shape.'",
")",
"data",
"[",
"mask",
"]",
"=",
"0.",
"badidx",
"=",
"~",
"np",
".",
"isfinite",
"(",
"data",
")",
"if",
"np",
".",
"any",
"(",
"badidx",
")",
":",
"warnings",
".",
"warn",
"(",
"'Input data contains input values (e.g. NaNs or infs), '",
"'which were automatically masked.'",
",",
"AstropyUserWarning",
")",
"data",
"[",
"badidx",
"]",
"=",
"0.",
"total",
"=",
"np",
".",
"sum",
"(",
"data",
")",
"indices",
"=",
"np",
".",
"ogrid",
"[",
"[",
"slice",
"(",
"0",
",",
"i",
")",
"for",
"i",
"in",
"data",
".",
"shape",
"]",
"]",
"# note the output array is reversed to give (x, y) order",
"return",
"np",
".",
"array",
"(",
"[",
"np",
".",
"sum",
"(",
"indices",
"[",
"axis",
"]",
"*",
"data",
")",
"/",
"total",
"for",
"axis",
"in",
"range",
"(",
"data",
".",
"ndim",
")",
"]",
")",
"[",
":",
":",
"-",
"1",
"]"
] |
Calculate the centroid of an n-dimensional array as its "center of
mass" determined from moments.
Invalid values (e.g. NaNs or infs) in the ``data`` array are
automatically masked.
Parameters
----------
data : array_like
The input n-dimensional array.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The coordinates of the centroid in pixel order (e.g. ``(x, y)``
or ``(x, y, z)``), not numpy axis order.
|
[
"Calculate",
"the",
"centroid",
"of",
"an",
"n",
"-",
"dimensional",
"array",
"as",
"its",
"center",
"of",
"mass",
"determined",
"from",
"moments",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/centroids/core.py#L74-L117
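A minimal sketch of the ``(x, y)`` output order: a single bright pixel at array position ``[2, 3]`` sits at ``x=3, y=2``:

>>> import numpy as np
>>> from photutils.centroids import centroid_com
>>> data = np.zeros((5, 5))
>>> data[2, 3] = 1.0  # row 2, column 3
>>> x, y = centroid_com(data)
>>> print(x, y)
3.0 2.0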
|
10,629
|
astropy/photutils
|
photutils/centroids/core.py
|
gaussian1d_moments
|
def gaussian1d_moments(data, mask=None):
"""
Estimate 1D Gaussian parameters from the moments of 1D data.
This function can be useful for providing initial parameter values
when fitting a 1D Gaussian to the ``data``.
Parameters
----------
data : array_like (1D)
The 1D array.
mask : array_like (1D bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
amplitude, mean, stddev : float
The estimated parameters of a 1D Gaussian.
"""
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
        warnings.warn('Input data contains invalid values (e.g. NaNs or infs), '
'which were automatically masked.', AstropyUserWarning)
else:
data = np.ma.array(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
data.fill_value = 0.
data = data.filled()
x = np.arange(data.size)
x_mean = np.sum(x * data) / np.sum(data)
x_stddev = np.sqrt(abs(np.sum(data * (x - x_mean)**2) / np.sum(data)))
amplitude = np.ptp(data)
return amplitude, x_mean, x_stddev
|
python
|
def gaussian1d_moments(data, mask=None):
"""
Estimate 1D Gaussian parameters from the moments of 1D data.
This function can be useful for providing initial parameter values
when fitting a 1D Gaussian to the ``data``.
Parameters
----------
data : array_like (1D)
The 1D array.
mask : array_like (1D bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
amplitude, mean, stddev : float
The estimated parameters of a 1D Gaussian.
"""
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
        warnings.warn('Input data contains invalid values (e.g. NaNs or infs), '
'which were automatically masked.', AstropyUserWarning)
else:
data = np.ma.array(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
data.fill_value = 0.
data = data.filled()
x = np.arange(data.size)
x_mean = np.sum(x * data) / np.sum(data)
x_stddev = np.sqrt(abs(np.sum(data * (x - x_mean)**2) / np.sum(data)))
amplitude = np.ptp(data)
return amplitude, x_mean, x_stddev
|
[
"def",
"gaussian1d_moments",
"(",
"data",
",",
"mask",
"=",
"None",
")",
":",
"if",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isfinite",
"(",
"data",
")",
")",
":",
"data",
"=",
"np",
".",
"ma",
".",
"masked_invalid",
"(",
"data",
")",
"warnings",
".",
"warn",
"(",
"'Input data contains input values (e.g. NaNs or infs), '",
"'which were automatically masked.'",
",",
"AstropyUserWarning",
")",
"else",
":",
"data",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"data",
")",
"if",
"mask",
"is",
"not",
"None",
"and",
"mask",
"is",
"not",
"np",
".",
"ma",
".",
"nomask",
":",
"mask",
"=",
"np",
".",
"asanyarray",
"(",
"mask",
")",
"if",
"data",
".",
"shape",
"!=",
"mask",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'data and mask must have the same shape.'",
")",
"data",
".",
"mask",
"|=",
"mask",
"data",
".",
"fill_value",
"=",
"0.",
"data",
"=",
"data",
".",
"filled",
"(",
")",
"x",
"=",
"np",
".",
"arange",
"(",
"data",
".",
"size",
")",
"x_mean",
"=",
"np",
".",
"sum",
"(",
"x",
"*",
"data",
")",
"/",
"np",
".",
"sum",
"(",
"data",
")",
"x_stddev",
"=",
"np",
".",
"sqrt",
"(",
"abs",
"(",
"np",
".",
"sum",
"(",
"data",
"*",
"(",
"x",
"-",
"x_mean",
")",
"**",
"2",
")",
"/",
"np",
".",
"sum",
"(",
"data",
")",
")",
")",
"amplitude",
"=",
"np",
".",
"ptp",
"(",
"data",
")",
"return",
"amplitude",
",",
"x_mean",
",",
"x_stddev"
] |
Estimate 1D Gaussian parameters from the moments of 1D data.
This function can be useful for providing initial parameter values
when fitting a 1D Gaussian to the ``data``.
Parameters
----------
data : array_like (1D)
The 1D array.
mask : array_like (1D bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
amplitude, mean, stddev : float
The estimated parameters of a 1D Gaussian.
|
[
"Estimate",
"1D",
"Gaussian",
"parameters",
"from",
"the",
"moments",
"of",
"1D",
"data",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/centroids/core.py#L120-L163
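A sketch on a noiseless 1D profile (no output asserted, since the moment estimates are only approximate on a truncated grid):

>>> import numpy as np
>>> from photutils.centroids import gaussian1d_moments
>>> x = np.arange(100)
>>> data = 50. * np.exp(-0.5 * ((x - 40.) / 5.)**2)
>>> amplitude, mean, stddev = gaussian1d_moments(data)
>>> # roughly (50., 40., 5.) for this profile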
|
10,630
|
astropy/photutils
|
photutils/centroids/core.py
|
fit_2dgaussian
|
def fit_2dgaussian(data, error=None, mask=None):
"""
Fit a 2D Gaussian plus a constant to a 2D image.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D array of the image.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
result : A `GaussianConst2D` model instance.
The best-fitting Gaussian 2D model.
"""
from ..morphology import data_properties # prevent circular imports
data = np.ma.asanyarray(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
        warnings.warn('Input data contains invalid values (e.g. NaNs or infs), '
'which were automatically masked.', AstropyUserWarning)
if error is not None:
error = np.ma.masked_invalid(error)
if data.shape != error.shape:
raise ValueError('data and error must have the same shape.')
data.mask |= error.mask
weights = 1.0 / error.clip(min=1.e-30)
else:
weights = np.ones(data.shape)
if np.ma.count(data) < 7:
        raise ValueError('Input data must have at least 7 unmasked values to '
'fit a 2D Gaussian plus a constant.')
# assign zero weight to masked pixels
if data.mask is not np.ma.nomask:
weights[data.mask] = 0.
mask = data.mask
data.fill_value = 0.0
data = data.filled()
# Subtract the minimum of the data as a crude background estimate.
# This will also make the data values positive, preventing issues with
# the moment estimation in data_properties (moments from negative data
# values can yield undefined Gaussian parameters, e.g. x/y_stddev).
props = data_properties(data - np.min(data), mask=mask)
init_const = 0. # subtracted data minimum above
init_amplitude = np.ptp(data)
g_init = GaussianConst2D(constant=init_const, amplitude=init_amplitude,
x_mean=props.xcentroid.value,
y_mean=props.ycentroid.value,
x_stddev=props.semimajor_axis_sigma.value,
y_stddev=props.semiminor_axis_sigma.value,
theta=props.orientation.value)
fitter = LevMarLSQFitter()
y, x = np.indices(data.shape)
gfit = fitter(g_init, x, y, data, weights=weights)
return gfit
|
python
|
def fit_2dgaussian(data, error=None, mask=None):
"""
Fit a 2D Gaussian plus a constant to a 2D image.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D array of the image.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
result : A `GaussianConst2D` model instance.
The best-fitting Gaussian 2D model.
"""
from ..morphology import data_properties # prevent circular imports
data = np.ma.asanyarray(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
        warnings.warn('Input data contains invalid values (e.g. NaNs or infs), '
'which were automatically masked.', AstropyUserWarning)
if error is not None:
error = np.ma.masked_invalid(error)
if data.shape != error.shape:
raise ValueError('data and error must have the same shape.')
data.mask |= error.mask
weights = 1.0 / error.clip(min=1.e-30)
else:
weights = np.ones(data.shape)
if np.ma.count(data) < 7:
        raise ValueError('Input data must have at least 7 unmasked values to '
'fit a 2D Gaussian plus a constant.')
# assign zero weight to masked pixels
if data.mask is not np.ma.nomask:
weights[data.mask] = 0.
mask = data.mask
data.fill_value = 0.0
data = data.filled()
# Subtract the minimum of the data as a crude background estimate.
# This will also make the data values positive, preventing issues with
# the moment estimation in data_properties (moments from negative data
# values can yield undefined Gaussian parameters, e.g. x/y_stddev).
props = data_properties(data - np.min(data), mask=mask)
init_const = 0. # subtracted data minimum above
init_amplitude = np.ptp(data)
g_init = GaussianConst2D(constant=init_const, amplitude=init_amplitude,
x_mean=props.xcentroid.value,
y_mean=props.ycentroid.value,
x_stddev=props.semimajor_axis_sigma.value,
y_stddev=props.semiminor_axis_sigma.value,
theta=props.orientation.value)
fitter = LevMarLSQFitter()
y, x = np.indices(data.shape)
gfit = fitter(g_init, x, y, data, weights=weights)
return gfit
|
[
"def",
"fit_2dgaussian",
"(",
"data",
",",
"error",
"=",
"None",
",",
"mask",
"=",
"None",
")",
":",
"from",
".",
".",
"morphology",
"import",
"data_properties",
"# prevent circular imports",
"data",
"=",
"np",
".",
"ma",
".",
"asanyarray",
"(",
"data",
")",
"if",
"mask",
"is",
"not",
"None",
"and",
"mask",
"is",
"not",
"np",
".",
"ma",
".",
"nomask",
":",
"mask",
"=",
"np",
".",
"asanyarray",
"(",
"mask",
")",
"if",
"data",
".",
"shape",
"!=",
"mask",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'data and mask must have the same shape.'",
")",
"data",
".",
"mask",
"|=",
"mask",
"if",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isfinite",
"(",
"data",
")",
")",
":",
"data",
"=",
"np",
".",
"ma",
".",
"masked_invalid",
"(",
"data",
")",
"warnings",
".",
"warn",
"(",
"'Input data contains input values (e.g. NaNs or infs), '",
"'which were automatically masked.'",
",",
"AstropyUserWarning",
")",
"if",
"error",
"is",
"not",
"None",
":",
"error",
"=",
"np",
".",
"ma",
".",
"masked_invalid",
"(",
"error",
")",
"if",
"data",
".",
"shape",
"!=",
"error",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'data and error must have the same shape.'",
")",
"data",
".",
"mask",
"|=",
"error",
".",
"mask",
"weights",
"=",
"1.0",
"/",
"error",
".",
"clip",
"(",
"min",
"=",
"1.e-30",
")",
"else",
":",
"weights",
"=",
"np",
".",
"ones",
"(",
"data",
".",
"shape",
")",
"if",
"np",
".",
"ma",
".",
"count",
"(",
"data",
")",
"<",
"7",
":",
"raise",
"ValueError",
"(",
"'Input data must have a least 7 unmasked values to '",
"'fit a 2D Gaussian plus a constant.'",
")",
"# assign zero weight to masked pixels",
"if",
"data",
".",
"mask",
"is",
"not",
"np",
".",
"ma",
".",
"nomask",
":",
"weights",
"[",
"data",
".",
"mask",
"]",
"=",
"0.",
"mask",
"=",
"data",
".",
"mask",
"data",
".",
"fill_value",
"=",
"0.0",
"data",
"=",
"data",
".",
"filled",
"(",
")",
"# Subtract the minimum of the data as a crude background estimate.",
"# This will also make the data values positive, preventing issues with",
"# the moment estimation in data_properties (moments from negative data",
"# values can yield undefined Gaussian parameters, e.g. x/y_stddev).",
"props",
"=",
"data_properties",
"(",
"data",
"-",
"np",
".",
"min",
"(",
"data",
")",
",",
"mask",
"=",
"mask",
")",
"init_const",
"=",
"0.",
"# subtracted data minimum above",
"init_amplitude",
"=",
"np",
".",
"ptp",
"(",
"data",
")",
"g_init",
"=",
"GaussianConst2D",
"(",
"constant",
"=",
"init_const",
",",
"amplitude",
"=",
"init_amplitude",
",",
"x_mean",
"=",
"props",
".",
"xcentroid",
".",
"value",
",",
"y_mean",
"=",
"props",
".",
"ycentroid",
".",
"value",
",",
"x_stddev",
"=",
"props",
".",
"semimajor_axis_sigma",
".",
"value",
",",
"y_stddev",
"=",
"props",
".",
"semiminor_axis_sigma",
".",
"value",
",",
"theta",
"=",
"props",
".",
"orientation",
".",
"value",
")",
"fitter",
"=",
"LevMarLSQFitter",
"(",
")",
"y",
",",
"x",
"=",
"np",
".",
"indices",
"(",
"data",
".",
"shape",
")",
"gfit",
"=",
"fitter",
"(",
"g_init",
",",
"x",
",",
"y",
",",
"data",
",",
"weights",
"=",
"weights",
")",
"return",
"gfit"
] |
Fit a 2D Gaussian plus a constant to a 2D image.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D array of the image.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
result : A `GaussianConst2D` model instance.
The best-fitting Gaussian 2D model.
|
[
"Fit",
"a",
"2D",
"Gaussian",
"plus",
"a",
"constant",
"to",
"a",
"2D",
"image",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/centroids/core.py#L166-L247
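A sketch fitting a single noiseless Gaussian plus an offset; parameter names follow ``GaussianConst2D`` (exact recovery depends on the fit, so no output is asserted):

>>> import numpy as np
>>> from photutils.centroids import fit_2dgaussian
>>> y, x = np.mgrid[0:50, 0:50]
>>> data = 3. + 10. * np.exp(-0.5 * ((x - 24.2)**2 / 9. + (y - 26.7)**2 / 4.))
>>> gfit = fit_2dgaussian(data)
>>> # gfit.x_mean.value ~ 24.2, gfit.y_mean.value ~ 26.7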
|
10,631
|
astropy/photutils
|
photutils/centroids/core.py
|
centroid_1dg
|
def centroid_1dg(data, error=None, mask=None):
"""
Calculate the centroid of a 2D array by fitting 1D Gaussians to the
marginal ``x`` and ``y`` distributions of the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
"""
data = np.ma.asanyarray(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
        warnings.warn('Input data contains invalid values (e.g. NaNs or infs), '
'which were automatically masked.', AstropyUserWarning)
if error is not None:
error = np.ma.masked_invalid(error)
if data.shape != error.shape:
raise ValueError('data and error must have the same shape.')
data.mask |= error.mask
error.mask = data.mask
xy_error = np.array([np.sqrt(np.ma.sum(error**2, axis=i))
for i in [0, 1]])
xy_weights = [(1.0 / xy_error[i].clip(min=1.e-30)) for i in [0, 1]]
else:
xy_weights = [np.ones(data.shape[i]) for i in [1, 0]]
# assign zero weight to masked pixels
if data.mask is not np.ma.nomask:
bad_idx = [np.all(data.mask, axis=i) for i in [0, 1]]
for i in [0, 1]:
xy_weights[i][bad_idx[i]] = 0.
xy_data = np.array([np.ma.sum(data, axis=i) for i in [0, 1]])
constant_init = np.ma.min(data)
centroid = []
for (data_i, weights_i) in zip(xy_data, xy_weights):
params_init = gaussian1d_moments(data_i)
g_init = Const1D(constant_init) + Gaussian1D(*params_init)
fitter = LevMarLSQFitter()
x = np.arange(data_i.size)
g_fit = fitter(g_init, x, data_i, weights=weights_i)
centroid.append(g_fit.mean_1.value)
return np.array(centroid)
|
python
|
def centroid_1dg(data, error=None, mask=None):
"""
Calculate the centroid of a 2D array by fitting 1D Gaussians to the
marginal ``x`` and ``y`` distributions of the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
"""
data = np.ma.asanyarray(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
        warnings.warn('Input data contains invalid values (e.g. NaNs or infs), '
'which were automatically masked.', AstropyUserWarning)
if error is not None:
error = np.ma.masked_invalid(error)
if data.shape != error.shape:
raise ValueError('data and error must have the same shape.')
data.mask |= error.mask
error.mask = data.mask
xy_error = np.array([np.sqrt(np.ma.sum(error**2, axis=i))
for i in [0, 1]])
xy_weights = [(1.0 / xy_error[i].clip(min=1.e-30)) for i in [0, 1]]
else:
xy_weights = [np.ones(data.shape[i]) for i in [1, 0]]
# assign zero weight to masked pixels
if data.mask is not np.ma.nomask:
bad_idx = [np.all(data.mask, axis=i) for i in [0, 1]]
for i in [0, 1]:
xy_weights[i][bad_idx[i]] = 0.
xy_data = np.array([np.ma.sum(data, axis=i) for i in [0, 1]])
constant_init = np.ma.min(data)
centroid = []
for (data_i, weights_i) in zip(xy_data, xy_weights):
params_init = gaussian1d_moments(data_i)
g_init = Const1D(constant_init) + Gaussian1D(*params_init)
fitter = LevMarLSQFitter()
x = np.arange(data_i.size)
g_fit = fitter(g_init, x, data_i, weights=weights_i)
centroid.append(g_fit.mean_1.value)
return np.array(centroid)
|
[
"def",
"centroid_1dg",
"(",
"data",
",",
"error",
"=",
"None",
",",
"mask",
"=",
"None",
")",
":",
"data",
"=",
"np",
".",
"ma",
".",
"asanyarray",
"(",
"data",
")",
"if",
"mask",
"is",
"not",
"None",
"and",
"mask",
"is",
"not",
"np",
".",
"ma",
".",
"nomask",
":",
"mask",
"=",
"np",
".",
"asanyarray",
"(",
"mask",
")",
"if",
"data",
".",
"shape",
"!=",
"mask",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'data and mask must have the same shape.'",
")",
"data",
".",
"mask",
"|=",
"mask",
"if",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isfinite",
"(",
"data",
")",
")",
":",
"data",
"=",
"np",
".",
"ma",
".",
"masked_invalid",
"(",
"data",
")",
"warnings",
".",
"warn",
"(",
"'Input data contains input values (e.g. NaNs or infs), '",
"'which were automatically masked.'",
",",
"AstropyUserWarning",
")",
"if",
"error",
"is",
"not",
"None",
":",
"error",
"=",
"np",
".",
"ma",
".",
"masked_invalid",
"(",
"error",
")",
"if",
"data",
".",
"shape",
"!=",
"error",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'data and error must have the same shape.'",
")",
"data",
".",
"mask",
"|=",
"error",
".",
"mask",
"error",
".",
"mask",
"=",
"data",
".",
"mask",
"xy_error",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"sqrt",
"(",
"np",
".",
"ma",
".",
"sum",
"(",
"error",
"**",
"2",
",",
"axis",
"=",
"i",
")",
")",
"for",
"i",
"in",
"[",
"0",
",",
"1",
"]",
"]",
")",
"xy_weights",
"=",
"[",
"(",
"1.0",
"/",
"xy_error",
"[",
"i",
"]",
".",
"clip",
"(",
"min",
"=",
"1.e-30",
")",
")",
"for",
"i",
"in",
"[",
"0",
",",
"1",
"]",
"]",
"else",
":",
"xy_weights",
"=",
"[",
"np",
".",
"ones",
"(",
"data",
".",
"shape",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"[",
"1",
",",
"0",
"]",
"]",
"# assign zero weight to masked pixels",
"if",
"data",
".",
"mask",
"is",
"not",
"np",
".",
"ma",
".",
"nomask",
":",
"bad_idx",
"=",
"[",
"np",
".",
"all",
"(",
"data",
".",
"mask",
",",
"axis",
"=",
"i",
")",
"for",
"i",
"in",
"[",
"0",
",",
"1",
"]",
"]",
"for",
"i",
"in",
"[",
"0",
",",
"1",
"]",
":",
"xy_weights",
"[",
"i",
"]",
"[",
"bad_idx",
"[",
"i",
"]",
"]",
"=",
"0.",
"xy_data",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"ma",
".",
"sum",
"(",
"data",
",",
"axis",
"=",
"i",
")",
"for",
"i",
"in",
"[",
"0",
",",
"1",
"]",
"]",
")",
"constant_init",
"=",
"np",
".",
"ma",
".",
"min",
"(",
"data",
")",
"centroid",
"=",
"[",
"]",
"for",
"(",
"data_i",
",",
"weights_i",
")",
"in",
"zip",
"(",
"xy_data",
",",
"xy_weights",
")",
":",
"params_init",
"=",
"gaussian1d_moments",
"(",
"data_i",
")",
"g_init",
"=",
"Const1D",
"(",
"constant_init",
")",
"+",
"Gaussian1D",
"(",
"*",
"params_init",
")",
"fitter",
"=",
"LevMarLSQFitter",
"(",
")",
"x",
"=",
"np",
".",
"arange",
"(",
"data_i",
".",
"size",
")",
"g_fit",
"=",
"fitter",
"(",
"g_init",
",",
"x",
",",
"data_i",
",",
"weights",
"=",
"weights_i",
")",
"centroid",
".",
"append",
"(",
"g_fit",
".",
"mean_1",
".",
"value",
")",
"return",
"np",
".",
"array",
"(",
"centroid",
")"
] |
Calculate the centroid of a 2D array by fitting 1D Gaussians to the
marginal ``x`` and ``y`` distributions of the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
|
[
"Calculate",
"the",
"centroid",
"of",
"a",
"2D",
"array",
"by",
"fitting",
"1D",
"Gaussians",
"to",
"the",
"marginal",
"x",
"and",
"y",
"distributions",
"of",
"the",
"array",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/centroids/core.py#L250-L322
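A sketch on a noiseless off-center 2D Gaussian (no output asserted; the marginal fits should land very close to the true center):

>>> import numpy as np
>>> from photutils.centroids import centroid_1dg
>>> y, x = np.mgrid[0:50, 0:50]
>>> data = np.exp(-0.5 * ((x - 25.3)**2 + (y - 27.2)**2) / 3.0**2)
>>> xc, yc = centroid_1dg(data)
>>> # (xc, yc) ~ (25.3, 27.2)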
|
10,632
|
astropy/photutils
|
photutils/centroids/core.py
|
centroid_sources
|
def centroid_sources(data, xpos, ypos, box_size=11, footprint=None,
error=None, mask=None, centroid_func=centroid_com):
"""
Calculate the centroid of sources at the defined positions.
A cutout image centered on each input position will be used to
calculate the centroid position. The cutout image is defined either
using the ``box_size`` or ``footprint`` keyword. The ``footprint``
keyword can be used to create a non-rectangular cutout image.
Parameters
----------
data : array_like
The 2D array of the image.
xpos, ypos : float or array-like of float
The initial ``x`` and ``y`` pixel position(s) of the center
        position. A cutout image centered on this position will be used to
calculate the centroid.
box_size : int or array-like of int, optional
The size of the cutout image along each axis. If ``box_size``
is a number, then a square cutout of ``box_size`` will be
created. If ``box_size`` has two elements, they should be in
``(ny, nx)`` order.
footprint : `~numpy.ndarray` of bools, optional
A 2D boolean array where `True` values describe the local
footprint region to cutout. ``footprint`` can be used to create
a non-rectangular cutout image, in which case the input ``xpos``
and ``ypos`` represent the center of the minimal bounding box
for the input ``footprint``. ``box_size=(n, m)`` is equivalent
to ``footprint=np.ones((n, m))``. Either ``box_size`` or
``footprint`` must be defined. If they are both defined, then
``footprint`` overrides ``box_size``.
mask : array_like, bool, optional
A 2D boolean array with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data`` is
masked.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
``error`` must have the same shape as ``data``. ``error`` will
be used only if supported by the input ``centroid_func``.
centroid_func : callable, optional
A callable object (e.g. function or class) that is used to
calculate the centroid of a 2D array. The ``centroid_func``
must accept a 2D `~numpy.ndarray`, have a ``mask`` keyword and
optionally an ``error`` keyword. The callable object must
return a tuple of two 1D `~numpy.ndarray`\\s, representing the x
and y centroids. The default is
`~photutils.centroids.centroid_com`.
Returns
-------
xcentroid, ycentroid : `~numpy.ndarray`
The ``x`` and ``y`` pixel position(s) of the centroids.
"""
xpos = np.atleast_1d(xpos)
ypos = np.atleast_1d(ypos)
if xpos.ndim != 1:
raise ValueError('xpos must be a 1D array.')
if ypos.ndim != 1:
raise ValueError('ypos must be a 1D array.')
if footprint is None:
if box_size is None:
raise ValueError('box_size or footprint must be defined.')
else:
box_size = np.atleast_1d(box_size)
if len(box_size) == 1:
box_size = np.repeat(box_size, 2)
if len(box_size) != 2:
raise ValueError('box_size must have 1 or 2 elements.')
footprint = np.ones(box_size, dtype=bool)
else:
footprint = np.asanyarray(footprint, dtype=bool)
if footprint.ndim != 2:
raise ValueError('footprint must be a 2D array.')
use_error = False
spec = inspect.getfullargspec(centroid_func)
if 'mask' not in spec.args:
raise ValueError('The input "centroid_func" must have a "mask" '
'keyword.')
if 'error' in spec.args:
use_error = True
xcentroids = []
ycentroids = []
for xp, yp in zip(xpos, ypos):
slices_large, slices_small = overlap_slices(data.shape,
footprint.shape, (yp, xp))
data_cutout = data[slices_large]
mask_cutout = None
if mask is not None:
mask_cutout = mask[slices_large]
footprint_mask = ~footprint
# trim footprint mask if partial overlap on the data
footprint_mask = footprint_mask[slices_small]
if mask_cutout is None:
mask_cutout = footprint_mask
else:
# combine the input mask and footprint mask
mask_cutout = np.logical_or(mask_cutout, footprint_mask)
if error is not None and use_error:
error_cutout = error[slices_large]
xcen, ycen = centroid_func(data_cutout, mask=mask_cutout,
error=error_cutout)
else:
xcen, ycen = centroid_func(data_cutout, mask=mask_cutout)
xcentroids.append(xcen + slices_large[1].start)
ycentroids.append(ycen + slices_large[0].start)
return np.array(xcentroids), np.array(ycentroids)
|
python
|
def centroid_sources(data, xpos, ypos, box_size=11, footprint=None,
error=None, mask=None, centroid_func=centroid_com):
"""
Calculate the centroid of sources at the defined positions.
A cutout image centered on each input position will be used to
calculate the centroid position. The cutout image is defined either
using the ``box_size`` or ``footprint`` keyword. The ``footprint``
keyword can be used to create a non-rectangular cutout image.
Parameters
----------
data : array_like
The 2D array of the image.
xpos, ypos : float or array-like of float
The initial ``x`` and ``y`` pixel position(s) of the center
        position. A cutout image centered on this position will be used to
calculate the centroid.
box_size : int or array-like of int, optional
The size of the cutout image along each axis. If ``box_size``
is a number, then a square cutout of ``box_size`` will be
created. If ``box_size`` has two elements, they should be in
``(ny, nx)`` order.
footprint : `~numpy.ndarray` of bools, optional
A 2D boolean array where `True` values describe the local
footprint region to cutout. ``footprint`` can be used to create
a non-rectangular cutout image, in which case the input ``xpos``
and ``ypos`` represent the center of the minimal bounding box
for the input ``footprint``. ``box_size=(n, m)`` is equivalent
to ``footprint=np.ones((n, m))``. Either ``box_size`` or
``footprint`` must be defined. If they are both defined, then
``footprint`` overrides ``box_size``.
mask : array_like, bool, optional
A 2D boolean array with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data`` is
masked.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
``error`` must have the same shape as ``data``. ``error`` will
be used only if supported by the input ``centroid_func``.
centroid_func : callable, optional
A callable object (e.g. function or class) that is used to
calculate the centroid of a 2D array. The ``centroid_func``
must accept a 2D `~numpy.ndarray`, have a ``mask`` keyword and
optionally an ``error`` keyword. The callable object must
return a tuple of two 1D `~numpy.ndarray`\\s, representing the x
and y centroids. The default is
`~photutils.centroids.centroid_com`.
Returns
-------
xcentroid, ycentroid : `~numpy.ndarray`
The ``x`` and ``y`` pixel position(s) of the centroids.
"""
xpos = np.atleast_1d(xpos)
ypos = np.atleast_1d(ypos)
if xpos.ndim != 1:
raise ValueError('xpos must be a 1D array.')
if ypos.ndim != 1:
raise ValueError('ypos must be a 1D array.')
if footprint is None:
if box_size is None:
raise ValueError('box_size or footprint must be defined.')
else:
box_size = np.atleast_1d(box_size)
if len(box_size) == 1:
box_size = np.repeat(box_size, 2)
if len(box_size) != 2:
raise ValueError('box_size must have 1 or 2 elements.')
footprint = np.ones(box_size, dtype=bool)
else:
footprint = np.asanyarray(footprint, dtype=bool)
if footprint.ndim != 2:
raise ValueError('footprint must be a 2D array.')
use_error = False
spec = inspect.getfullargspec(centroid_func)
if 'mask' not in spec.args:
raise ValueError('The input "centroid_func" must have a "mask" '
'keyword.')
if 'error' in spec.args:
use_error = True
xcentroids = []
ycentroids = []
for xp, yp in zip(xpos, ypos):
slices_large, slices_small = overlap_slices(data.shape,
footprint.shape, (yp, xp))
data_cutout = data[slices_large]
mask_cutout = None
if mask is not None:
mask_cutout = mask[slices_large]
footprint_mask = ~footprint
# trim footprint mask if partial overlap on the data
footprint_mask = footprint_mask[slices_small]
if mask_cutout is None:
mask_cutout = footprint_mask
else:
# combine the input mask and footprint mask
mask_cutout = np.logical_or(mask_cutout, footprint_mask)
if error is not None and use_error:
error_cutout = error[slices_large]
xcen, ycen = centroid_func(data_cutout, mask=mask_cutout,
error=error_cutout)
else:
xcen, ycen = centroid_func(data_cutout, mask=mask_cutout)
xcentroids.append(xcen + slices_large[1].start)
ycentroids.append(ycen + slices_large[0].start)
return np.array(xcentroids), np.array(ycentroids)
|
[
"def",
"centroid_sources",
"(",
"data",
",",
"xpos",
",",
"ypos",
",",
"box_size",
"=",
"11",
",",
"footprint",
"=",
"None",
",",
"error",
"=",
"None",
",",
"mask",
"=",
"None",
",",
"centroid_func",
"=",
"centroid_com",
")",
":",
"xpos",
"=",
"np",
".",
"atleast_1d",
"(",
"xpos",
")",
"ypos",
"=",
"np",
".",
"atleast_1d",
"(",
"ypos",
")",
"if",
"xpos",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'xpos must be a 1D array.'",
")",
"if",
"ypos",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'ypos must be a 1D array.'",
")",
"if",
"footprint",
"is",
"None",
":",
"if",
"box_size",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'box_size or footprint must be defined.'",
")",
"else",
":",
"box_size",
"=",
"np",
".",
"atleast_1d",
"(",
"box_size",
")",
"if",
"len",
"(",
"box_size",
")",
"==",
"1",
":",
"box_size",
"=",
"np",
".",
"repeat",
"(",
"box_size",
",",
"2",
")",
"if",
"len",
"(",
"box_size",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'box_size must have 1 or 2 elements.'",
")",
"footprint",
"=",
"np",
".",
"ones",
"(",
"box_size",
",",
"dtype",
"=",
"bool",
")",
"else",
":",
"footprint",
"=",
"np",
".",
"asanyarray",
"(",
"footprint",
",",
"dtype",
"=",
"bool",
")",
"if",
"footprint",
".",
"ndim",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'footprint must be a 2D array.'",
")",
"use_error",
"=",
"False",
"spec",
"=",
"inspect",
".",
"getfullargspec",
"(",
"centroid_func",
")",
"if",
"'mask'",
"not",
"in",
"spec",
".",
"args",
":",
"raise",
"ValueError",
"(",
"'The input \"centroid_func\" must have a \"mask\" '",
"'keyword.'",
")",
"if",
"'error'",
"in",
"spec",
".",
"args",
":",
"use_error",
"=",
"True",
"xcentroids",
"=",
"[",
"]",
"ycentroids",
"=",
"[",
"]",
"for",
"xp",
",",
"yp",
"in",
"zip",
"(",
"xpos",
",",
"ypos",
")",
":",
"slices_large",
",",
"slices_small",
"=",
"overlap_slices",
"(",
"data",
".",
"shape",
",",
"footprint",
".",
"shape",
",",
"(",
"yp",
",",
"xp",
")",
")",
"data_cutout",
"=",
"data",
"[",
"slices_large",
"]",
"mask_cutout",
"=",
"None",
"if",
"mask",
"is",
"not",
"None",
":",
"mask_cutout",
"=",
"mask",
"[",
"slices_large",
"]",
"footprint_mask",
"=",
"~",
"footprint",
"# trim footprint mask if partial overlap on the data",
"footprint_mask",
"=",
"footprint_mask",
"[",
"slices_small",
"]",
"if",
"mask_cutout",
"is",
"None",
":",
"mask_cutout",
"=",
"footprint_mask",
"else",
":",
"# combine the input mask and footprint mask",
"mask_cutout",
"=",
"np",
".",
"logical_or",
"(",
"mask_cutout",
",",
"footprint_mask",
")",
"if",
"error",
"is",
"not",
"None",
"and",
"use_error",
":",
"error_cutout",
"=",
"error",
"[",
"slices_large",
"]",
"xcen",
",",
"ycen",
"=",
"centroid_func",
"(",
"data_cutout",
",",
"mask",
"=",
"mask_cutout",
",",
"error",
"=",
"error_cutout",
")",
"else",
":",
"xcen",
",",
"ycen",
"=",
"centroid_func",
"(",
"data_cutout",
",",
"mask",
"=",
"mask_cutout",
")",
"xcentroids",
".",
"append",
"(",
"xcen",
"+",
"slices_large",
"[",
"1",
"]",
".",
"start",
")",
"ycentroids",
".",
"append",
"(",
"ycen",
"+",
"slices_large",
"[",
"0",
"]",
".",
"start",
")",
"return",
"np",
".",
"array",
"(",
"xcentroids",
")",
",",
"np",
".",
"array",
"(",
"ycentroids",
")"
] |
Calculate the centroid of sources at the defined positions.
A cutout image centered on each input position will be used to
calculate the centroid position. The cutout image is defined either
using the ``box_size`` or ``footprint`` keyword. The ``footprint``
keyword can be used to create a non-rectangular cutout image.
Parameters
----------
data : array_like
The 2D array of the image.
xpos, ypos : float or array-like of float
The initial ``x`` and ``y`` pixel position(s) of the center
    position. A cutout image centered on this position will be used to
calculate the centroid.
box_size : int or array-like of int, optional
The size of the cutout image along each axis. If ``box_size``
is a number, then a square cutout of ``box_size`` will be
created. If ``box_size`` has two elements, they should be in
``(ny, nx)`` order.
footprint : `~numpy.ndarray` of bools, optional
A 2D boolean array where `True` values describe the local
footprint region to cutout. ``footprint`` can be used to create
a non-rectangular cutout image, in which case the input ``xpos``
and ``ypos`` represent the center of the minimal bounding box
for the input ``footprint``. ``box_size=(n, m)`` is equivalent
to ``footprint=np.ones((n, m))``. Either ``box_size`` or
``footprint`` must be defined. If they are both defined, then
``footprint`` overrides ``box_size``.
mask : array_like, bool, optional
A 2D boolean array with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data`` is
masked.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
``error`` must have the same shape as ``data``. ``error`` will
be used only if supported by the input ``centroid_func``.
centroid_func : callable, optional
A callable object (e.g. function or class) that is used to
calculate the centroid of a 2D array. The ``centroid_func``
must accept a 2D `~numpy.ndarray`, have a ``mask`` keyword and
optionally an ``error`` keyword. The callable object must
return a tuple of two 1D `~numpy.ndarray`\\s, representing the x
and y centroids. The default is
`~photutils.centroids.centroid_com`.
Returns
-------
xcentroid, ycentroid : `~numpy.ndarray`
The ``x`` and ``y`` pixel position(s) of the centroids.
|
[
"Calculate",
"the",
"centroid",
"of",
"sources",
"at",
"the",
"defined",
"positions",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/centroids/core.py#L358-L481
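A sketch combining this with ``make_4gaussians_image``; the initial positions are the ``x_mean``/``y_mean`` values from that function's table above:

>>> from photutils import datasets
>>> from photutils.centroids import centroid_sources, centroid_com
>>> data = datasets.make_4gaussians_image()
>>> x_init = [160, 25, 150, 90]
>>> y_init = [70, 40, 25, 60]
>>> x, y = centroid_sources(data, x_init, y_init, box_size=21,
...                         centroid_func=centroid_com)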
|
10,633
|
astropy/photutils
|
photutils/centroids/core.py
|
GaussianConst2D.evaluate
|
def evaluate(x, y, constant, amplitude, x_mean, y_mean, x_stddev,
y_stddev, theta):
"""Two dimensional Gaussian plus constant function."""
model = Const2D(constant)(x, y) + Gaussian2D(amplitude, x_mean,
y_mean, x_stddev,
y_stddev, theta)(x, y)
return model
|
python
|
def evaluate(x, y, constant, amplitude, x_mean, y_mean, x_stddev,
y_stddev, theta):
"""Two dimensional Gaussian plus constant function."""
model = Const2D(constant)(x, y) + Gaussian2D(amplitude, x_mean,
y_mean, x_stddev,
y_stddev, theta)(x, y)
return model
|
[
"def",
"evaluate",
"(",
"x",
",",
"y",
",",
"constant",
",",
"amplitude",
",",
"x_mean",
",",
"y_mean",
",",
"x_stddev",
",",
"y_stddev",
",",
"theta",
")",
":",
"model",
"=",
"Const2D",
"(",
"constant",
")",
"(",
"x",
",",
"y",
")",
"+",
"Gaussian2D",
"(",
"amplitude",
",",
"x_mean",
",",
"y_mean",
",",
"x_stddev",
",",
"y_stddev",
",",
"theta",
")",
"(",
"x",
",",
"y",
")",
"return",
"model"
] |
Two dimensional Gaussian plus constant function.
|
[
"Two",
"dimensional",
"Gaussian",
"plus",
"constant",
"function",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/centroids/core.py#L61-L68
|
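As a hedged illustration of the composition evaluated by ``GaussianConst2D.evaluate`` above, the same constant-plus-Gaussian model can be built from the public `astropy.modeling` models it wraps (the parameter values below are arbitrary):

import numpy as np
from astropy.modeling.models import Const2D, Gaussian2D

y, x = np.mgrid[0:25, 0:25]
model = Const2D(amplitude=2.0) + Gaussian2D(amplitude=10.0, x_mean=12.0,
                                            y_mean=12.0, x_stddev=2.0,
                                            y_stddev=3.0, theta=0.3)
image = model(x, y)  # constant background plus an elliptical Gaussian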
10,634
|
astropy/photutils
|
photutils/psf/sandbox.py
|
DiscretePRF.evaluate
|
def evaluate(self, x, y, flux, x_0, y_0):
"""
Discrete PRF model evaluation.
Given a certain position and flux the corresponding image of
the PSF is chosen and scaled to the flux. If x and y are
outside the boundaries of the image, zero will be returned.
Parameters
----------
x : float
x coordinate array in pixel coordinates.
y : float
y coordinate array in pixel coordinates.
flux : float
Model flux.
x_0 : float
x position of the center of the PRF.
y_0 : float
y position of the center of the PRF.
"""
# Convert x and y to index arrays
x = (x - x_0 + 0.5 + self.prf_shape[1] // 2).astype('int')
y = (y - y_0 + 0.5 + self.prf_shape[0] // 2).astype('int')
# Get subpixel indices
y_sub, x_sub = subpixel_indices((y_0, x_0), self.subsampling)
# Out of boundary masks
x_bound = np.logical_or(x < 0, x >= self.prf_shape[1])
y_bound = np.logical_or(y < 0, y >= self.prf_shape[0])
out_of_bounds = np.logical_or(x_bound, y_bound)
# Set out of boundary indices to zero
x[x_bound] = 0
y[y_bound] = 0
result = flux * self._prf_array[int(y_sub), int(x_sub)][y, x]
# Set out of boundary values to zero
result[out_of_bounds] = 0
return result
|
python
|
def evaluate(self, x, y, flux, x_0, y_0):
"""
Discrete PRF model evaluation.
Given a certain position and flux the corresponding image of
the PSF is chosen and scaled to the flux. If x and y are
outside the boundaries of the image, zero will be returned.
Parameters
----------
x : float
x coordinate array in pixel coordinates.
y : float
y coordinate array in pixel coordinates.
flux : float
Model flux.
x_0 : float
x position of the center of the PRF.
y_0 : float
y position of the center of the PRF.
"""
# Convert x and y to index arrays
x = (x - x_0 + 0.5 + self.prf_shape[1] // 2).astype('int')
y = (y - y_0 + 0.5 + self.prf_shape[0] // 2).astype('int')
# Get subpixel indices
y_sub, x_sub = subpixel_indices((y_0, x_0), self.subsampling)
# Out of boundary masks
x_bound = np.logical_or(x < 0, x >= self.prf_shape[1])
y_bound = np.logical_or(y < 0, y >= self.prf_shape[0])
out_of_bounds = np.logical_or(x_bound, y_bound)
# Set out of boundary indices to zero
x[x_bound] = 0
y[y_bound] = 0
result = flux * self._prf_array[int(y_sub), int(x_sub)][y, x]
# Set out of boundary values to zero
result[out_of_bounds] = 0
return result
|
[
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
")",
":",
"# Convert x and y to index arrays",
"x",
"=",
"(",
"x",
"-",
"x_0",
"+",
"0.5",
"+",
"self",
".",
"prf_shape",
"[",
"1",
"]",
"//",
"2",
")",
".",
"astype",
"(",
"'int'",
")",
"y",
"=",
"(",
"y",
"-",
"y_0",
"+",
"0.5",
"+",
"self",
".",
"prf_shape",
"[",
"0",
"]",
"//",
"2",
")",
".",
"astype",
"(",
"'int'",
")",
"# Get subpixel indices",
"y_sub",
",",
"x_sub",
"=",
"subpixel_indices",
"(",
"(",
"y_0",
",",
"x_0",
")",
",",
"self",
".",
"subsampling",
")",
"# Out of boundary masks",
"x_bound",
"=",
"np",
".",
"logical_or",
"(",
"x",
"<",
"0",
",",
"x",
">=",
"self",
".",
"prf_shape",
"[",
"1",
"]",
")",
"y_bound",
"=",
"np",
".",
"logical_or",
"(",
"y",
"<",
"0",
",",
"y",
">=",
"self",
".",
"prf_shape",
"[",
"0",
"]",
")",
"out_of_bounds",
"=",
"np",
".",
"logical_or",
"(",
"x_bound",
",",
"y_bound",
")",
"# Set out of boundary indices to zero",
"x",
"[",
"x_bound",
"]",
"=",
"0",
"y",
"[",
"y_bound",
"]",
"=",
"0",
"result",
"=",
"flux",
"*",
"self",
".",
"_prf_array",
"[",
"int",
"(",
"y_sub",
")",
",",
"int",
"(",
"x_sub",
")",
"]",
"[",
"y",
",",
"x",
"]",
"# Set out of boundary values to zero",
"result",
"[",
"out_of_bounds",
"]",
"=",
"0",
"return",
"result"
] |
Discrete PRF model evaluation.
Given a certain position and flux the corresponding image of
the PSF is chosen and scaled to the flux. If x and y are
outside the boundaries of the image, zero will be returned.
Parameters
----------
x : float
x coordinate array in pixel coordinates.
y : float
y coordinate array in pixel coordinates.
flux : float
Model flux.
x_0 : float
x position of the center of the PRF.
y_0 : float
y position of the center of the PRF.
|
[
"Discrete",
"PRF",
"model",
"evaluation",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/sandbox.py#L104-L145
|
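The out-of-bounds handling in ``DiscretePRF.evaluate`` uses a standard NumPy trick: clamp invalid indices to a safe placeholder so fancy indexing stays legal, then zero the corresponding outputs. A self-contained sketch of that trick (not photutils code):

import numpy as np

table = np.arange(10.0)          # stand-in for a PRF lookup array
idx = np.array([-2, 0, 3, 12])   # some indices fall outside [0, 10)
bad = np.logical_or(idx < 0, idx >= table.size)
idx[bad] = 0                     # safe placeholder index
result = table[idx]
result[bad] = 0.0                # -> [0., 0., 3., 0.]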
10,635
|
astropy/photutils
|
photutils/psf/sandbox.py
|
Reproject._reproject
|
def _reproject(wcs1, wcs2):
"""
Perform the forward transformation of ``wcs1`` followed by the
inverse transformation of ``wcs2``.
Parameters
----------
wcs1, wcs2 : `~astropy.wcs.WCS` or `~gwcs.wcs.WCS`
The WCS objects.
Returns
-------
result : func
Function to compute the transformations. It takes x, y
positions in ``wcs1`` and returns x, y positions in
``wcs2``. The input and output x, y positions are zero
indexed.
"""
import gwcs
forward_origin = []
if isinstance(wcs1, fitswcs.WCS):
forward = wcs1.all_pix2world
forward_origin = [0]
    elif isinstance(wcs1, gwcs.wcs.WCS):
forward = wcs1.forward_transform
else:
raise ValueError('wcs1 must be an astropy.wcs.WCS or '
'gwcs.wcs.WCS object.')
inverse_origin = []
if isinstance(wcs2, fitswcs.WCS):
inverse = wcs2.all_world2pix
inverse_origin = [0]
elif isinstance(wcs2, gwcs.wcs.WCS):
inverse = wcs2.forward_transform.inverse
else:
raise ValueError('wcs2 must be an astropy.wcs.WCS or '
'gwcs.wcs.WCS object.')
def _reproject_func(x, y):
forward_args = [x, y] + forward_origin
sky = forward(*forward_args)
inverse_args = sky + inverse_origin
return inverse(*inverse_args)
return _reproject_func
|
python
|
def _reproject(wcs1, wcs2):
"""
Perform the forward transformation of ``wcs1`` followed by the
inverse transformation of ``wcs2``.
Parameters
----------
wcs1, wcs2 : `~astropy.wcs.WCS` or `~gwcs.wcs.WCS`
The WCS objects.
Returns
-------
result : func
Function to compute the transformations. It takes x, y
positions in ``wcs1`` and returns x, y positions in
``wcs2``. The input and output x, y positions are zero
indexed.
"""
import gwcs
forward_origin = []
if isinstance(wcs1, fitswcs.WCS):
forward = wcs1.all_pix2world
forward_origin = [0]
    elif isinstance(wcs1, gwcs.wcs.WCS):
forward = wcs1.forward_transform
else:
raise ValueError('wcs1 must be an astropy.wcs.WCS or '
'gwcs.wcs.WCS object.')
inverse_origin = []
if isinstance(wcs2, fitswcs.WCS):
inverse = wcs2.all_world2pix
inverse_origin = [0]
elif isinstance(wcs2, gwcs.wcs.WCS):
inverse = wcs2.forward_transform.inverse
else:
raise ValueError('wcs2 must be an astropy.wcs.WCS or '
'gwcs.wcs.WCS object.')
def _reproject_func(x, y):
forward_args = [x, y] + forward_origin
sky = forward(*forward_args)
inverse_args = sky + inverse_origin
return inverse(*inverse_args)
return _reproject_func
|
[
"def",
"_reproject",
"(",
"wcs1",
",",
"wcs2",
")",
":",
"import",
"gwcs",
"forward_origin",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"wcs1",
",",
"fitswcs",
".",
"WCS",
")",
":",
"forward",
"=",
"wcs1",
".",
"all_pix2world",
"forward_origin",
"=",
"[",
"0",
"]",
"elif",
"isinstance",
"(",
"wcs2",
",",
"gwcs",
".",
"wcs",
".",
"WCS",
")",
":",
"forward",
"=",
"wcs1",
".",
"forward_transform",
"else",
":",
"raise",
"ValueError",
"(",
"'wcs1 must be an astropy.wcs.WCS or '",
"'gwcs.wcs.WCS object.'",
")",
"inverse_origin",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"wcs2",
",",
"fitswcs",
".",
"WCS",
")",
":",
"inverse",
"=",
"wcs2",
".",
"all_world2pix",
"inverse_origin",
"=",
"[",
"0",
"]",
"elif",
"isinstance",
"(",
"wcs2",
",",
"gwcs",
".",
"wcs",
".",
"WCS",
")",
":",
"inverse",
"=",
"wcs2",
".",
"forward_transform",
".",
"inverse",
"else",
":",
"raise",
"ValueError",
"(",
"'wcs2 must be an astropy.wcs.WCS or '",
"'gwcs.wcs.WCS object.'",
")",
"def",
"_reproject_func",
"(",
"x",
",",
"y",
")",
":",
"forward_args",
"=",
"[",
"x",
",",
"y",
"]",
"+",
"forward_origin",
"sky",
"=",
"forward",
"(",
"*",
"forward_args",
")",
"inverse_args",
"=",
"sky",
"+",
"inverse_origin",
"return",
"inverse",
"(",
"*",
"inverse_args",
")",
"return",
"_reproject_func"
] |
Perform the forward transformation of ``wcs1`` followed by the
inverse transformation of ``wcs2``.
Parameters
----------
wcs1, wcs2 : `~astropy.wcs.WCS` or `~gwcs.wcs.WCS`
The WCS objects.
Returns
-------
result : func
Function to compute the transformations. It takes x, y
positions in ``wcs1`` and returns x, y positions in
``wcs2``. The input and output x, y positions are zero
indexed.
|
[
"Perform",
"the",
"forward",
"transformation",
"of",
"wcs1",
"followed",
"by",
"the",
"inverse",
"transformation",
"of",
"wcs2",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/sandbox.py#L316-L363
|
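A sketch of the pixel-to-pixel round trip that ``_reproject`` composes, using two plain FITS WCS objects with zero-indexed pixel origin, as in the function above (the WCS parameters are invented):

import numpy as np
from astropy.wcs import WCS

wcs1 = WCS(naxis=2)
wcs1.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs1.wcs.crpix = [50, 50]
wcs1.wcs.cdelt = [-0.001, 0.001]
wcs2 = WCS(naxis=2)
wcs2.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs2.wcs.crpix = [100, 100]
wcs2.wcs.cdelt = [-0.0005, 0.0005]

sky = wcs1.all_pix2world([10.0], [20.0], 0)  # forward, origin=0
x2, y2 = wcs2.all_world2pix(*sky, 0)         # inverse, origin=0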
10,636
|
astropy/photutils
|
photutils/utils/misc.py
|
get_version_info
|
def get_version_info():
"""
Return astropy and photutils versions.
Returns
-------
result : str
The astropy and photutils versions.
"""
from astropy import __version__
astropy_version = __version__
from photutils import __version__
photutils_version = __version__
return 'astropy: {0}, photutils: {1}'.format(astropy_version,
photutils_version)
|
python
|
def get_version_info():
"""
Return astropy and photutils versions.
Returns
-------
result : str
The astropy and photutils versions.
"""
from astropy import __version__
astropy_version = __version__
from photutils import __version__
photutils_version = __version__
return 'astropy: {0}, photutils: {1}'.format(astropy_version,
photutils_version)
|
[
"def",
"get_version_info",
"(",
")",
":",
"from",
"astropy",
"import",
"__version__",
"astropy_version",
"=",
"__version__",
"from",
"photutils",
"import",
"__version__",
"photutils_version",
"=",
"__version__",
"return",
"'astropy: {0}, photutils: {1}'",
".",
"format",
"(",
"astropy_version",
",",
"photutils_version",
")"
] |
Return astropy and photutils versions.
Returns
-------
result : str
The astropy and photutils versions.
|
[
"Return",
"astropy",
"and",
"photutils",
"versions",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/misc.py#L15-L32
|
10,637
|
astropy/photutils
|
photutils/utils/errors.py
|
calc_total_error
|
def calc_total_error(data, bkg_error, effective_gain):
"""
Calculate a total error array, combining a background-only error
array with the Poisson noise of sources.
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The data array.
bkg_error : array_like or `~astropy.units.Quantity`
The pixel-wise Gaussian 1-sigma background-only errors of the
input ``data``. ``bkg_error`` should include all sources of
"background" error but *exclude* the Poisson error of the
sources. ``bkg_error`` must have the same shape as ``data``.
If ``data`` and ``bkg_error`` are `~astropy.units.Quantity`
objects, then they must have the same units.
effective_gain : float, array-like, or `~astropy.units.Quantity`
Ratio of counts (e.g., electrons or photons) to the units of
``data`` used to calculate the Poisson error of the sources.
Returns
-------
total_error : `~numpy.ndarray` or `~astropy.units.Quantity`
The total error array. If ``data``, ``bkg_error``, and
``effective_gain`` are all `~astropy.units.Quantity` objects,
then ``total_error`` will also be returned as a
`~astropy.units.Quantity` object with the same units as the
input ``data``. Otherwise, a `~numpy.ndarray` will be returned.
Notes
-----
To use units, ``data``, ``bkg_error``, and ``effective_gain`` must
*all* be `~astropy.units.Quantity` objects. ``data`` and
``bkg_error`` must have the same units. A `ValueError` will be
raised if only some of the inputs are `~astropy.units.Quantity`
objects or if the ``data`` and ``bkg_error`` units differ.
    The total error array, :math:`\\sigma_{\\mathrm{tot}}`, is:
.. math:: \\sigma_{\\mathrm{tot}} = \\sqrt{\\sigma_{\\mathrm{b}}^2 +
\\frac{I}{g}}
where :math:`\\sigma_b`, :math:`I`, and :math:`g` are the background
``bkg_error`` image, ``data`` image, and ``effective_gain``,
respectively.
    Pixels where ``data`` (:math:`I_i`) is negative do not contribute
additional Poisson noise to the total error, i.e.
:math:`\\sigma_{\\mathrm{tot}, i} = \\sigma_{\\mathrm{b}, i}`. Note
that this is different from `SExtractor`_, which sums the total
variance in the segment, including pixels where :math:`I_i` is
negative. In such cases, `SExtractor`_ underestimates the total
errors. Also note that SExtractor computes Poisson errors from
    background-subtracted data, which likewise results in an underestimation
of the Poisson noise.
``effective_gain`` can either be a scalar value or a 2D image with
the same shape as the ``data``. A 2D image is useful with mosaic
images that have variable depths (i.e., exposure times) across the
field. For example, one should use an exposure-time map as the
``effective_gain`` for a variable depth mosaic image in count-rate
units.
As an example, if your input ``data`` are in units of ADU, then
``effective_gain`` should be in units of electrons/ADU (or
photons/ADU). If your input ``data`` are in units of electrons/s
then ``effective_gain`` should be the exposure time or an exposure
time map (e.g., for mosaics with non-uniform exposure times).
.. _SExtractor: http://www.astromatic.net/software/sextractor
"""
data = np.asanyarray(data)
bkg_error = np.asanyarray(bkg_error)
inputs = [data, bkg_error, effective_gain]
has_unit = [hasattr(x, 'unit') for x in inputs]
use_units = all(has_unit)
if any(has_unit) and not use_units:
raise ValueError('If any of data, bkg_error, or effective_gain has '
                         'units, then they must all have units.')
if use_units:
if data.unit != bkg_error.unit:
raise ValueError('data and bkg_error must have the same units.')
count_units = [u.electron, u.photon]
datagain_unit = data.unit * effective_gain.unit
if datagain_unit not in count_units:
raise u.UnitsError('(data * effective_gain) has units of "{0}", '
'but it must have count units (e.g. '
'u.electron or u.photon).'
.format(datagain_unit))
if not isiterable(effective_gain):
effective_gain = np.zeros(data.shape) + effective_gain
else:
effective_gain = np.asanyarray(effective_gain)
if effective_gain.shape != data.shape:
raise ValueError('If input effective_gain is 2D, then it must '
'have the same shape as the input data.')
if np.any(effective_gain <= 0):
raise ValueError('effective_gain must be strictly positive '
'everywhere.')
# This calculation assumes that data and bkg_error have the same
# units. source_variance is calculated to have units of
# (data.unit)**2 so that it can be added with bkg_error**2 below. The
# final returned error will have units of data.unit. np.maximum is
# used to ensure that negative data values do not contribute to the
# Poisson noise.
if use_units:
unit = data.unit
data = data.value
effective_gain = effective_gain.value
source_variance = np.maximum(data / effective_gain, 0) * unit**2
else:
source_variance = np.maximum(data / effective_gain, 0)
return np.sqrt(bkg_error**2 + source_variance)
|
python
|
def calc_total_error(data, bkg_error, effective_gain):
"""
Calculate a total error array, combining a background-only error
array with the Poisson noise of sources.
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The data array.
bkg_error : array_like or `~astropy.units.Quantity`
The pixel-wise Gaussian 1-sigma background-only errors of the
input ``data``. ``bkg_error`` should include all sources of
"background" error but *exclude* the Poisson error of the
sources. ``bkg_error`` must have the same shape as ``data``.
If ``data`` and ``bkg_error`` are `~astropy.units.Quantity`
objects, then they must have the same units.
effective_gain : float, array-like, or `~astropy.units.Quantity`
Ratio of counts (e.g., electrons or photons) to the units of
``data`` used to calculate the Poisson error of the sources.
Returns
-------
total_error : `~numpy.ndarray` or `~astropy.units.Quantity`
The total error array. If ``data``, ``bkg_error``, and
``effective_gain`` are all `~astropy.units.Quantity` objects,
then ``total_error`` will also be returned as a
`~astropy.units.Quantity` object with the same units as the
input ``data``. Otherwise, a `~numpy.ndarray` will be returned.
Notes
-----
To use units, ``data``, ``bkg_error``, and ``effective_gain`` must
*all* be `~astropy.units.Quantity` objects. ``data`` and
``bkg_error`` must have the same units. A `ValueError` will be
raised if only some of the inputs are `~astropy.units.Quantity`
objects or if the ``data`` and ``bkg_error`` units differ.
    The total error array, :math:`\\sigma_{\\mathrm{tot}}`, is:
.. math:: \\sigma_{\\mathrm{tot}} = \\sqrt{\\sigma_{\\mathrm{b}}^2 +
\\frac{I}{g}}
where :math:`\\sigma_b`, :math:`I`, and :math:`g` are the background
``bkg_error`` image, ``data`` image, and ``effective_gain``,
respectively.
    Pixels where ``data`` (:math:`I_i`) is negative do not contribute
additional Poisson noise to the total error, i.e.
:math:`\\sigma_{\\mathrm{tot}, i} = \\sigma_{\\mathrm{b}, i}`. Note
that this is different from `SExtractor`_, which sums the total
variance in the segment, including pixels where :math:`I_i` is
negative. In such cases, `SExtractor`_ underestimates the total
errors. Also note that SExtractor computes Poisson errors from
    background-subtracted data, which likewise results in an underestimation
of the Poisson noise.
``effective_gain`` can either be a scalar value or a 2D image with
the same shape as the ``data``. A 2D image is useful with mosaic
images that have variable depths (i.e., exposure times) across the
field. For example, one should use an exposure-time map as the
``effective_gain`` for a variable depth mosaic image in count-rate
units.
As an example, if your input ``data`` are in units of ADU, then
``effective_gain`` should be in units of electrons/ADU (or
photons/ADU). If your input ``data`` are in units of electrons/s
then ``effective_gain`` should be the exposure time or an exposure
time map (e.g., for mosaics with non-uniform exposure times).
.. _SExtractor: http://www.astromatic.net/software/sextractor
"""
data = np.asanyarray(data)
bkg_error = np.asanyarray(bkg_error)
inputs = [data, bkg_error, effective_gain]
has_unit = [hasattr(x, 'unit') for x in inputs]
use_units = all(has_unit)
if any(has_unit) and not use_units:
raise ValueError('If any of data, bkg_error, or effective_gain has '
                         'units, then they must all have units.')
if use_units:
if data.unit != bkg_error.unit:
raise ValueError('data and bkg_error must have the same units.')
count_units = [u.electron, u.photon]
datagain_unit = data.unit * effective_gain.unit
if datagain_unit not in count_units:
raise u.UnitsError('(data * effective_gain) has units of "{0}", '
'but it must have count units (e.g. '
'u.electron or u.photon).'
.format(datagain_unit))
if not isiterable(effective_gain):
effective_gain = np.zeros(data.shape) + effective_gain
else:
effective_gain = np.asanyarray(effective_gain)
if effective_gain.shape != data.shape:
raise ValueError('If input effective_gain is 2D, then it must '
'have the same shape as the input data.')
if np.any(effective_gain <= 0):
raise ValueError('effective_gain must be strictly positive '
'everywhere.')
# This calculation assumes that data and bkg_error have the same
# units. source_variance is calculated to have units of
# (data.unit)**2 so that it can be added with bkg_error**2 below. The
# final returned error will have units of data.unit. np.maximum is
# used to ensure that negative data values do not contribute to the
# Poisson noise.
if use_units:
unit = data.unit
data = data.value
effective_gain = effective_gain.value
source_variance = np.maximum(data / effective_gain, 0) * unit**2
else:
source_variance = np.maximum(data / effective_gain, 0)
return np.sqrt(bkg_error**2 + source_variance)
|
[
"def",
"calc_total_error",
"(",
"data",
",",
"bkg_error",
",",
"effective_gain",
")",
":",
"data",
"=",
"np",
".",
"asanyarray",
"(",
"data",
")",
"bkg_error",
"=",
"np",
".",
"asanyarray",
"(",
"bkg_error",
")",
"inputs",
"=",
"[",
"data",
",",
"bkg_error",
",",
"effective_gain",
"]",
"has_unit",
"=",
"[",
"hasattr",
"(",
"x",
",",
"'unit'",
")",
"for",
"x",
"in",
"inputs",
"]",
"use_units",
"=",
"all",
"(",
"has_unit",
")",
"if",
"any",
"(",
"has_unit",
")",
"and",
"not",
"use_units",
":",
"raise",
"ValueError",
"(",
"'If any of data, bkg_error, or effective_gain has '",
"'units, then they all must all have units.'",
")",
"if",
"use_units",
":",
"if",
"data",
".",
"unit",
"!=",
"bkg_error",
".",
"unit",
":",
"raise",
"ValueError",
"(",
"'data and bkg_error must have the same units.'",
")",
"count_units",
"=",
"[",
"u",
".",
"electron",
",",
"u",
".",
"photon",
"]",
"datagain_unit",
"=",
"data",
".",
"unit",
"*",
"effective_gain",
".",
"unit",
"if",
"datagain_unit",
"not",
"in",
"count_units",
":",
"raise",
"u",
".",
"UnitsError",
"(",
"'(data * effective_gain) has units of \"{0}\", '",
"'but it must have count units (e.g. '",
"'u.electron or u.photon).'",
".",
"format",
"(",
"datagain_unit",
")",
")",
"if",
"not",
"isiterable",
"(",
"effective_gain",
")",
":",
"effective_gain",
"=",
"np",
".",
"zeros",
"(",
"data",
".",
"shape",
")",
"+",
"effective_gain",
"else",
":",
"effective_gain",
"=",
"np",
".",
"asanyarray",
"(",
"effective_gain",
")",
"if",
"effective_gain",
".",
"shape",
"!=",
"data",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'If input effective_gain is 2D, then it must '",
"'have the same shape as the input data.'",
")",
"if",
"np",
".",
"any",
"(",
"effective_gain",
"<=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'effective_gain must be strictly positive '",
"'everywhere.'",
")",
"# This calculation assumes that data and bkg_error have the same",
"# units. source_variance is calculated to have units of",
"# (data.unit)**2 so that it can be added with bkg_error**2 below. The",
"# final returned error will have units of data.unit. np.maximum is",
"# used to ensure that negative data values do not contribute to the",
"# Poisson noise.",
"if",
"use_units",
":",
"unit",
"=",
"data",
".",
"unit",
"data",
"=",
"data",
".",
"value",
"effective_gain",
"=",
"effective_gain",
".",
"value",
"source_variance",
"=",
"np",
".",
"maximum",
"(",
"data",
"/",
"effective_gain",
",",
"0",
")",
"*",
"unit",
"**",
"2",
"else",
":",
"source_variance",
"=",
"np",
".",
"maximum",
"(",
"data",
"/",
"effective_gain",
",",
"0",
")",
"return",
"np",
".",
"sqrt",
"(",
"bkg_error",
"**",
"2",
"+",
"source_variance",
")"
] |
Calculate a total error array, combining a background-only error
array with the Poisson noise of sources.
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The data array.
bkg_error : array_like or `~astropy.units.Quantity`
The pixel-wise Gaussian 1-sigma background-only errors of the
input ``data``. ``bkg_error`` should include all sources of
"background" error but *exclude* the Poisson error of the
sources. ``bkg_error`` must have the same shape as ``data``.
If ``data`` and ``bkg_error`` are `~astropy.units.Quantity`
objects, then they must have the same units.
effective_gain : float, array-like, or `~astropy.units.Quantity`
Ratio of counts (e.g., electrons or photons) to the units of
``data`` used to calculate the Poisson error of the sources.
Returns
-------
total_error : `~numpy.ndarray` or `~astropy.units.Quantity`
The total error array. If ``data``, ``bkg_error``, and
``effective_gain`` are all `~astropy.units.Quantity` objects,
then ``total_error`` will also be returned as a
`~astropy.units.Quantity` object with the same units as the
input ``data``. Otherwise, a `~numpy.ndarray` will be returned.
Notes
-----
To use units, ``data``, ``bkg_error``, and ``effective_gain`` must
*all* be `~astropy.units.Quantity` objects. ``data`` and
``bkg_error`` must have the same units. A `ValueError` will be
raised if only some of the inputs are `~astropy.units.Quantity`
objects or if the ``data`` and ``bkg_error`` units differ.
The total error array, :math:`\\sigma_{\\mathrm{tot}}`, is:
.. math:: \\sigma_{\\mathrm{tot}} = \\sqrt{\\sigma_{\\mathrm{b}}^2 +
\\frac{I}{g}}
where :math:`\\sigma_b`, :math:`I`, and :math:`g` are the background
``bkg_error`` image, ``data`` image, and ``effective_gain``,
respectively.
Pixels where ``data`` (:math:`I_i`) is negative do not contribute
additional Poisson noise to the total error, i.e.
:math:`\\sigma_{\\mathrm{tot}, i} = \\sigma_{\\mathrm{b}, i}`. Note
that this is different from `SExtractor`_, which sums the total
variance in the segment, including pixels where :math:`I_i` is
negative. In such cases, `SExtractor`_ underestimates the total
errors. Also note that SExtractor computes Poisson errors from
background-subtracted data, which likewise results in an underestimation
of the Poisson noise.
``effective_gain`` can either be a scalar value or a 2D image with
the same shape as the ``data``. A 2D image is useful with mosaic
images that have variable depths (i.e., exposure times) across the
field. For example, one should use an exposure-time map as the
``effective_gain`` for a variable depth mosaic image in count-rate
units.
As an example, if your input ``data`` are in units of ADU, then
``effective_gain`` should be in units of electrons/ADU (or
photons/ADU). If your input ``data`` are in units of electrons/s
then ``effective_gain`` should be the exposure time or an exposure
time map (e.g., for mosaics with non-uniform exposure times).
.. _SExtractor: http://www.astromatic.net/software/sextractor
|
[
"Calculate",
"a",
"total",
"error",
"array",
"combining",
"a",
"background",
"-",
"only",
"error",
"array",
"with",
"the",
"Poisson",
"noise",
"of",
"sources",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/errors.py#L11-L132
|
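A worked sketch of the Notes formula above, sigma_tot = sqrt(bkg_error**2 + max(data, 0) / effective_gain), using the public ``photutils.utils.calc_total_error`` (the array values are invented):

import numpy as np
from photutils.utils import calc_total_error

data = np.array([[100.0, -5.0], [25.0, 0.0]])  # counts
bkg_error = np.full(data.shape, 3.0)
total_error = calc_total_error(data, bkg_error, effective_gain=1.0)
# pixel (0, 0): sqrt(3**2 + 100/1) = sqrt(109) ~ 10.44
# pixel (0, 1): negative data adds no Poisson term -> 3.0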
10,638
|
astropy/photutils
|
photutils/aperture/rectangle.py
|
RectangularAperture.to_sky
|
def to_sky(self, wcs, mode='all'):
"""
Convert the aperture to a `SkyRectangularAperture` object
defined in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
            (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyRectangularAperture` object
A `SkyRectangularAperture` object.
"""
sky_params = self._to_sky_params(wcs, mode=mode)
return SkyRectangularAperture(**sky_params)
|
python
|
def to_sky(self, wcs, mode='all'):
"""
Convert the aperture to a `SkyRectangularAperture` object
defined in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
            (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyRectangularAperture` object
A `SkyRectangularAperture` object.
"""
sky_params = self._to_sky_params(wcs, mode=mode)
return SkyRectangularAperture(**sky_params)
|
[
"def",
"to_sky",
"(",
"self",
",",
"wcs",
",",
"mode",
"=",
"'all'",
")",
":",
"sky_params",
"=",
"self",
".",
"_to_sky_params",
"(",
"wcs",
",",
"mode",
"=",
"mode",
")",
"return",
"SkyRectangularAperture",
"(",
"*",
"*",
"sky_params",
")"
] |
Convert the aperture to a `SkyRectangularAperture` object
defined in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
    (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyRectangularAperture` object
A `SkyRectangularAperture` object.
|
[
"Convert",
"the",
"aperture",
"to",
"a",
"SkyRectangularAperture",
"object",
"defined",
"in",
"celestial",
"coordinates",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/rectangle.py#L200-L222
|
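A hedged sketch of the pixel-to-sky round trip shared by this record and the three that follow (the annulus and Sky* variants use the same ``_to_sky_params``/``_to_pixel_params`` pattern, so one example stands for all four); the WCS and aperture parameters are invented:

from astropy.wcs import WCS
from photutils.aperture import RectangularAperture

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [50, 50]
wcs.wcs.cdelt = [-0.001, 0.001]

aper = RectangularAperture((24.0, 30.0), w=10.0, h=6.0, theta=0.5)
sky_aper = aper.to_sky(wcs)        # -> SkyRectangularAperture
pix_aper = sky_aper.to_pixel(wcs)  # back to pixel coordinates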
10,639
|
astropy/photutils
|
photutils/aperture/rectangle.py
|
RectangularAnnulus.to_sky
|
def to_sky(self, wcs, mode='all'):
"""
Convert the aperture to a `SkyRectangularAnnulus` object
defined in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
            (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyRectangularAnnulus` object
A `SkyRectangularAnnulus` object.
"""
sky_params = self._to_sky_params(wcs, mode=mode)
return SkyRectangularAnnulus(**sky_params)
|
python
|
def to_sky(self, wcs, mode='all'):
"""
Convert the aperture to a `SkyRectangularAnnulus` object
defined in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
            (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyRectangularAnnulus` object
A `SkyRectangularAnnulus` object.
"""
sky_params = self._to_sky_params(wcs, mode=mode)
return SkyRectangularAnnulus(**sky_params)
|
[
"def",
"to_sky",
"(",
"self",
",",
"wcs",
",",
"mode",
"=",
"'all'",
")",
":",
"sky_params",
"=",
"self",
".",
"_to_sky_params",
"(",
"wcs",
",",
"mode",
"=",
"mode",
")",
"return",
"SkyRectangularAnnulus",
"(",
"*",
"*",
"sky_params",
")"
] |
Convert the aperture to a `SkyRectangularAnnulus` object
defined in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
    (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyRectangularAnnulus` object
A `SkyRectangularAnnulus` object.
|
[
"Convert",
"the",
"aperture",
"to",
"a",
"SkyRectangularAnnulus",
"object",
"defined",
"in",
"celestial",
"coordinates",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/rectangle.py#L354-L376
|
10,640
|
astropy/photutils
|
photutils/aperture/rectangle.py
|
SkyRectangularAperture.to_pixel
|
def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to a `RectangularAperture` object defined
in pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
            (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `RectangularAperture` object
A `RectangularAperture` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return RectangularAperture(**pixel_params)
|
python
|
def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to a `RectangularAperture` object defined
in pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
            (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `RectangularAperture` object
A `RectangularAperture` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return RectangularAperture(**pixel_params)
|
[
"def",
"to_pixel",
"(",
"self",
",",
"wcs",
",",
"mode",
"=",
"'all'",
")",
":",
"pixel_params",
"=",
"self",
".",
"_to_pixel_params",
"(",
"wcs",
",",
"mode",
"=",
"mode",
")",
"return",
"RectangularAperture",
"(",
"*",
"*",
"pixel_params",
")"
] |
Convert the aperture to a `RectangularAperture` object defined
in pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
    (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `RectangularAperture` object
A `RectangularAperture` object.
|
[
"Convert",
"the",
"aperture",
"to",
"a",
"RectangularAperture",
"object",
"defined",
"in",
"pixel",
"coordinates",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/rectangle.py#L425-L447
|
10,641
|
astropy/photutils
|
photutils/aperture/rectangle.py
|
SkyRectangularAnnulus.to_pixel
|
def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to a `RectangularAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
            (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `RectangularAnnulus` object
A `RectangularAnnulus` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return RectangularAnnulus(**pixel_params)
|
python
|
def to_pixel(self, wcs, mode='all'):
"""
Convert the aperture to a `RectangularAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
            (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `RectangularAnnulus` object
A `RectangularAnnulus` object.
"""
pixel_params = self._to_pixel_params(wcs, mode=mode)
return RectangularAnnulus(**pixel_params)
|
[
"def",
"to_pixel",
"(",
"self",
",",
"wcs",
",",
"mode",
"=",
"'all'",
")",
":",
"pixel_params",
"=",
"self",
".",
"_to_pixel_params",
"(",
"wcs",
",",
"mode",
"=",
"mode",
")",
"return",
"RectangularAnnulus",
"(",
"*",
"*",
"pixel_params",
")"
] |
Convert the aperture to a `RectangularAnnulus` object defined in
pixel coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
    (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `RectangularAnnulus` object
A `RectangularAnnulus` object.
|
[
"Convert",
"the",
"aperture",
"to",
"a",
"RectangularAnnulus",
"object",
"defined",
"in",
"pixel",
"coordinates",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/rectangle.py#L511-L533
|
10,642
|
astropy/photutils
|
photutils/psf/epsf.py
|
_py2intround
|
def _py2intround(a):
"""
Round the input to the nearest integer.
If two integers are equally close, rounding is done away from 0.
"""
data = np.asanyarray(a)
value = np.where(data >= 0, np.floor(data + 0.5),
np.ceil(data - 0.5)).astype(int)
if not hasattr(a, '__iter__'):
value = np.asscalar(value)
return value
|
python
|
def _py2intround(a):
"""
Round the input to the nearest integer.
If two integers are equally close, rounding is done away from 0.
"""
data = np.asanyarray(a)
value = np.where(data >= 0, np.floor(data + 0.5),
np.ceil(data - 0.5)).astype(int)
if not hasattr(a, '__iter__'):
value = np.asscalar(value)
return value
|
[
"def",
"_py2intround",
"(",
"a",
")",
":",
"data",
"=",
"np",
".",
"asanyarray",
"(",
"a",
")",
"value",
"=",
"np",
".",
"where",
"(",
"data",
">=",
"0",
",",
"np",
".",
"floor",
"(",
"data",
"+",
"0.5",
")",
",",
"np",
".",
"ceil",
"(",
"data",
"-",
"0.5",
")",
")",
".",
"astype",
"(",
"int",
")",
"if",
"not",
"hasattr",
"(",
"a",
",",
"'__iter__'",
")",
":",
"value",
"=",
"np",
".",
"asscalar",
"(",
"value",
")",
"return",
"value"
] |
Round the input to the nearest integer.
If two integers are equally close, rounding is done away from 0.
|
[
"Round",
"the",
"input",
"to",
"the",
"nearest",
"integer",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L845-L859
|
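A short contrast between the round-half-away-from-zero behavior implemented by ``_py2intround`` above and NumPy's default round-half-to-even (the values are illustrative):

import numpy as np

vals = np.array([0.5, 1.5, -0.5, -1.5])
away = np.where(vals >= 0, np.floor(vals + 0.5),
                np.ceil(vals - 0.5)).astype(int)  # -> [ 1  2 -1 -2]
even = np.round(vals).astype(int)                 # -> [ 0  2  0 -2]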
10,643
|
astropy/photutils
|
photutils/psf/epsf.py
|
_interpolate_missing_data
|
def _interpolate_missing_data(data, mask, method='cubic'):
"""
Interpolate missing data as identified by the ``mask`` keyword.
Parameters
----------
data : 2D `~numpy.ndarray`
An array containing the 2D image.
mask : 2D bool `~numpy.ndarray`
        A 2D boolean mask array with the same shape as the input
``data``, where a `True` value indicates the corresponding
element of ``data`` is masked. The masked data points are
those that will be interpolated.
method : {'cubic', 'nearest'}, optional
        The method used to interpolate the missing data:
* ``'cubic'``: Masked data are interpolated using 2D cubic
splines. This is the default.
* ``'nearest'``: Masked data are interpolated using
nearest-neighbor interpolation.
Returns
-------
data_interp : 2D `~numpy.ndarray`
The interpolated 2D image.
"""
from scipy import interpolate
data_interp = np.array(data, copy=True)
if len(data_interp.shape) != 2:
raise ValueError('data must be a 2D array.')
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape.')
y, x = np.indices(data_interp.shape)
xy = np.dstack((x[~mask].ravel(), y[~mask].ravel()))[0]
z = data_interp[~mask].ravel()
if method == 'nearest':
interpol = interpolate.NearestNDInterpolator(xy, z)
elif method == 'cubic':
interpol = interpolate.CloughTocher2DInterpolator(xy, z)
else:
raise ValueError('Unsupported interpolation method.')
xy_missing = np.dstack((x[mask].ravel(), y[mask].ravel()))[0]
data_interp[mask] = interpol(xy_missing)
return data_interp
|
python
|
def _interpolate_missing_data(data, mask, method='cubic'):
"""
Interpolate missing data as identified by the ``mask`` keyword.
Parameters
----------
data : 2D `~numpy.ndarray`
An array containing the 2D image.
mask : 2D bool `~numpy.ndarray`
        A 2D boolean mask array with the same shape as the input
``data``, where a `True` value indicates the corresponding
element of ``data`` is masked. The masked data points are
those that will be interpolated.
method : {'cubic', 'nearest'}, optional
        The method used to interpolate the missing data:
* ``'cubic'``: Masked data are interpolated using 2D cubic
splines. This is the default.
* ``'nearest'``: Masked data are interpolated using
nearest-neighbor interpolation.
Returns
-------
data_interp : 2D `~numpy.ndarray`
The interpolated 2D image.
"""
from scipy import interpolate
data_interp = np.array(data, copy=True)
if len(data_interp.shape) != 2:
raise ValueError('data must be a 2D array.')
if mask.shape != data.shape:
raise ValueError('mask and data must have the same shape.')
y, x = np.indices(data_interp.shape)
xy = np.dstack((x[~mask].ravel(), y[~mask].ravel()))[0]
z = data_interp[~mask].ravel()
if method == 'nearest':
interpol = interpolate.NearestNDInterpolator(xy, z)
elif method == 'cubic':
interpol = interpolate.CloughTocher2DInterpolator(xy, z)
else:
raise ValueError('Unsupported interpolation method.')
xy_missing = np.dstack((x[mask].ravel(), y[mask].ravel()))[0]
data_interp[mask] = interpol(xy_missing)
return data_interp
|
[
"def",
"_interpolate_missing_data",
"(",
"data",
",",
"mask",
",",
"method",
"=",
"'cubic'",
")",
":",
"from",
"scipy",
"import",
"interpolate",
"data_interp",
"=",
"np",
".",
"array",
"(",
"data",
",",
"copy",
"=",
"True",
")",
"if",
"len",
"(",
"data_interp",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'data must be a 2D array.'",
")",
"if",
"mask",
".",
"shape",
"!=",
"data",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'mask and data must have the same shape.'",
")",
"y",
",",
"x",
"=",
"np",
".",
"indices",
"(",
"data_interp",
".",
"shape",
")",
"xy",
"=",
"np",
".",
"dstack",
"(",
"(",
"x",
"[",
"~",
"mask",
"]",
".",
"ravel",
"(",
")",
",",
"y",
"[",
"~",
"mask",
"]",
".",
"ravel",
"(",
")",
")",
")",
"[",
"0",
"]",
"z",
"=",
"data_interp",
"[",
"~",
"mask",
"]",
".",
"ravel",
"(",
")",
"if",
"method",
"==",
"'nearest'",
":",
"interpol",
"=",
"interpolate",
".",
"NearestNDInterpolator",
"(",
"xy",
",",
"z",
")",
"elif",
"method",
"==",
"'cubic'",
":",
"interpol",
"=",
"interpolate",
".",
"CloughTocher2DInterpolator",
"(",
"xy",
",",
"z",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported interpolation method.'",
")",
"xy_missing",
"=",
"np",
".",
"dstack",
"(",
"(",
"x",
"[",
"mask",
"]",
".",
"ravel",
"(",
")",
",",
"y",
"[",
"mask",
"]",
".",
"ravel",
"(",
")",
")",
")",
"[",
"0",
"]",
"data_interp",
"[",
"mask",
"]",
"=",
"interpol",
"(",
"xy_missing",
")",
"return",
"data_interp"
] |
Interpolate missing data as identified by the ``mask`` keyword.
Parameters
----------
data : 2D `~numpy.ndarray`
An array containing the 2D image.
mask : 2D bool `~numpy.ndarray`
    A 2D boolean mask array with the same shape as the input
``data``, where a `True` value indicates the corresponding
element of ``data`` is masked. The masked data points are
those that will be interpolated.
method : {'cubic', 'nearest'}, optional
    The method used to interpolate the missing data:
* ``'cubic'``: Masked data are interpolated using 2D cubic
splines. This is the default.
* ``'nearest'``: Masked data are interpolated using
nearest-neighbor interpolation.
Returns
-------
data_interp : 2D `~numpy.ndarray`
The interpolated 2D image.
|
[
"Interpolate",
"missing",
"data",
"as",
"identified",
"by",
"the",
"mask",
"keyword",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L862-L916
|
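A self-contained sketch of mask-driven interpolation following the 'nearest' branch of ``_interpolate_missing_data`` above (the array and mask are invented; ``np.column_stack`` stands in for the ``np.dstack(...)[0]`` idiom, building the same (N, 2) point array):

import numpy as np
from scipy import interpolate

data = np.arange(25.0).reshape(5, 5)
mask = np.zeros(data.shape, dtype=bool)
mask[2, 2] = True  # one "missing" pixel
y, x = np.indices(data.shape)
xy = np.column_stack((x[~mask], y[~mask]))
interp = interpolate.NearestNDInterpolator(xy, data[~mask])
data[mask] = interp(np.column_stack((x[mask], y[mask])))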
10,644
|
astropy/photutils
|
photutils/psf/epsf.py
|
EPSFFitter._fit_star
|
def _fit_star(self, epsf, star, fitter, fitter_kwargs,
fitter_has_fit_info, fit_boxsize):
"""
Fit an ePSF model to a single star.
The input ``epsf`` will usually be modified by the fitting
routine in this function. Make a copy before calling this
function if the original is needed.
"""
if fit_boxsize is not None:
try:
xcenter, ycenter = star.cutout_center
large_slc, small_slc = overlap_slices(star.shape,
fit_boxsize,
(ycenter, xcenter),
mode='strict')
except (PartialOverlapError, NoOverlapError):
warnings.warn('The star at ({0}, {1}) cannot be fit because '
'its fitting region extends beyond the star '
'cutout image.'.format(star.center[0],
star.center[1]),
AstropyUserWarning)
star = copy.deepcopy(star)
star._fit_error_status = 1
return star
data = star.data[large_slc]
weights = star.weights[large_slc]
# define the origin of the fitting region
x0 = large_slc[1].start
y0 = large_slc[0].start
else:
# use the entire cutout image
data = star.data
weights = star.weights
# define the origin of the fitting region
x0 = 0
y0 = 0
scaled_data = data / np.prod(epsf._oversampling)
# define positions in the ePSF oversampled grid
yy, xx = np.indices(data.shape, dtype=np.float)
xx = (xx - (star.cutout_center[0] - x0)) * epsf._oversampling[0]
yy = (yy - (star.cutout_center[1] - y0)) * epsf._oversampling[1]
# define the initial guesses for fitted flux and shifts
epsf.flux = star.flux
epsf.x_0 = 0.0
epsf.y_0 = 0.0
# create copy to avoid overwriting original oversampling factor
_epsf = epsf.copy()
_epsf._oversampling = np.array([1., 1.])
try:
fitted_epsf = fitter(model=_epsf, x=xx, y=yy, z=scaled_data,
weights=weights, **fitter_kwargs)
except TypeError:
# fitter doesn't support weights
fitted_epsf = fitter(model=_epsf, x=xx, y=yy, z=scaled_data,
**fitter_kwargs)
fit_error_status = 0
if fitter_has_fit_info:
fit_info = copy.copy(fitter.fit_info)
if 'ierr' in fit_info and fit_info['ierr'] not in [1, 2, 3, 4]:
fit_error_status = 2 # fit solution was not found
else:
fit_info = None
# compute the star's fitted position
x_center = (star.cutout_center[0] +
(fitted_epsf.x_0.value / epsf._oversampling[0]))
y_center = (star.cutout_center[1] +
(fitted_epsf.y_0.value / epsf._oversampling[1]))
star = copy.deepcopy(star)
star.cutout_center = (x_center, y_center)
# set the star's flux to the ePSF-fitted flux
star.flux = fitted_epsf.flux.value
star._fit_info = fit_info
star._fit_error_status = fit_error_status
return star
|
python
|
def _fit_star(self, epsf, star, fitter, fitter_kwargs,
fitter_has_fit_info, fit_boxsize):
"""
Fit an ePSF model to a single star.
The input ``epsf`` will usually be modified by the fitting
routine in this function. Make a copy before calling this
function if the original is needed.
"""
if fit_boxsize is not None:
try:
xcenter, ycenter = star.cutout_center
large_slc, small_slc = overlap_slices(star.shape,
fit_boxsize,
(ycenter, xcenter),
mode='strict')
except (PartialOverlapError, NoOverlapError):
warnings.warn('The star at ({0}, {1}) cannot be fit because '
'its fitting region extends beyond the star '
'cutout image.'.format(star.center[0],
star.center[1]),
AstropyUserWarning)
star = copy.deepcopy(star)
star._fit_error_status = 1
return star
data = star.data[large_slc]
weights = star.weights[large_slc]
# define the origin of the fitting region
x0 = large_slc[1].start
y0 = large_slc[0].start
else:
# use the entire cutout image
data = star.data
weights = star.weights
# define the origin of the fitting region
x0 = 0
y0 = 0
scaled_data = data / np.prod(epsf._oversampling)
# define positions in the ePSF oversampled grid
yy, xx = np.indices(data.shape, dtype=np.float)
xx = (xx - (star.cutout_center[0] - x0)) * epsf._oversampling[0]
yy = (yy - (star.cutout_center[1] - y0)) * epsf._oversampling[1]
# define the initial guesses for fitted flux and shifts
epsf.flux = star.flux
epsf.x_0 = 0.0
epsf.y_0 = 0.0
# create copy to avoid overwriting original oversampling factor
_epsf = epsf.copy()
_epsf._oversampling = np.array([1., 1.])
try:
fitted_epsf = fitter(model=_epsf, x=xx, y=yy, z=scaled_data,
weights=weights, **fitter_kwargs)
except TypeError:
# fitter doesn't support weights
fitted_epsf = fitter(model=_epsf, x=xx, y=yy, z=scaled_data,
**fitter_kwargs)
fit_error_status = 0
if fitter_has_fit_info:
fit_info = copy.copy(fitter.fit_info)
if 'ierr' in fit_info and fit_info['ierr'] not in [1, 2, 3, 4]:
fit_error_status = 2 # fit solution was not found
else:
fit_info = None
# compute the star's fitted position
x_center = (star.cutout_center[0] +
(fitted_epsf.x_0.value / epsf._oversampling[0]))
y_center = (star.cutout_center[1] +
(fitted_epsf.y_0.value / epsf._oversampling[1]))
star = copy.deepcopy(star)
star.cutout_center = (x_center, y_center)
# set the star's flux to the ePSF-fitted flux
star.flux = fitted_epsf.flux.value
star._fit_info = fit_info
star._fit_error_status = fit_error_status
return star
|
[
"def",
"_fit_star",
"(",
"self",
",",
"epsf",
",",
"star",
",",
"fitter",
",",
"fitter_kwargs",
",",
"fitter_has_fit_info",
",",
"fit_boxsize",
")",
":",
"if",
"fit_boxsize",
"is",
"not",
"None",
":",
"try",
":",
"xcenter",
",",
"ycenter",
"=",
"star",
".",
"cutout_center",
"large_slc",
",",
"small_slc",
"=",
"overlap_slices",
"(",
"star",
".",
"shape",
",",
"fit_boxsize",
",",
"(",
"ycenter",
",",
"xcenter",
")",
",",
"mode",
"=",
"'strict'",
")",
"except",
"(",
"PartialOverlapError",
",",
"NoOverlapError",
")",
":",
"warnings",
".",
"warn",
"(",
"'The star at ({0}, {1}) cannot be fit because '",
"'its fitting region extends beyond the star '",
"'cutout image.'",
".",
"format",
"(",
"star",
".",
"center",
"[",
"0",
"]",
",",
"star",
".",
"center",
"[",
"1",
"]",
")",
",",
"AstropyUserWarning",
")",
"star",
"=",
"copy",
".",
"deepcopy",
"(",
"star",
")",
"star",
".",
"_fit_error_status",
"=",
"1",
"return",
"star",
"data",
"=",
"star",
".",
"data",
"[",
"large_slc",
"]",
"weights",
"=",
"star",
".",
"weights",
"[",
"large_slc",
"]",
"# define the origin of the fitting region",
"x0",
"=",
"large_slc",
"[",
"1",
"]",
".",
"start",
"y0",
"=",
"large_slc",
"[",
"0",
"]",
".",
"start",
"else",
":",
"# use the entire cutout image",
"data",
"=",
"star",
".",
"data",
"weights",
"=",
"star",
".",
"weights",
"# define the origin of the fitting region",
"x0",
"=",
"0",
"y0",
"=",
"0",
"scaled_data",
"=",
"data",
"/",
"np",
".",
"prod",
"(",
"epsf",
".",
"_oversampling",
")",
"# define positions in the ePSF oversampled grid",
"yy",
",",
"xx",
"=",
"np",
".",
"indices",
"(",
"data",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"xx",
"=",
"(",
"xx",
"-",
"(",
"star",
".",
"cutout_center",
"[",
"0",
"]",
"-",
"x0",
")",
")",
"*",
"epsf",
".",
"_oversampling",
"[",
"0",
"]",
"yy",
"=",
"(",
"yy",
"-",
"(",
"star",
".",
"cutout_center",
"[",
"1",
"]",
"-",
"y0",
")",
")",
"*",
"epsf",
".",
"_oversampling",
"[",
"1",
"]",
"# define the initial guesses for fitted flux and shifts",
"epsf",
".",
"flux",
"=",
"star",
".",
"flux",
"epsf",
".",
"x_0",
"=",
"0.0",
"epsf",
".",
"y_0",
"=",
"0.0",
"# create copy to avoid overwriting original oversampling factor",
"_epsf",
"=",
"epsf",
".",
"copy",
"(",
")",
"_epsf",
".",
"_oversampling",
"=",
"np",
".",
"array",
"(",
"[",
"1.",
",",
"1.",
"]",
")",
"try",
":",
"fitted_epsf",
"=",
"fitter",
"(",
"model",
"=",
"_epsf",
",",
"x",
"=",
"xx",
",",
"y",
"=",
"yy",
",",
"z",
"=",
"scaled_data",
",",
"weights",
"=",
"weights",
",",
"*",
"*",
"fitter_kwargs",
")",
"except",
"TypeError",
":",
"# fitter doesn't support weights",
"fitted_epsf",
"=",
"fitter",
"(",
"model",
"=",
"_epsf",
",",
"x",
"=",
"xx",
",",
"y",
"=",
"yy",
",",
"z",
"=",
"scaled_data",
",",
"*",
"*",
"fitter_kwargs",
")",
"fit_error_status",
"=",
"0",
"if",
"fitter_has_fit_info",
":",
"fit_info",
"=",
"copy",
".",
"copy",
"(",
"fitter",
".",
"fit_info",
")",
"if",
"'ierr'",
"in",
"fit_info",
"and",
"fit_info",
"[",
"'ierr'",
"]",
"not",
"in",
"[",
"1",
",",
"2",
",",
"3",
",",
"4",
"]",
":",
"fit_error_status",
"=",
"2",
"# fit solution was not found",
"else",
":",
"fit_info",
"=",
"None",
"# compute the star's fitted position",
"x_center",
"=",
"(",
"star",
".",
"cutout_center",
"[",
"0",
"]",
"+",
"(",
"fitted_epsf",
".",
"x_0",
".",
"value",
"/",
"epsf",
".",
"_oversampling",
"[",
"0",
"]",
")",
")",
"y_center",
"=",
"(",
"star",
".",
"cutout_center",
"[",
"1",
"]",
"+",
"(",
"fitted_epsf",
".",
"y_0",
".",
"value",
"/",
"epsf",
".",
"_oversampling",
"[",
"1",
"]",
")",
")",
"star",
"=",
"copy",
".",
"deepcopy",
"(",
"star",
")",
"star",
".",
"cutout_center",
"=",
"(",
"x_center",
",",
"y_center",
")",
"# set the star's flux to the ePSF-fitted flux",
"star",
".",
"flux",
"=",
"fitted_epsf",
".",
"flux",
".",
"value",
"star",
".",
"_fit_info",
"=",
"fit_info",
"star",
".",
"_fit_error_status",
"=",
"fit_error_status",
"return",
"star"
] |
Fit an ePSF model to a single star.
The input ``epsf`` will usually be modified by the fitting
routine in this function. Make a copy before calling this
function if the original is needed.
|
[
"Fit",
"an",
"ePSF",
"model",
"to",
"a",
"single",
"star",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L146-L238
|
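A sketch of the fitting-box extraction at the top of ``_fit_star``: ``overlap_slices`` maps a small fit box onto the star cutout and raises in 'strict' mode when the box extends past the cutout edge (the cutout and position are invented):

import numpy as np
from astropy.nddata.utils import overlap_slices

cutout = np.zeros((25, 25))
large_slc, small_slc = overlap_slices(cutout.shape, (7, 7),
                                      (12.0, 12.0), mode='strict')
fit_region = cutout[large_slc]  # 7x7 box centered on (y, x) = (12, 12)
x0, y0 = large_slc[1].start, large_slc[0].start  # origin of the box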
10,645
|
astropy/photutils
|
photutils/psf/epsf.py
|
EPSFBuilder._init_img_params
|
def _init_img_params(param):
"""
Initialize 2D image-type parameters that can accept either a
single or two values.
"""
if param is not None:
param = np.atleast_1d(param)
if len(param) == 1:
param = np.repeat(param, 2)
return param
|
python
|
def _init_img_params(param):
"""
Initialize 2D image-type parameters that can accept either a
single or two values.
"""
if param is not None:
param = np.atleast_1d(param)
if len(param) == 1:
param = np.repeat(param, 2)
return param
|
[
"def",
"_init_img_params",
"(",
"param",
")",
":",
"if",
"param",
"is",
"not",
"None",
":",
"param",
"=",
"np",
".",
"atleast_1d",
"(",
"param",
")",
"if",
"len",
"(",
"param",
")",
"==",
"1",
":",
"param",
"=",
"np",
".",
"repeat",
"(",
"param",
",",
"2",
")",
"return",
"param"
] |
Initialize 2D image-type parameters that can accept either a
single or two values.
|
[
"Initialize",
"2D",
"image",
"-",
"type",
"parameters",
"that",
"can",
"accept",
"either",
"a",
"single",
"or",
"two",
"values",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L369-L380
|
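A sketch of the scalar-or-pair normalization performed by ``_init_img_params`` above (the helper name ``as_pair`` is hypothetical):

import numpy as np

def as_pair(param):
    # None passes through; a scalar becomes a length-2 array;
    # a two-element input is returned unchanged
    if param is not None:
        param = np.atleast_1d(param)
        if len(param) == 1:
            param = np.repeat(param, 2)
    return param

as_pair(4)       # -> array([4, 4])
as_pair((4, 6))  # -> array([4, 6])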
10,646
|
astropy/photutils
|
photutils/psf/epsf.py
|
EPSFBuilder._create_initial_epsf
|
def _create_initial_epsf(self, stars):
"""
Create an initial `EPSFModel` object.
The initial ePSF data are all zeros.
If ``shape`` is not specified, the shape of the ePSF data array
is determined from the shape of the input ``stars`` and the
oversampling factor. If the size is even along any axis, it
will be made odd by adding one. The output ePSF will always
have odd sizes along both axes to ensure a central pixel.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
Returns
-------
epsf : `EPSFModel`
The initial ePSF model.
"""
oversampling = self.oversampling
shape = self.shape
# define the ePSF shape
if shape is not None:
shape = np.atleast_1d(shape).astype(int)
if len(shape) == 1:
shape = np.repeat(shape, 2)
else:
x_shape = np.int(np.ceil(stars._max_shape[1] *
oversampling[0]))
y_shape = np.int(np.ceil(stars._max_shape[0] *
oversampling[1]))
shape = np.array((y_shape, x_shape))
# ensure odd sizes
shape = [(i + 1) if i % 2 == 0 else i for i in shape]
data = np.zeros(shape, dtype=np.float)
xcenter = (shape[1] - 1) / 2.
ycenter = (shape[0] - 1) / 2.
epsf = EPSFModel(data=data, origin=(xcenter, ycenter),
normalize=False, oversampling=oversampling)
return epsf
|
python
|
def _create_initial_epsf(self, stars):
"""
Create an initial `EPSFModel` object.
The initial ePSF data are all zeros.
If ``shape`` is not specified, the shape of the ePSF data array
is determined from the shape of the input ``stars`` and the
oversampling factor. If the size is even along any axis, it
will be made odd by adding one. The output ePSF will always
have odd sizes along both axes to ensure a central pixel.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
Returns
-------
epsf : `EPSFModel`
The initial ePSF model.
"""
oversampling = self.oversampling
shape = self.shape
# define the ePSF shape
if shape is not None:
shape = np.atleast_1d(shape).astype(int)
if len(shape) == 1:
shape = np.repeat(shape, 2)
else:
x_shape = np.int(np.ceil(stars._max_shape[1] *
oversampling[0]))
y_shape = np.int(np.ceil(stars._max_shape[0] *
oversampling[1]))
shape = np.array((y_shape, x_shape))
# ensure odd sizes
shape = [(i + 1) if i % 2 == 0 else i for i in shape]
data = np.zeros(shape, dtype=np.float)
xcenter = (shape[1] - 1) / 2.
ycenter = (shape[0] - 1) / 2.
epsf = EPSFModel(data=data, origin=(xcenter, ycenter),
normalize=False, oversampling=oversampling)
return epsf
|
[
"def",
"_create_initial_epsf",
"(",
"self",
",",
"stars",
")",
":",
"oversampling",
"=",
"self",
".",
"oversampling",
"shape",
"=",
"self",
".",
"shape",
"# define the ePSF shape",
"if",
"shape",
"is",
"not",
"None",
":",
"shape",
"=",
"np",
".",
"atleast_1d",
"(",
"shape",
")",
".",
"astype",
"(",
"int",
")",
"if",
"len",
"(",
"shape",
")",
"==",
"1",
":",
"shape",
"=",
"np",
".",
"repeat",
"(",
"shape",
",",
"2",
")",
"else",
":",
"x_shape",
"=",
"np",
".",
"int",
"(",
"np",
".",
"ceil",
"(",
"stars",
".",
"_max_shape",
"[",
"1",
"]",
"*",
"oversampling",
"[",
"0",
"]",
")",
")",
"y_shape",
"=",
"np",
".",
"int",
"(",
"np",
".",
"ceil",
"(",
"stars",
".",
"_max_shape",
"[",
"0",
"]",
"*",
"oversampling",
"[",
"1",
"]",
")",
")",
"shape",
"=",
"np",
".",
"array",
"(",
"(",
"y_shape",
",",
"x_shape",
")",
")",
"# ensure odd sizes",
"shape",
"=",
"[",
"(",
"i",
"+",
"1",
")",
"if",
"i",
"%",
"2",
"==",
"0",
"else",
"i",
"for",
"i",
"in",
"shape",
"]",
"data",
"=",
"np",
".",
"zeros",
"(",
"shape",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"xcenter",
"=",
"(",
"shape",
"[",
"1",
"]",
"-",
"1",
")",
"/",
"2.",
"ycenter",
"=",
"(",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
"/",
"2.",
"epsf",
"=",
"EPSFModel",
"(",
"data",
"=",
"data",
",",
"origin",
"=",
"(",
"xcenter",
",",
"ycenter",
")",
",",
"normalize",
"=",
"False",
",",
"oversampling",
"=",
"oversampling",
")",
"return",
"epsf"
] |
Create an initial `EPSFModel` object.
The initial ePSF data are all zeros.
If ``shape`` is not specified, the shape of the ePSF data array
is determined from the shape of the input ``stars`` and the
oversampling factor. If the size is even along any axis, it
will be made odd by adding one. The output ePSF will always
have odd sizes along both axes to ensure a central pixel.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
Returns
-------
epsf : `EPSFModel`
The initial ePSF model.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L382-L430
|
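A minimal standalone sketch of the shape logic in `_create_initial_epsf`
above: derive an oversampled ePSF shape from a star-cutout shape and force
both axes to be odd so a central pixel exists. The cutout size and
oversampling factors are made-up values for illustration.

import numpy as np

max_shape = (25, 25)             # (ny, nx) of the largest star cutout
oversampling = (4, 4)            # (x, y) oversampling factors

x_shape = int(np.ceil(max_shape[1] * oversampling[0]))
y_shape = int(np.ceil(max_shape[0] * oversampling[1]))
shape = [(i + 1) if i % 2 == 0 else i for i in (y_shape, x_shape)]

data = np.zeros(shape, dtype=float)
xcenter = (shape[1] - 1) / 2.0   # center of the central pixel
ycenter = (shape[0] - 1) / 2.0
print(shape, (xcenter, ycenter))  # [101, 101] (50.0, 50.0)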
10,647
|
astropy/photutils
|
photutils/psf/epsf.py
|
EPSFBuilder._resample_residual
|
def _resample_residual(self, star, epsf):
"""
Compute a normalized residual image in the oversampled ePSF
grid.
A normalized residual image is calculated by subtracting the
normalized ePSF model from the normalized star at the location
of the star in the undersampled grid. The normalized residual
image is then resampled from the undersampled star grid to the
oversampled ePSF grid.
Parameters
----------
star : `EPSFStar` object
A single star object.
epsf : `EPSFModel` object
The ePSF model.
Returns
-------
image : 2D `~numpy.ndarray`
A 2D image containing the resampled residual image. The
image contains NaNs where there is no data.
"""
# find the integer index of EPSFStar pixels in the oversampled
# ePSF grid
x = epsf._oversampling[0] * star._xidx_centered
y = epsf._oversampling[1] * star._yidx_centered
epsf_xcenter, epsf_ycenter = epsf.origin
xidx = _py2intround(x + epsf_xcenter)
yidx = _py2intround(y + epsf_ycenter)
mask = np.logical_and(np.logical_and(xidx >= 0, xidx < epsf.shape[1]),
np.logical_and(yidx >= 0, yidx < epsf.shape[0]))
xidx = xidx[mask]
yidx = yidx[mask]
# Compute the normalized residual image by subtracting the
# normalized ePSF model from the normalized star at the location
# of the star in the undersampled grid. Then, resample the
# normalized residual image in the oversampled ePSF grid.
# [(star - (epsf * xov * yov)) / (xov * yov)]
# --> [(star / (xov * yov)) - epsf]
stardata = ((star._data_values_normalized / np.prod(epsf._oversampling)) -
epsf.evaluate(x=x, y=y, flux=1.0, x_0=0.0, y_0=0.0,
use_oversampling=False))
resampled_img = np.full(epsf.shape, np.nan)
resampled_img[yidx, xidx] = stardata[mask]
return resampled_img
|
python
|
Compute a normalized residual image in the oversampled ePSF
grid.
A normalized residual image is calculated by subtracting the
normalized ePSF model from the normalized star at the location
of the star in the undersampled grid. The normalized residual
image is then resampled from the undersampled star grid to the
oversampled ePSF grid.
Parameters
----------
star : `EPSFStar` object
A single star object.
epsf : `EPSFModel` object
The ePSF model.
Returns
-------
image : 2D `~numpy.ndarray`
A 2D image containing the resampled residual image. The
image contains NaNs where there is no data.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L432-L484
|
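A small self-contained sketch of the resampling step in
`_resample_residual` above: scatter per-pixel residual samples onto an
oversampled grid, leaving NaN where no star pixel lands. All values are
made up, and np.rint stands in for the module's `_py2intround` helper
(they differ only in how exact halves are rounded).

import numpy as np

oversampling = (4, 4)
epsf_shape = (21, 21)
origin = ((epsf_shape[1] - 1) / 2, (epsf_shape[0] - 1) / 2)

# star-pixel offsets from the star center on the undersampled grid
xc = np.array([-1.0, 0.0, 1.0])
yc = np.array([-1.0, 0.0, 1.0])
residual = np.array([0.01, -0.02, 0.005])

x = oversampling[0] * xc
y = oversampling[1] * yc
xidx = np.rint(x + origin[0]).astype(int)
yidx = np.rint(y + origin[1]).astype(int)
mask = ((xidx >= 0) & (xidx < epsf_shape[1]) &
        (yidx >= 0) & (yidx < epsf_shape[0]))

resampled = np.full(epsf_shape, np.nan)
resampled[yidx[mask], xidx[mask]] = residual[mask]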
10,648
|
astropy/photutils
|
photutils/psf/epsf.py
|
EPSFBuilder._resample_residuals
|
def _resample_residuals(self, stars, epsf):
"""
Compute normalized residual images for all the input stars.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object
The ePSF model.
Returns
-------
star_imgs : 3D `~numpy.ndarray`
A 3D cube containing the resampled residual images.
"""
shape = (stars.n_good_stars, epsf.shape[0], epsf.shape[1])
star_imgs = np.zeros(shape)
for i, star in enumerate(stars.all_good_stars):
star_imgs[i, :, :] = self._resample_residual(star, epsf)
return star_imgs
|
python
|
Compute normalized residual images for all the input stars.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object
The ePSF model.
Returns
-------
star_imgs : 3D `~numpy.ndarray`
A 3D cube containing the resampled residual images.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L486-L509
|
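Illustrative only: the 3D-cube assembly performed by `_resample_residuals`
above, with a random stand-in for the per-star residual call.

import numpy as np

epsf_shape = (11, 11)
n_stars = 5
rng = np.random.default_rng(0)

cube = np.zeros((n_stars,) + epsf_shape)
for i in range(n_stars):
    # stand-in for self._resample_residual(star, epsf)
    cube[i] = rng.normal(scale=1e-3, size=epsf_shape)
print(cube.shape)  # (5, 11, 11)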
10,649
|
astropy/photutils
|
photutils/psf/epsf.py
|
EPSFBuilder._smooth_epsf
|
def _smooth_epsf(self, epsf_data):
"""
Smooth the ePSF array by convolving it with a kernel.
Parameters
----------
epsf_data : 2D `~numpy.ndarray`
A 2D array containing the ePSF image.
Returns
-------
result : 2D `~numpy.ndarray`
The smoothed (convolved) ePSF data.
"""
from scipy.ndimage import convolve
if self.smoothing_kernel is None:
return epsf_data
elif self.smoothing_kernel == 'quartic':
# from Polynomial2D fit with degree=4 to 5x5 array of
# zeros with 1. at the center
# Polynomial2D(4, c0_0=0.04163265, c1_0=-0.76326531,
# c2_0=0.99081633, c3_0=-0.4, c4_0=0.05,
# c0_1=-0.76326531, c0_2=0.99081633, c0_3=-0.4,
# c0_4=0.05, c1_1=0.32653061, c1_2=-0.08163265,
# c1_3=0., c2_1=-0.08163265, c2_2=0.02040816,
        #               c3_1=-0.)
kernel = np.array(
[[+0.041632, -0.080816, 0.078368, -0.080816, +0.041632],
[-0.080816, -0.019592, 0.200816, -0.019592, -0.080816],
[+0.078368, +0.200816, 0.441632, +0.200816, +0.078368],
[-0.080816, -0.019592, 0.200816, -0.019592, -0.080816],
[+0.041632, -0.080816, 0.078368, -0.080816, +0.041632]])
elif self.smoothing_kernel == 'quadratic':
# from Polynomial2D fit with degree=2 to 5x5 array of
# zeros with 1. at the center
# Polynomial2D(2, c0_0=-0.07428571, c1_0=0.11428571,
# c2_0=-0.02857143, c0_1=0.11428571,
# c0_2=-0.02857143, c1_1=-0.)
kernel = np.array(
[[-0.07428311, 0.01142786, 0.03999952, 0.01142786,
-0.07428311],
[+0.01142786, 0.09714283, 0.12571449, 0.09714283,
+0.01142786],
[+0.03999952, 0.12571449, 0.15428215, 0.12571449,
+0.03999952],
[+0.01142786, 0.09714283, 0.12571449, 0.09714283,
+0.01142786],
[-0.07428311, 0.01142786, 0.03999952, 0.01142786,
-0.07428311]])
elif isinstance(self.smoothing_kernel, np.ndarray):
            kernel = self.smoothing_kernel
else:
raise TypeError('Unsupported kernel.')
return convolve(epsf_data, kernel)
|
python
|
Smooth the ePSF array by convolving it with a kernel.
Parameters
----------
epsf_data : 2D `~numpy.ndarray`
A 2D array containing the ePSF image.
Returns
-------
result : 2D `~numpy.ndarray`
The smoothed (convolved) ePSF data.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L511-L571
|
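A hedged usage sketch of the smoothing step above: convolving a delta
function with the 5x5 'quartic' kernel via scipy.ndimage.convolve simply
reproduces the kernel centered on the delta. The test image is made up.

import numpy as np
from scipy.ndimage import convolve

kernel = np.array(
    [[+0.041632, -0.080816, 0.078368, -0.080816, +0.041632],
     [-0.080816, -0.019592, 0.200816, -0.019592, -0.080816],
     [+0.078368, +0.200816, 0.441632, +0.200816, +0.078368],
     [-0.080816, -0.019592, 0.200816, -0.019592, -0.080816],
     [+0.041632, -0.080816, 0.078368, -0.080816, +0.041632]])

image = np.zeros((9, 9))
image[4, 4] = 1.0                    # a delta function
smoothed = convolve(image, kernel)   # kernel reproduced around (4, 4)
print(np.allclose(smoothed[2:7, 2:7], kernel))  # True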
10,650
|
astropy/photutils
|
photutils/psf/epsf.py
|
EPSFBuilder._recenter_epsf
|
def _recenter_epsf(self, epsf_data, epsf, centroid_func=centroid_com,
box_size=5, maxiters=20, center_accuracy=1.0e-4):
"""
Calculate the center of the ePSF data and shift the data so the
ePSF center is at the center of the ePSF data array.
Parameters
----------
epsf_data : 2D `~numpy.ndarray`
A 2D array containing the ePSF image.
epsf : `EPSFModel` object
The ePSF model.
centroid_func : callable, optional
A callable object (e.g. function or class) that is used to
calculate the centroid of a 2D array. The callable must
accept a 2D `~numpy.ndarray`, have a ``mask`` keyword and
optionally an ``error`` keyword. The callable object must
return a tuple of two 1D `~numpy.ndarray`\\s, representing
the x and y centroids. The default is
`~photutils.centroids.centroid_com`.
    box_size : int or tuple of two int, optional
The size (in pixels) of the box used to calculate the
centroid of the ePSF during each build iteration. If a
single integer number is provided, then a square box will be
used. If two values are provided, then they should be in
``(ny, nx)`` order. The default is 5.
maxiters : int, optional
The maximum number of recentering iterations to perform.
The default is 20.
center_accuracy : float, optional
The desired accuracy for the centers of stars. The building
iterations will stop if the center of the ePSF changes by
less than ``center_accuracy`` pixels between iterations.
The default is 1.0e-4.
Returns
-------
result : 2D `~numpy.ndarray`
The recentered ePSF data.
"""
# Define an EPSFModel for the input data. This EPSFModel will be
# used to evaluate the model on a shifted pixel grid to place the
# centroid at the array center.
epsf = EPSFModel(data=epsf_data, origin=epsf.origin, normalize=False,
oversampling=epsf.oversampling)
epsf.fill_value = 0.0
xcenter, ycenter = epsf.origin
dx_total = 0
dy_total = 0
        y, x = np.indices(epsf_data.shape, dtype=float)
iter_num = 0
center_accuracy_sq = center_accuracy ** 2
center_dist_sq = center_accuracy_sq + 1.e6
center_dist_sq_prev = center_dist_sq + 1
while (iter_num < maxiters and
center_dist_sq >= center_accuracy_sq):
iter_num += 1
# extract a cutout from the ePSF
slices_large, slices_small = overlap_slices(epsf_data.shape,
box_size,
(ycenter, xcenter))
epsf_cutout = epsf_data[slices_large]
mask = ~np.isfinite(epsf_cutout)
# find a new center position
xcenter_new, ycenter_new = centroid_func(epsf_cutout, mask=mask)
xcenter_new += slices_large[1].start
ycenter_new += slices_large[0].start
# calculate the shift
dx = xcenter - xcenter_new
dy = ycenter - ycenter_new
center_dist_sq = dx**2 + dy**2
if center_dist_sq >= center_dist_sq_prev: # don't shift
break
center_dist_sq_prev = center_dist_sq
# Resample the ePSF data to a shifted grid to place the
# centroid in the center of the central pixel. The shift is
# always performed on the input epsf_data.
dx_total += dx # accumulated shifts for the input epsf_data
dy_total += dy
epsf_data = epsf.evaluate(x=x, y=y, flux=1.0,
x_0=xcenter + dx_total,
y_0=ycenter + dy_total,
use_oversampling=False)
return epsf_data
|
python
|
Calculate the center of the ePSF data and shift the data so the
ePSF center is at the center of the ePSF data array.
Parameters
----------
epsf_data : 2D `~numpy.ndarray`
A 2D array containing the ePSF image.
epsf : `EPSFModel` object
The ePSF model.
centroid_func : callable, optional
A callable object (e.g. function or class) that is used to
calculate the centroid of a 2D array. The callable must
accept a 2D `~numpy.ndarray`, have a ``mask`` keyword and
optionally an ``error`` keyword. The callable object must
return a tuple of two 1D `~numpy.ndarray`\\s, representing
the x and y centroids. The default is
`~photutils.centroids.centroid_com`.
box_size : int or tuple of two int, optional
The size (in pixels) of the box used to calculate the
centroid of the ePSF during each build iteration. If a
single integer number is provided, then a square box will be
used. If two values are provided, then they should be in
``(ny, nx)`` order. The default is 5.
maxiters : int, optional
The maximum number of recentering iterations to perform.
The default is 20.
center_accuracy : float, optional
The desired accuracy for the centers of stars. The building
iterations will stop if the center of the ePSF changes by
less than ``center_accuracy`` pixels between iterations.
The default is 1.0e-4.
Returns
-------
result : 2D `~numpy.ndarray`
The recentered ePSF data.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L573-L671
|
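A minimal sketch of one recentering iteration from `_recenter_epsf` above:
cut a small box around the current center with astropy's overlap_slices,
centroid it, and measure the shift from the array center. The centroid
function here is a simple center-of-mass stand-in for photutils'
centroid_com, and the peak position is made up.

import numpy as np
from astropy.nddata.utils import overlap_slices

def centroid_com(data):
    # simple center-of-mass centroid (stand-in for the photutils version)
    y, x = np.indices(data.shape)
    total = data.sum()
    return (x * data).sum() / total, (y * data).sum() / total

epsf_data = np.zeros((21, 21))
epsf_data[10, 11] = 1.0              # peak offset by +1 pixel in x
xcenter = ycenter = 10.0

slices_large, _ = overlap_slices(epsf_data.shape, (5, 5),
                                 (ycenter, xcenter))
cutout = epsf_data[slices_large]
xnew, ynew = centroid_com(cutout)
xnew += slices_large[1].start
ynew += slices_large[0].start
dx, dy = xcenter - xnew, ycenter - ynew
print(dx, dy)                        # -1.0 0.0 -> shift to apply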
10,651
|
astropy/photutils
|
photutils/psf/epsf.py
|
EPSFBuilder._build_epsf_step
|
def _build_epsf_step(self, stars, epsf=None):
"""
A single iteration of improving an ePSF.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The updated ePSF.
"""
if len(stars) < 1:
raise ValueError('stars must contain at least one EPSFStar or '
'LinkedEPSFStar object.')
if epsf is None:
# create an initial ePSF (array of zeros)
epsf = self._create_initial_epsf(stars)
else:
# improve the input ePSF
epsf = copy.deepcopy(epsf)
# compute a 3D stack of 2D residual images
residuals = self._resample_residuals(stars, epsf)
self._residuals.append(residuals)
# compute the sigma-clipped median along the 3D stack
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
warnings.simplefilter('ignore', category=AstropyUserWarning)
residuals = self.sigclip(residuals, axis=0, masked=False,
return_bounds=False)
if HAS_BOTTLENECK:
residuals = bottleneck.nanmedian(residuals, axis=0)
else:
residuals = np.nanmedian(residuals, axis=0)
self._residuals_sigclip.append(residuals)
# interpolate any missing data (np.nan)
mask = ~np.isfinite(residuals)
if np.any(mask):
residuals = _interpolate_missing_data(residuals, mask,
method='cubic')
# fill any remaining nans (outer points) with zeros
residuals[~np.isfinite(residuals)] = 0.
self._residuals_interp.append(residuals)
# add the residuals to the previous ePSF image
new_epsf = epsf.normalized_data + residuals
# smooth the ePSF
new_epsf = self._smooth_epsf(new_epsf)
# recenter the ePSF
new_epsf = self._recenter_epsf(new_epsf, epsf,
centroid_func=self.recentering_func,
box_size=self.recentering_boxsize,
maxiters=self.recentering_maxiters,
center_accuracy=1.0e-4)
# normalize the ePSF data
new_epsf /= np.sum(new_epsf, dtype=np.float64)
# return the new ePSF object
xcenter = (new_epsf.shape[1] - 1) / 2.
ycenter = (new_epsf.shape[0] - 1) / 2.
epsf_new = EPSFModel(data=new_epsf, origin=(xcenter, ycenter),
normalize=False, oversampling=epsf.oversampling)
return epsf_new
|
python
|
A single iteration of improving an ePSF.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The updated ePSF.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L673-L755
|
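An illustrative sketch of the stacking step in `_build_epsf_step` above:
sigma-clip a cube of residual images along the star axis with astropy's
SigmaClip (the method's `self.sigclip` is such an instance), then take the
nanmedian. The cube is random stand-in data with one injected outlier.

import warnings
import numpy as np
from astropy.stats import SigmaClip

rng = np.random.default_rng(1)
cube = rng.normal(size=(10, 7, 7))
cube[3, 2, 2] = 50.0                 # an outlier to be clipped

sigclip = SigmaClip(sigma=3.0, maxiters=10)
with warnings.catch_warnings():
    warnings.simplefilter('ignore', category=RuntimeWarning)
    clipped = sigclip(cube, axis=0, masked=False, return_bounds=False)
residual = np.nanmedian(clipped, axis=0)
print(residual.shape)                # (7, 7)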
10,652
|
astropy/photutils
|
photutils/psf/epsf.py
|
EPSFBuilder.build_epsf
|
def build_epsf(self, stars, init_epsf=None):
"""
Iteratively build an ePSF from star cutouts.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
init_epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The constructed ePSF.
fitted_stars : `EPSFStars` object
The input stars with updated centers and fluxes derived
from fitting the output ``epsf``.
"""
iter_num = 0
center_dist_sq = self.center_accuracy_sq + 1.
centers = stars.cutout_center_flat
n_stars = stars.n_stars
fit_failed = np.zeros(n_stars, dtype=bool)
        dx_dy = np.zeros((n_stars, 2), dtype=float)
epsf = init_epsf
dt = 0.
while (iter_num < self.maxiters and
np.max(center_dist_sq) >= self.center_accuracy_sq and
not np.all(fit_failed)):
t_start = time.time()
iter_num += 1
if self.progress_bar:
if iter_num == 1:
dt_str = ' [? s/iter]'
else:
dt_str = ' [{:.1f} s/iter]'.format(dt)
print('PROGRESS: iteration {0:d} (of max {1}){2}'
.format(iter_num, self.maxiters, dt_str), end='\r')
# build/improve the ePSF
epsf = self._build_epsf_step(stars, epsf=epsf)
# fit the new ePSF to the stars to find improved centers
# we catch fit warnings here -- stars with unsuccessful fits
# are excluded from the ePSF build process
with warnings.catch_warnings():
message = '.*The fit may be unsuccessful;.*'
warnings.filterwarnings('ignore', message=message,
category=AstropyUserWarning)
stars = self.fitter(epsf, stars)
# find all stars where the fit failed
fit_failed = np.array([star._fit_error_status > 0
for star in stars.all_stars])
if np.all(fit_failed):
raise ValueError('The ePSF fitting failed for all stars.')
# permanently exclude fitting any star where the fit fails
# after 3 iterations
if iter_num > 3 and np.any(fit_failed):
idx = fit_failed.nonzero()[0]
for i in idx:
stars.all_stars[i]._excluded_from_fit = True
dx_dy = stars.cutout_center_flat - centers
dx_dy = dx_dy[np.logical_not(fit_failed)]
center_dist_sq = np.sum(dx_dy * dx_dy, axis=1, dtype=np.float64)
centers = stars.cutout_center_flat
self._nfit_failed.append(np.count_nonzero(fit_failed))
self._center_dist_sq.append(center_dist_sq)
self._max_center_dist_sq.append(np.max(center_dist_sq))
self._epsf.append(epsf)
dt = time.time() - t_start
return epsf, stars
|
python
|
Iteratively build an ePSF from star cutouts.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
init_epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The constructed ePSF.
fitted_stars : `EPSFStars` object
The input stars with updated centers and fluxes derived
from fitting the output ``epsf``.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L757-L842
|
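A schematic, self-contained version of the convergence test used by
`build_epsf` above: iterate until the maximum squared center shift drops
below the accuracy threshold or maxiters is reached. The "refit" centers
are faked with geometrically shrinking shifts.

import numpy as np

rng = np.random.default_rng(2)
centers = rng.uniform(0, 10, size=(5, 2))
center_accuracy_sq = 1.0e-3 ** 2
maxiters = 10

iter_num = 0
center_dist_sq = center_accuracy_sq + 1.0
while (iter_num < maxiters and
       np.max(center_dist_sq) >= center_accuracy_sq):
    iter_num += 1
    new_centers = centers + 0.1 ** iter_num   # stand-in for refit centers
    dx_dy = new_centers - centers
    center_dist_sq = np.sum(dx_dy * dx_dy, axis=1, dtype=np.float64)
    centers = new_centers
print(iter_num)                                # converges after 4 iterations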
10,653
|
astropy/photutils
|
photutils/psf/models.py
|
FittableImageModel._set_oversampling
|
def _set_oversampling(self, value):
"""
        This is a private method because it is used in the initializer
        via the ``oversampling`` property setter.
"""
try:
value = np.atleast_1d(value).astype(float)
if len(value) == 1:
value = np.repeat(value, 2)
except ValueError:
raise ValueError('Oversampling factors must be float')
if np.any(value <= 0):
raise ValueError('Oversampling factors must be greater than 0')
self._oversampling = value
|
python
|
This is a private method because it is used in the initializer via the
``oversampling`` property setter.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L234-L249
|
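A small demonstration of the normalization implemented above: scalar
oversampling factors are broadcast to two axes, and non-numeric or
non-positive values raise ValueError. The function is a standalone copy
for illustration.

import numpy as np

def normalize_oversampling(value):
    try:
        value = np.atleast_1d(value).astype(float)
        if len(value) == 1:
            value = np.repeat(value, 2)
    except ValueError:
        raise ValueError('Oversampling factors must be float')
    if np.any(value <= 0):
        raise ValueError('Oversampling factors must be greater than 0')
    return value

print(normalize_oversampling(4))       # [4. 4.]
print(normalize_oversampling((4, 2)))  # [4. 2.]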
10,654
|
astropy/photutils
|
photutils/psf/models.py
|
FittableImageModel.evaluate
|
def evaluate(self, x, y, flux, x_0, y_0, use_oversampling=True):
"""
Evaluate the model on some input variables and provided model
parameters.
Parameters
----------
use_oversampling : bool, optional
Whether to use the oversampling factor to calculate the
model pixel indices. The default is `True`, which means the
            input indices will be multiplied by this factor.
"""
if use_oversampling:
xi = self._oversampling[0] * (np.asarray(x) - x_0)
yi = self._oversampling[1] * (np.asarray(y) - y_0)
else:
xi = np.asarray(x) - x_0
yi = np.asarray(y) - y_0
xi += self._x_origin
yi += self._y_origin
f = flux * self._normalization_constant
evaluated_model = f * self.interpolator.ev(xi, yi)
if self._fill_value is not None:
# find indices of pixels that are outside the input pixel grid and
# set these pixels to the 'fill_value':
invalid = (((xi < 0) | (xi > self._nx - 1)) |
((yi < 0) | (yi > self._ny - 1)))
evaluated_model[invalid] = self._fill_value
return evaluated_model
|
python
|
Evaluate the model on some input variables and provided model
parameters.
Parameters
----------
use_oversampling : bool, optional
Whether to use the oversampling factor to calculate the
model pixel indices. The default is `True`, which means the
    input indices will be multiplied by this factor.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L453-L486
|
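Illustrative only: the coordinate mapping performed by evaluate() above,
turning world x positions into oversampled image-grid indices. The origin
and oversampling values are made up.

import numpy as np

oversampling = (4, 4)
x_origin = 50.0                  # image-grid origin of the model
x_0 = 12.25                      # model center in world coordinates

x = np.array([12.0, 12.25, 12.5])
xi = oversampling[0] * (x - x_0) + x_origin
print(xi)                        # [49. 50. 51.] -> oversampled indices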
10,655
|
astropy/photutils
|
photutils/psf/models.py
|
GriddedPSFModel._find_bounds_1d
|
def _find_bounds_1d(data, x):
"""
Find the index of the lower bound where ``x`` should be inserted
    into ``data`` to maintain order.
The index of the upper bound is the index of the lower bound
plus 2. Both bound indices must be within the array.
Parameters
----------
data : 1D `~numpy.ndarray`
The 1D array to search.
x : float
The value to insert.
Returns
-------
index : int
The index of the lower bound.
"""
idx = np.searchsorted(data, x)
if idx == 0:
idx0 = 0
elif idx == len(data): # pragma: no cover
idx0 = idx - 2
else:
idx0 = idx - 1
return idx0
|
python
|
Find the index of the lower bound where ``x`` should be inserted
into ``data`` to maintain order.
The index of the upper bound is the index of the lower bound
plus 2. Both bound indices must be within the array.
Parameters
----------
data : 1D `~numpy.ndarray`
The 1D array to search.
x : float
The value to insert.
Returns
-------
index : int
The index of the lower bound.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L582-L612
|
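A quick standalone check of the lower-bound logic above on a sorted grid:
interior values map to the grid index just below them, and values past
either end clamp so that both bounds (idx0 and idx0 + 2) stay inside the
array.

import numpy as np

def find_bounds_1d(data, x):
    idx = np.searchsorted(data, x)
    if idx == 0:
        return 0
    if idx == len(data):
        return idx - 2
    return idx - 1

grid = np.array([0.0, 10.0, 20.0, 30.0])
print(find_bounds_1d(grid, -5.0))   # 0
print(find_bounds_1d(grid, 15.0))   # 1
print(find_bounds_1d(grid, 99.0))   # 2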
10,656
|
astropy/photutils
|
photutils/psf/models.py
|
GriddedPSFModel._bilinear_interp
|
def _bilinear_interp(xyref, zref, xi, yi):
"""
Perform bilinear interpolation of four 2D arrays located at
points on a regular grid.
Parameters
----------
xyref : list of 4 (x, y) pairs
A list of 4 ``(x, y)`` pairs that form a rectangle.
    zref : 3D `~numpy.ndarray`
        A 3D `~numpy.ndarray` of shape ``(4, nx, ny)``. The first
        axis corresponds to ``xyref``, i.e. ``zref[0, :, :]`` is
        the 2D array located at ``xyref[0]``.
xi, yi : float
The ``(xi, yi)`` point at which to perform the
interpolation. The ``(xi, yi)`` point must lie within the
rectangle defined by ``xyref``.
Returns
-------
result : 2D `~numpy.ndarray`
The 2D interpolated array.
"""
if len(xyref) != 4:
raise ValueError('xyref must contain only 4 (x, y) pairs')
if zref.shape[0] != 4:
raise ValueError('zref must have a length of 4 on the first '
'axis.')
xyref = [tuple(i) for i in xyref]
idx = sorted(range(len(xyref)), key=xyref.__getitem__)
xyref = sorted(xyref) # sort by x, then y
(x0, y0), (_x0, y1), (x1, _y0), (_x1, _y1) = xyref
if x0 != _x0 or x1 != _x1 or y0 != _y0 or y1 != _y1:
        raise ValueError('The xyref points do not form a rectangle.')
if not np.isscalar(xi):
xi = xi[0]
if not np.isscalar(yi):
yi = yi[0]
if not x0 <= xi <= x1 or not y0 <= yi <= y1:
raise ValueError('The (x, y) input is not within the rectangle '
'defined by xyref.')
data = np.asarray(zref)[idx]
weights = np.array([(x1 - xi) * (y1 - yi), (x1 - xi) * (yi - y0),
(xi - x0) * (y1 - yi), (xi - x0) * (yi - y0)])
norm = (x1 - x0) * (y1 - y0)
return np.sum(data * weights[:, None, None], axis=0) / norm
|
python
|
Perform bilinear interpolation of four 2D arrays located at
points on a regular grid.
Parameters
----------
xyref : list of 4 (x, y) pairs
A list of 4 ``(x, y)`` pairs that form a rectangle.
zref : 3D `~numpy.ndarray`
    A 3D `~numpy.ndarray` of shape ``(4, nx, ny)``. The first
    axis corresponds to ``xyref``, i.e. ``zref[0, :, :]`` is
    the 2D array located at ``xyref[0]``.
xi, yi : float
The ``(xi, yi)`` point at which to perform the
interpolation. The ``(xi, yi)`` point must lie within the
rectangle defined by ``xyref``.
Returns
-------
result : 2D `~numpy.ndarray`
The 2D interpolated array.
|
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L651-L706
|
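A standalone check of the bilinear weighting used above: four constant
2x2 "reference PSFs" at the corners of a square, interpolated at the
square's center, average to the mean of the four corner values. All
numbers are made up.

import numpy as np

xyref = [(0, 0), (0, 10), (10, 0), (10, 10)]   # sorted by x, then y
zref = np.array([np.full((2, 2), v) for v in (1.0, 2.0, 3.0, 4.0)])
xi, yi = 5.0, 5.0

(x0, y0), (x1, y1) = xyref[0], xyref[3]
weights = np.array([(x1 - xi) * (y1 - yi), (x1 - xi) * (yi - y0),
                    (xi - x0) * (y1 - yi), (xi - x0) * (yi - y0)])
norm = (x1 - x0) * (y1 - y0)
result = np.sum(zref * weights[:, None, None], axis=0) / norm
print(result)                                  # all elements are 2.5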
10,657
|
astropy/photutils
|
photutils/psf/models.py
|
GriddedPSFModel.evaluate
|
def evaluate(self, x, y, flux, x_0, y_0):
"""
Evaluate the `GriddedPSFModel` for the input parameters.
"""
# NOTE: this is needed because the PSF photometry routines input
# length-1 values instead of scalars. TODO: fix the photometry
# routines.
if not np.isscalar(x_0):
x_0 = x_0[0]
if not np.isscalar(y_0):
y_0 = y_0[0]
if (x_0 < self._xgrid_min or x_0 > self._xgrid_max or
y_0 < self._ygrid_min or y_0 > self._ygrid_max):
# position is outside of the grid, so simply use the
# closest reference PSF
self._ref_indices = np.argsort(np.hypot(self._grid_xpos - x_0,
self._grid_ypos - y_0))[0]
self._psf_interp = self.data[self._ref_indices, :, :]
else:
# find the four bounding reference PSFs and interpolate
self._ref_indices = self._find_bounding_points(x_0, y_0)
xyref = np.array(self.grid_xypos)[self._ref_indices]
psfs = self.data[self._ref_indices, :, :]
self._psf_interp = self._bilinear_interp(xyref, psfs, x_0, y_0)
# now evaluate the PSF at the (x_0, y_0) subpixel position on
# the input (x, y) values
psfmodel = FittableImageModel(self._psf_interp,
oversampling=self.oversampling)
return psfmodel.evaluate(x, y, flux, x_0, y_0)
|
python
|
def evaluate(self, x, y, flux, x_0, y_0):
"""
Evaluate the `GriddedPSFModel` for the input parameters.
"""
# NOTE: this is needed because the PSF photometry routines input
# length-1 values instead of scalars. TODO: fix the photometry
# routines.
if not np.isscalar(x_0):
x_0 = x_0[0]
if not np.isscalar(y_0):
y_0 = y_0[0]
if (x_0 < self._xgrid_min or x_0 > self._xgrid_max or
y_0 < self._ygrid_min or y_0 > self._ygrid_max):
# position is outside of the grid, so simply use the
# closest reference PSF
self._ref_indices = np.argsort(np.hypot(self._grid_xpos - x_0,
self._grid_ypos - y_0))[0]
self._psf_interp = self.data[self._ref_indices, :, :]
else:
# find the four bounding reference PSFs and interpolate
self._ref_indices = self._find_bounding_points(x_0, y_0)
xyref = np.array(self.grid_xypos)[self._ref_indices]
psfs = self.data[self._ref_indices, :, :]
self._psf_interp = self._bilinear_interp(xyref, psfs, x_0, y_0)
# now evaluate the PSF at the (x_0, y_0) subpixel position on
# the input (x, y) values
psfmodel = FittableImageModel(self._psf_interp,
oversampling=self.oversampling)
return psfmodel.evaluate(x, y, flux, x_0, y_0)
|
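The control flow above amounts to: outside the reference grid, fall back to the single nearest reference PSF; inside it, blend the four bounding PSFs bilinearly. A hedged sketch of that selection logic on a toy 2x2 grid (plain NumPy; `pick_or_blend` and the hard-coded grid are illustrative stand-ins, loosely modeled on the attributes used above, not the photutils API):

import numpy as np

grid_xypos = np.array([(0, 0), (0, 10), (10, 0), (10, 10)])  # reference positions
psfs = np.arange(4.0).reshape(4, 1, 1)                       # toy "PSF" stack

def pick_or_blend(x0, y0):
    # Outside the grid: fall back to the single closest reference PSF.
    if not (0 <= x0 <= 10 and 0 <= y0 <= 10):
        i = np.argmin(np.hypot(grid_xypos[:, 0] - x0, grid_xypos[:, 1] - y0))
        return psfs[i]
    # Inside: bilinear blend of the four bounding PSFs (unit-square weights).
    fx, fy = x0 / 10.0, y0 / 10.0
    w = np.array([(1 - fx) * (1 - fy), (1 - fx) * fy, fx * (1 - fy), fx * fy])
    return np.sum(psfs * w[:, None, None], axis=0)

print(pick_or_blend(5.0, 5.0))   # blend of all four references
print(pick_or_blend(-3.0, 2.0))  # nearest reference, here (0, 0)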
[
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
")",
":",
"# NOTE: this is needed because the PSF photometry routines input",
"# length-1 values instead of scalars. TODO: fix the photometry",
"# routines.",
"if",
"not",
"np",
".",
"isscalar",
"(",
"x_0",
")",
":",
"x_0",
"=",
"x_0",
"[",
"0",
"]",
"if",
"not",
"np",
".",
"isscalar",
"(",
"y_0",
")",
":",
"y_0",
"=",
"y_0",
"[",
"0",
"]",
"if",
"(",
"x_0",
"<",
"self",
".",
"_xgrid_min",
"or",
"x_0",
">",
"self",
".",
"_xgrid_max",
"or",
"y_0",
"<",
"self",
".",
"_ygrid_min",
"or",
"y_0",
">",
"self",
".",
"_ygrid_max",
")",
":",
"# position is outside of the grid, so simply use the",
"# closest reference PSF",
"self",
".",
"_ref_indices",
"=",
"np",
".",
"argsort",
"(",
"np",
".",
"hypot",
"(",
"self",
".",
"_grid_xpos",
"-",
"x_0",
",",
"self",
".",
"_grid_ypos",
"-",
"y_0",
")",
")",
"[",
"0",
"]",
"self",
".",
"_psf_interp",
"=",
"self",
".",
"data",
"[",
"self",
".",
"_ref_indices",
",",
":",
",",
":",
"]",
"else",
":",
"# find the four bounding reference PSFs and interpolate",
"self",
".",
"_ref_indices",
"=",
"self",
".",
"_find_bounding_points",
"(",
"x_0",
",",
"y_0",
")",
"xyref",
"=",
"np",
".",
"array",
"(",
"self",
".",
"grid_xypos",
")",
"[",
"self",
".",
"_ref_indices",
"]",
"psfs",
"=",
"self",
".",
"data",
"[",
"self",
".",
"_ref_indices",
",",
":",
",",
":",
"]",
"self",
".",
"_psf_interp",
"=",
"self",
".",
"_bilinear_interp",
"(",
"xyref",
",",
"psfs",
",",
"x_0",
",",
"y_0",
")",
"# now evaluate the PSF at the (x_0, y_0) subpixel position on",
"# the input (x, y) values",
"psfmodel",
"=",
"FittableImageModel",
"(",
"self",
".",
"_psf_interp",
",",
"oversampling",
"=",
"self",
".",
"oversampling",
")",
"return",
"psfmodel",
".",
"evaluate",
"(",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
")"
] |
Evaluate the `GriddedPSFModel` for the input parameters.
|
[
"Evaluate",
"the",
"GriddedPSFModel",
"for",
"the",
"input",
"parameters",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L708-L742
|
10,658
|
astropy/photutils
|
photutils/psf/models.py
|
IntegratedGaussianPRF.evaluate
|
def evaluate(self, x, y, flux, x_0, y_0, sigma):
"""Model function Gaussian PSF model."""
return (flux / 4 *
((self._erf((x - x_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((x - x_0 - 0.5) / (np.sqrt(2) * sigma))) *
(self._erf((y - y_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((y - y_0 - 0.5) / (np.sqrt(2) * sigma)))))
|
python
|
def evaluate(self, x, y, flux, x_0, y_0, sigma):
"""Model function Gaussian PSF model."""
return (flux / 4 *
((self._erf((x - x_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((x - x_0 - 0.5) / (np.sqrt(2) * sigma))) *
(self._erf((y - y_0 + 0.5) / (np.sqrt(2) * sigma)) -
self._erf((y - y_0 - 0.5) / (np.sqrt(2) * sigma)))))
|
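The erf expression above is the exact integral of a normalized 2D Gaussian over 1x1 pixels: the integral of exp(-t**2 / (2 * sigma**2)) / sqrt(2 * pi * sigma**2) over [a, b] equals 0.5 * (erf(b / (sqrt(2) * sigma)) - erf(a / (sqrt(2) * sigma))). A quick numerical check (assuming `self._erf` wraps `scipy.special.erf`, which appears to be the case):

import numpy as np
from scipy.special import erf

def pixel_integrated_gaussian(x, y, flux, x_0, y_0, sigma):
    # Exact integral of a normalized 2D Gaussian over 1x1 pixels centered
    # on (x, y); this mirrors the erf expression in the model above.
    s = np.sqrt(2) * sigma
    return (flux / 4 *
            (erf((x - x_0 + 0.5) / s) - erf((x - x_0 - 0.5) / s)) *
            (erf((y - y_0 + 0.5) / s) - erf((y - y_0 - 0.5) / s)))

y, x = np.mgrid[-10:11, -10:11]
vals = pixel_integrated_gaussian(x, y, flux=7.0, x_0=0.3, y_0=-0.2, sigma=1.5)
print(vals.sum())  # ~7.0: the pixel sums recover the total flux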
[
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
",",
"sigma",
")",
":",
"return",
"(",
"flux",
"/",
"4",
"*",
"(",
"(",
"self",
".",
"_erf",
"(",
"(",
"x",
"-",
"x_0",
"+",
"0.5",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"2",
")",
"*",
"sigma",
")",
")",
"-",
"self",
".",
"_erf",
"(",
"(",
"x",
"-",
"x_0",
"-",
"0.5",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"2",
")",
"*",
"sigma",
")",
")",
")",
"*",
"(",
"self",
".",
"_erf",
"(",
"(",
"y",
"-",
"y_0",
"+",
"0.5",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"2",
")",
"*",
"sigma",
")",
")",
"-",
"self",
".",
"_erf",
"(",
"(",
"y",
"-",
"y_0",
"-",
"0.5",
")",
"/",
"(",
"np",
".",
"sqrt",
"(",
"2",
")",
"*",
"sigma",
")",
")",
")",
")",
")"
] |
Model function for the Gaussian PSF model.
|
[
"Model",
"function",
"Gaussian",
"PSF",
"model",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L819-L826
|
10,659
|
astropy/photutils
|
photutils/psf/models.py
|
PRFAdapter.evaluate
|
def evaluate(self, x, y, flux, x_0, y_0):
"""The evaluation function for PRFAdapter."""
if self.xname is None:
dx = x - x_0
else:
dx = x
setattr(self.psfmodel, self.xname, x_0)
    if self.yname is None:
dy = y - y_0
else:
dy = y
setattr(self.psfmodel, self.yname, y_0)
if self.fluxname is None:
return (flux * self._psf_scale_factor *
self._integrated_psfmodel(dx, dy))
else:
        setattr(self.psfmodel, self.fluxname, flux * self._psf_scale_factor)
return self._integrated_psfmodel(dx, dy)
|
python
|
def evaluate(self, x, y, flux, x_0, y_0):
"""The evaluation function for PRFAdapter."""
if self.xname is None:
dx = x - x_0
else:
dx = x
setattr(self.psfmodel, self.xname, x_0)
    if self.yname is None:
dy = y - y_0
else:
dy = y
setattr(self.psfmodel, self.yname, y_0)
if self.fluxname is None:
return (flux * self._psf_scale_factor *
self._integrated_psfmodel(dx, dy))
else:
        setattr(self.psfmodel, self.fluxname, flux * self._psf_scale_factor)
return self._integrated_psfmodel(dx, dy)
|
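The adapter either shifts the input coordinates (when `xname`/`yname` are `None`, i.e. the wrapped model is assumed centered at zero) or passes the coordinates through and sets the wrapped model's own center parameters; the y-axis test must use `yname` and the flux `setattr` must target `fluxname`, as above. A hedged sketch of that dispatch with a toy stand-in model (illustrative names, not the photutils API):

import numpy as np

class ToyModel:
    # Stand-in for a wrapped PSF model with settable position parameters.
    x_mean = 0.0
    y_mean = 0.0
    def __call__(self, dx, dy):
        return np.exp(-((dx - self.x_mean)**2 + (dy - self.y_mean)**2))

def adapt(model, x, y, x_0, y_0, xname=None, yname=None):
    # xname is None: the model is centered at 0, so shift the coordinates.
    # xname set: tell the model where its center is and pass x through.
    if xname is None:
        dx = x - x_0
    else:
        dx = x
        setattr(model, xname, x_0)
    if yname is None:
        dy = y - y_0
    else:
        dy = y
        setattr(model, yname, y_0)
    return model(dx, dy)

m = ToyModel()
a = adapt(m, 1.0, 1.0, 1.0, 1.0)                                  # shift path
b = adapt(m, 1.0, 1.0, 1.0, 1.0, xname='x_mean', yname='y_mean')  # setattr path
print(a, b)  # both evaluate the model at its center, so a == b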
[
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
")",
":",
"if",
"self",
".",
"xname",
"is",
"None",
":",
"dx",
"=",
"x",
"-",
"x_0",
"else",
":",
"dx",
"=",
"x",
"setattr",
"(",
"self",
".",
"psfmodel",
",",
"self",
".",
"xname",
",",
"x_0",
")",
"if",
"self",
".",
"xname",
"is",
"None",
":",
"dy",
"=",
"y",
"-",
"y_0",
"else",
":",
"dy",
"=",
"y",
"setattr",
"(",
"self",
".",
"psfmodel",
",",
"self",
".",
"yname",
",",
"y_0",
")",
"if",
"self",
".",
"fluxname",
"is",
"None",
":",
"return",
"(",
"flux",
"*",
"self",
".",
"_psf_scale_factor",
"*",
"self",
".",
"_integrated_psfmodel",
"(",
"dx",
",",
"dy",
")",
")",
"else",
":",
"setattr",
"(",
"self",
".",
"psfmodel",
",",
"self",
".",
"yname",
",",
"flux",
"*",
"self",
".",
"_psf_scale_factor",
")",
"return",
"self",
".",
"_integrated_psfmodel",
"(",
"dx",
",",
"dy",
")"
] |
The evaluation function for PRFAdapter.
|
[
"The",
"evaluation",
"function",
"for",
"PRFAdapter",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L895-L915
|
10,660
|
astropy/photutils
|
photutils/isophote/isophote.py
|
_isophote_list_to_table
|
def _isophote_list_to_table(isophote_list):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters.
"""
properties = OrderedDict()
properties['sma'] = 'sma'
properties['intens'] = 'intens'
properties['int_err'] = 'intens_err'
properties['eps'] = 'ellipticity'
properties['ellip_err'] = 'ellipticity_err'
properties['pa'] = 'pa'
properties['pa_err'] = 'pa_err'
properties['grad_r_error'] = 'grad_rerr'
properties['ndata'] = 'ndata'
properties['nflag'] = 'flag'
properties['niter'] = 'niter'
properties['stop_code'] = 'stop_code'
isotable = QTable()
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable
|
python
|
def _isophote_list_to_table(isophote_list):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters.
"""
properties = OrderedDict()
properties['sma'] = 'sma'
properties['intens'] = 'intens'
properties['int_err'] = 'intens_err'
properties['eps'] = 'ellipticity'
properties['ellip_err'] = 'ellipticity_err'
properties['pa'] = 'pa'
properties['pa_err'] = 'pa_err'
properties['grad_r_error'] = 'grad_rerr'
properties['ndata'] = 'ndata'
properties['nflag'] = 'flag'
properties['niter'] = 'niter'
properties['stop_code'] = 'stop_code'
isotable = QTable()
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable
|
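The conversion is a straight attribute-to-column mapping, with position angles converted from radians to degrees and given units. A minimal sketch using dummy isophote-like objects (assuming only `astropy.table.QTable` and `astropy.units`; not the photutils Isophote class):

import numpy as np
import astropy.units as u
from astropy.table import QTable
from types import SimpleNamespace

# Two fake isophotes carrying only the attributes mapped below.
isos = [SimpleNamespace(sma=1.0, pa=0.0), SimpleNamespace(sma=2.0, pa=np.pi / 2)]

mapping = {'sma': 'sma', 'pa': 'pa'}  # attribute name -> column name
tbl = QTable()
for attr, col in mapping.items():
    tbl[col] = np.array([getattr(iso, attr) for iso in isos])
    if attr == 'pa':  # position angles are stored internally in radians
        tbl[col] = tbl[col] * 180. / np.pi * u.deg
print(tbl['pa'])  # [0.0, 90.0] deg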
[
"def",
"_isophote_list_to_table",
"(",
"isophote_list",
")",
":",
"properties",
"=",
"OrderedDict",
"(",
")",
"properties",
"[",
"'sma'",
"]",
"=",
"'sma'",
"properties",
"[",
"'intens'",
"]",
"=",
"'intens'",
"properties",
"[",
"'int_err'",
"]",
"=",
"'intens_err'",
"properties",
"[",
"'eps'",
"]",
"=",
"'ellipticity'",
"properties",
"[",
"'ellip_err'",
"]",
"=",
"'ellipticity_err'",
"properties",
"[",
"'pa'",
"]",
"=",
"'pa'",
"properties",
"[",
"'pa_err'",
"]",
"=",
"'pa_err'",
"properties",
"[",
"'grad_r_error'",
"]",
"=",
"'grad_rerr'",
"properties",
"[",
"'ndata'",
"]",
"=",
"'ndata'",
"properties",
"[",
"'nflag'",
"]",
"=",
"'flag'",
"properties",
"[",
"'niter'",
"]",
"=",
"'niter'",
"properties",
"[",
"'stop_code'",
"]",
"=",
"'stop_code'",
"isotable",
"=",
"QTable",
"(",
")",
"for",
"k",
",",
"v",
"in",
"properties",
".",
"items",
"(",
")",
":",
"isotable",
"[",
"v",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"getattr",
"(",
"iso",
",",
"k",
")",
"for",
"iso",
"in",
"isophote_list",
"]",
")",
"if",
"k",
"in",
"(",
"'pa'",
",",
"'pa_err'",
")",
":",
"isotable",
"[",
"v",
"]",
"=",
"isotable",
"[",
"v",
"]",
"*",
"180.",
"/",
"np",
".",
"pi",
"*",
"u",
".",
"deg",
"return",
"isotable"
] |
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or a `~photutils.isophote.IsophoteList` instance
A list of isophotes.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the main isophote parameters.
|
[
"Convert",
"an",
"~photutils",
".",
"isophote",
".",
"IsophoteList",
"instance",
"to",
"a",
"~astropy",
".",
"table",
".",
"QTable",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L730-L768
|
10,661
|
astropy/photutils
|
photutils/isophote/isophote.py
|
Isophote._compute_fluxes
|
def _compute_fluxes(self):
"""
Compute integrated flux inside ellipse, as well as inside a
circle defined with the same semimajor axis.
    Pixels in a square section enclosing the circle are scanned; the
distance of each pixel to the isophote center is compared both
with the semimajor axis length and with the length of the
ellipse radius vector, and integrals are updated if the pixel
distance is smaller.
"""
# Compute limits of square array that encloses circle.
sma = self.sample.geometry.sma
x0 = self.sample.geometry.x0
y0 = self.sample.geometry.y0
xsize = self.sample.image.shape[1]
ysize = self.sample.image.shape[0]
imin = max(0, int(x0 - sma - 0.5) - 1)
jmin = max(0, int(y0 - sma - 0.5) - 1)
imax = min(xsize, int(x0 + sma + 0.5) + 1)
jmax = min(ysize, int(y0 + sma + 0.5) + 1)
# Integrate
if (jmax-jmin > 1) and (imax-imin) > 1:
y, x = np.mgrid[jmin:jmax, imin:imax]
radius, angle = self.sample.geometry.to_polar(x, y)
radius_e = self.sample.geometry.radius(angle)
midx = (radius <= sma)
values = self.sample.image[y[midx], x[midx]]
tflux_c = np.ma.sum(values)
npix_c = np.ma.count(values)
midx2 = (radius <= radius_e)
values = self.sample.image[y[midx2], x[midx2]]
tflux_e = np.ma.sum(values)
npix_e = np.ma.count(values)
else:
tflux_e = 0.
tflux_c = 0.
npix_e = 0
npix_c = 0
return tflux_e, tflux_c, npix_e, npix_c
|
python
|
def _compute_fluxes(self):
"""
Compute integrated flux inside ellipse, as well as inside a
circle defined with the same semimajor axis.
    Pixels in a square section enclosing the circle are scanned; the
distance of each pixel to the isophote center is compared both
with the semimajor axis length and with the length of the
ellipse radius vector, and integrals are updated if the pixel
distance is smaller.
"""
# Compute limits of square array that encloses circle.
sma = self.sample.geometry.sma
x0 = self.sample.geometry.x0
y0 = self.sample.geometry.y0
xsize = self.sample.image.shape[1]
ysize = self.sample.image.shape[0]
imin = max(0, int(x0 - sma - 0.5) - 1)
jmin = max(0, int(y0 - sma - 0.5) - 1)
imax = min(xsize, int(x0 + sma + 0.5) + 1)
jmax = min(ysize, int(y0 + sma + 0.5) + 1)
# Integrate
if (jmax-jmin > 1) and (imax-imin) > 1:
y, x = np.mgrid[jmin:jmax, imin:imax]
radius, angle = self.sample.geometry.to_polar(x, y)
radius_e = self.sample.geometry.radius(angle)
midx = (radius <= sma)
values = self.sample.image[y[midx], x[midx]]
tflux_c = np.ma.sum(values)
npix_c = np.ma.count(values)
midx2 = (radius <= radius_e)
values = self.sample.image[y[midx2], x[midx2]]
tflux_e = np.ma.sum(values)
npix_e = np.ma.count(values)
else:
tflux_e = 0.
tflux_c = 0.
npix_e = 0
npix_c = 0
return tflux_e, tflux_c, npix_e, npix_c
|
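The "integration" is a masked pixel sum over the bounding box of the circle. A standalone sketch for the circular case only (the real method additionally compares each pixel against the elliptical radius from `geometry.radius(angle)`):

import numpy as np

image = np.ones((50, 50))
x0, y0, sma = 24.5, 24.5, 10.0

# Bounding box of the circle, clipped to the image, as in the method above.
imin, imax = max(0, int(x0 - sma - 0.5) - 1), min(50, int(x0 + sma + 0.5) + 1)
jmin, jmax = max(0, int(y0 - sma - 0.5) - 1), min(50, int(y0 + sma + 0.5) + 1)

y, x = np.mgrid[jmin:jmax, imin:imax]
radius = np.hypot(x - x0, y - y0)   # circular distance to the center
inside = radius <= sma
tflux_c = image[y[inside], x[inside]].sum()
print(tflux_c, np.pi * sma**2)      # the pixel count approximates the area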
[
"def",
"_compute_fluxes",
"(",
"self",
")",
":",
"# Compute limits of square array that encloses circle.",
"sma",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"sma",
"x0",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"x0",
"y0",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"y0",
"xsize",
"=",
"self",
".",
"sample",
".",
"image",
".",
"shape",
"[",
"1",
"]",
"ysize",
"=",
"self",
".",
"sample",
".",
"image",
".",
"shape",
"[",
"0",
"]",
"imin",
"=",
"max",
"(",
"0",
",",
"int",
"(",
"x0",
"-",
"sma",
"-",
"0.5",
")",
"-",
"1",
")",
"jmin",
"=",
"max",
"(",
"0",
",",
"int",
"(",
"y0",
"-",
"sma",
"-",
"0.5",
")",
"-",
"1",
")",
"imax",
"=",
"min",
"(",
"xsize",
",",
"int",
"(",
"x0",
"+",
"sma",
"+",
"0.5",
")",
"+",
"1",
")",
"jmax",
"=",
"min",
"(",
"ysize",
",",
"int",
"(",
"y0",
"+",
"sma",
"+",
"0.5",
")",
"+",
"1",
")",
"# Integrate",
"if",
"(",
"jmax",
"-",
"jmin",
">",
"1",
")",
"and",
"(",
"imax",
"-",
"imin",
")",
">",
"1",
":",
"y",
",",
"x",
"=",
"np",
".",
"mgrid",
"[",
"jmin",
":",
"jmax",
",",
"imin",
":",
"imax",
"]",
"radius",
",",
"angle",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"to_polar",
"(",
"x",
",",
"y",
")",
"radius_e",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"radius",
"(",
"angle",
")",
"midx",
"=",
"(",
"radius",
"<=",
"sma",
")",
"values",
"=",
"self",
".",
"sample",
".",
"image",
"[",
"y",
"[",
"midx",
"]",
",",
"x",
"[",
"midx",
"]",
"]",
"tflux_c",
"=",
"np",
".",
"ma",
".",
"sum",
"(",
"values",
")",
"npix_c",
"=",
"np",
".",
"ma",
".",
"count",
"(",
"values",
")",
"midx2",
"=",
"(",
"radius",
"<=",
"radius_e",
")",
"values",
"=",
"self",
".",
"sample",
".",
"image",
"[",
"y",
"[",
"midx2",
"]",
",",
"x",
"[",
"midx2",
"]",
"]",
"tflux_e",
"=",
"np",
".",
"ma",
".",
"sum",
"(",
"values",
")",
"npix_e",
"=",
"np",
".",
"ma",
".",
"count",
"(",
"values",
")",
"else",
":",
"tflux_e",
"=",
"0.",
"tflux_c",
"=",
"0.",
"npix_e",
"=",
"0",
"npix_c",
"=",
"0",
"return",
"tflux_e",
",",
"tflux_c",
",",
"npix_e",
",",
"npix_c"
] |
Compute integrated flux inside ellipse, as well as inside a
circle defined with the same semimajor axis.
Pixels in a square section enclosing the circle are scanned; the
distance of each pixel to the isophote center is compared both
with the semimajor axis length and with the length of the
ellipse radius vector, and integrals are updated if the pixel
distance is smaller.
|
[
"Compute",
"integrated",
"flux",
"inside",
"ellipse",
"as",
"well",
"as",
"inside",
"a",
"circle",
"defined",
"with",
"the",
"same",
"semimajor",
"axis",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L176-L221
|
10,662
|
astropy/photutils
|
photutils/isophote/isophote.py
|
Isophote._compute_deviations
|
def _compute_deviations(self, sample, n):
"""
Compute deviations from a perfect ellipse, based on the
amplitudes and errors for harmonic "n". Note that we first
subtract the first and second harmonics from the raw data.
"""
try:
coeffs = fit_first_and_second_harmonics(self.sample.values[0],
self.sample.values[2])
coeffs = coeffs[0]
model = first_and_second_harmonic_function(self.sample.values[0],
coeffs)
residual = self.sample.values[2] - model
c = fit_upper_harmonic(residual, sample.values[2], n)
covariance = c[1]
ce = np.diagonal(covariance)
c = c[0]
a = c[1] / self.sma / sample.gradient
b = c[2] / self.sma / sample.gradient
# this comes from the old code. Likely it was based on
# empirical experience with the STSDAS task, so we leave
# it here without too much thought.
gre = self.grad_r_error if self.grad_r_error is not None else 0.64
a_err = abs(a) * np.sqrt((ce[1] / c[1])**2 + gre**2)
b_err = abs(b) * np.sqrt((ce[2] / c[2])**2 + gre**2)
except Exception: # we want to catch everything
a = b = a_err = b_err = None
return a, b, a_err, b_err
|
python
|
def _compute_deviations(self, sample, n):
"""
Compute deviations from a perfect ellipse, based on the
amplitudes and errors for harmonic "n". Note that we first
subtract the first and second harmonics from the raw data.
"""
try:
coeffs = fit_first_and_second_harmonics(self.sample.values[0],
self.sample.values[2])
coeffs = coeffs[0]
model = first_and_second_harmonic_function(self.sample.values[0],
coeffs)
residual = self.sample.values[2] - model
c = fit_upper_harmonic(residual, sample.values[2], n)
covariance = c[1]
ce = np.diagonal(covariance)
c = c[0]
a = c[1] / self.sma / sample.gradient
b = c[2] / self.sma / sample.gradient
# this comes from the old code. Likely it was based on
# empirical experience with the STSDAS task, so we leave
# it here without too much thought.
gre = self.grad_r_error if self.grad_r_error is not None else 0.64
a_err = abs(a) * np.sqrt((ce[1] / c[1])**2 + gre**2)
b_err = abs(b) * np.sqrt((ce[2] / c[2])**2 + gre**2)
except Exception: # we want to catch everything
a = b = a_err = b_err = None
return a, b, a_err, b_err
|
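The upper-harmonic amplitudes come from least-squares fitting sin/cos terms of order n to the residual, then normalizing by `sma * gradient`. A sketch of such a fit with plain `numpy.linalg.lstsq` (an assumption about what `fit_upper_harmonic` does internally, not its actual implementation):

import numpy as np

n = 4
phi = np.linspace(0, 2 * np.pi, 200, endpoint=False)
residual = 0.05 * np.sin(n * phi) + 0.02 * np.cos(n * phi)

# Least-squares fit of y0 + a*sin(n*phi) + b*cos(n*phi).
A = np.column_stack([np.ones_like(phi), np.sin(n * phi), np.cos(n * phi)])
(y0, a, b), *_ = np.linalg.lstsq(A, residual, rcond=None)

sma, gradient = 10.0, -0.8
print(a / sma / gradient, b / sma / gradient)  # normalized deviations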
[
"def",
"_compute_deviations",
"(",
"self",
",",
"sample",
",",
"n",
")",
":",
"try",
":",
"coeffs",
"=",
"fit_first_and_second_harmonics",
"(",
"self",
".",
"sample",
".",
"values",
"[",
"0",
"]",
",",
"self",
".",
"sample",
".",
"values",
"[",
"2",
"]",
")",
"coeffs",
"=",
"coeffs",
"[",
"0",
"]",
"model",
"=",
"first_and_second_harmonic_function",
"(",
"self",
".",
"sample",
".",
"values",
"[",
"0",
"]",
",",
"coeffs",
")",
"residual",
"=",
"self",
".",
"sample",
".",
"values",
"[",
"2",
"]",
"-",
"model",
"c",
"=",
"fit_upper_harmonic",
"(",
"residual",
",",
"sample",
".",
"values",
"[",
"2",
"]",
",",
"n",
")",
"covariance",
"=",
"c",
"[",
"1",
"]",
"ce",
"=",
"np",
".",
"diagonal",
"(",
"covariance",
")",
"c",
"=",
"c",
"[",
"0",
"]",
"a",
"=",
"c",
"[",
"1",
"]",
"/",
"self",
".",
"sma",
"/",
"sample",
".",
"gradient",
"b",
"=",
"c",
"[",
"2",
"]",
"/",
"self",
".",
"sma",
"/",
"sample",
".",
"gradient",
"# this comes from the old code. Likely it was based on",
"# empirical experience with the STSDAS task, so we leave",
"# it here without too much thought.",
"gre",
"=",
"self",
".",
"grad_r_error",
"if",
"self",
".",
"grad_r_error",
"is",
"not",
"None",
"else",
"0.64",
"a_err",
"=",
"abs",
"(",
"a",
")",
"*",
"np",
".",
"sqrt",
"(",
"(",
"ce",
"[",
"1",
"]",
"/",
"c",
"[",
"1",
"]",
")",
"**",
"2",
"+",
"gre",
"**",
"2",
")",
"b_err",
"=",
"abs",
"(",
"b",
")",
"*",
"np",
".",
"sqrt",
"(",
"(",
"ce",
"[",
"2",
"]",
"/",
"c",
"[",
"2",
"]",
")",
"**",
"2",
"+",
"gre",
"**",
"2",
")",
"except",
"Exception",
":",
"# we want to catch everything",
"a",
"=",
"b",
"=",
"a_err",
"=",
"b_err",
"=",
"None",
"return",
"a",
",",
"b",
",",
"a_err",
",",
"b_err"
] |
Compute deviations from a perfect ellipse, based on the
amplitudes and errors for harmonic "n". Note that we first
subtract the first and second harmonics from the raw data.
|
[
"Compute",
"deviations",
"from",
"a",
"perfect",
"ellipse",
"based",
"on",
"the",
"amplitudes",
"and",
"errors",
"for",
"harmonic",
"n",
".",
"Note",
"that",
"we",
"first",
"subtract",
"the",
"first",
"and",
"second",
"harmonics",
"from",
"the",
"raw",
"data",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L223-L257
|
10,663
|
astropy/photutils
|
photutils/isophote/isophote.py
|
Isophote._compute_errors
|
def _compute_errors(self):
"""
Compute parameter errors based on the diagonal of the covariance
matrix of the four harmonic coefficients for harmonics n=1 and
n=2.
"""
try:
coeffs = fit_first_and_second_harmonics(self.sample.values[0],
self.sample.values[2])
covariance = coeffs[1]
coeffs = coeffs[0]
model = first_and_second_harmonic_function(self.sample.values[0],
coeffs)
residual_rms = np.std(self.sample.values[2] - model)
errors = np.diagonal(covariance) * residual_rms
eps = self.sample.geometry.eps
pa = self.sample.geometry.pa
# parameter errors result from direct projection of
# coefficient errors. These showed to be the error estimators
# that best convey the errors measured in Monte Carlo
# experiments (see Busko 1996; ASPC 101, 139).
ea = abs(errors[2] / self.grad)
eb = abs(errors[1] * (1. - eps) / self.grad)
self.x0_err = np.sqrt((ea * np.cos(pa))**2 + (eb * np.sin(pa))**2)
self.y0_err = np.sqrt((ea * np.sin(pa))**2 + (eb * np.cos(pa))**2)
self.ellip_err = (abs(2. * errors[4] * (1. - eps) / self.sma /
self.grad))
if (abs(eps) > np.finfo(float).resolution):
self.pa_err = (abs(2. * errors[3] * (1. - eps) / self.sma /
self.grad / (1. - (1. - eps)**2)))
else:
self.pa_err = 0.
except Exception: # we want to catch everything
self.x0_err = self.y0_err = self.pa_err = self.ellip_err = 0.
|
python
|
def _compute_errors(self):
"""
Compute parameter errors based on the diagonal of the covariance
matrix of the four harmonic coefficients for harmonics n=1 and
n=2.
"""
try:
coeffs = fit_first_and_second_harmonics(self.sample.values[0],
self.sample.values[2])
covariance = coeffs[1]
coeffs = coeffs[0]
model = first_and_second_harmonic_function(self.sample.values[0],
coeffs)
residual_rms = np.std(self.sample.values[2] - model)
errors = np.diagonal(covariance) * residual_rms
eps = self.sample.geometry.eps
pa = self.sample.geometry.pa
# parameter errors result from direct projection of
# coefficient errors. These showed to be the error estimators
# that best convey the errors measured in Monte Carlo
# experiments (see Busko 1996; ASPC 101, 139).
ea = abs(errors[2] / self.grad)
eb = abs(errors[1] * (1. - eps) / self.grad)
self.x0_err = np.sqrt((ea * np.cos(pa))**2 + (eb * np.sin(pa))**2)
self.y0_err = np.sqrt((ea * np.sin(pa))**2 + (eb * np.cos(pa))**2)
self.ellip_err = (abs(2. * errors[4] * (1. - eps) / self.sma /
self.grad))
if (abs(eps) > np.finfo(float).resolution):
self.pa_err = (abs(2. * errors[3] * (1. - eps) / self.sma /
self.grad / (1. - (1. - eps)**2)))
else:
self.pa_err = 0.
except Exception: # we want to catch everything
self.x0_err = self.y0_err = self.pa_err = self.ellip_err = 0.
|
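The center errors are the along-axis errors `ea` and `eb` projected through the position angle. A numeric sketch of those projections with made-up coefficient errors (values here are illustrative only):

import numpy as np

errors = np.array([0.0, 0.02, 0.03, 0.01, 0.015])  # toy coefficient errors
grad, eps, pa, sma = -0.8, 0.3, np.deg2rad(30.), 10.0

# Project coefficient errors onto the geometric parameters, following the
# expressions in the method above (np.hypot(a, b) == sqrt(a**2 + b**2)).
ea = abs(errors[2] / grad)
eb = abs(errors[1] * (1. - eps) / grad)
x0_err = np.hypot(ea * np.cos(pa), eb * np.sin(pa))
y0_err = np.hypot(ea * np.sin(pa), eb * np.cos(pa))
ellip_err = abs(2. * errors[4] * (1. - eps) / sma / grad)
print(x0_err, y0_err, ellip_err)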
[
"def",
"_compute_errors",
"(",
"self",
")",
":",
"try",
":",
"coeffs",
"=",
"fit_first_and_second_harmonics",
"(",
"self",
".",
"sample",
".",
"values",
"[",
"0",
"]",
",",
"self",
".",
"sample",
".",
"values",
"[",
"2",
"]",
")",
"covariance",
"=",
"coeffs",
"[",
"1",
"]",
"coeffs",
"=",
"coeffs",
"[",
"0",
"]",
"model",
"=",
"first_and_second_harmonic_function",
"(",
"self",
".",
"sample",
".",
"values",
"[",
"0",
"]",
",",
"coeffs",
")",
"residual_rms",
"=",
"np",
".",
"std",
"(",
"self",
".",
"sample",
".",
"values",
"[",
"2",
"]",
"-",
"model",
")",
"errors",
"=",
"np",
".",
"diagonal",
"(",
"covariance",
")",
"*",
"residual_rms",
"eps",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"eps",
"pa",
"=",
"self",
".",
"sample",
".",
"geometry",
".",
"pa",
"# parameter errors result from direct projection of",
"# coefficient errors. These showed to be the error estimators",
"# that best convey the errors measured in Monte Carlo",
"# experiments (see Busko 1996; ASPC 101, 139).",
"ea",
"=",
"abs",
"(",
"errors",
"[",
"2",
"]",
"/",
"self",
".",
"grad",
")",
"eb",
"=",
"abs",
"(",
"errors",
"[",
"1",
"]",
"*",
"(",
"1.",
"-",
"eps",
")",
"/",
"self",
".",
"grad",
")",
"self",
".",
"x0_err",
"=",
"np",
".",
"sqrt",
"(",
"(",
"ea",
"*",
"np",
".",
"cos",
"(",
"pa",
")",
")",
"**",
"2",
"+",
"(",
"eb",
"*",
"np",
".",
"sin",
"(",
"pa",
")",
")",
"**",
"2",
")",
"self",
".",
"y0_err",
"=",
"np",
".",
"sqrt",
"(",
"(",
"ea",
"*",
"np",
".",
"sin",
"(",
"pa",
")",
")",
"**",
"2",
"+",
"(",
"eb",
"*",
"np",
".",
"cos",
"(",
"pa",
")",
")",
"**",
"2",
")",
"self",
".",
"ellip_err",
"=",
"(",
"abs",
"(",
"2.",
"*",
"errors",
"[",
"4",
"]",
"*",
"(",
"1.",
"-",
"eps",
")",
"/",
"self",
".",
"sma",
"/",
"self",
".",
"grad",
")",
")",
"if",
"(",
"abs",
"(",
"eps",
")",
">",
"np",
".",
"finfo",
"(",
"float",
")",
".",
"resolution",
")",
":",
"self",
".",
"pa_err",
"=",
"(",
"abs",
"(",
"2.",
"*",
"errors",
"[",
"3",
"]",
"*",
"(",
"1.",
"-",
"eps",
")",
"/",
"self",
".",
"sma",
"/",
"self",
".",
"grad",
"/",
"(",
"1.",
"-",
"(",
"1.",
"-",
"eps",
")",
"**",
"2",
")",
")",
")",
"else",
":",
"self",
".",
"pa_err",
"=",
"0.",
"except",
"Exception",
":",
"# we want to catch everything",
"self",
".",
"x0_err",
"=",
"self",
".",
"y0_err",
"=",
"self",
".",
"pa_err",
"=",
"self",
".",
"ellip_err",
"=",
"0."
] |
Compute parameter errors based on the diagonal of the covariance
matrix of the four harmonic coefficients for harmonics n=1 and
n=2.
|
[
"Compute",
"parameter",
"errors",
"based",
"on",
"the",
"diagonal",
"of",
"the",
"covariance",
"matrix",
"of",
"the",
"four",
"harmonic",
"coefficients",
"for",
"harmonics",
"n",
"=",
"1",
"and",
"n",
"=",
"2",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L259-L295
|
10,664
|
astropy/photutils
|
photutils/isophote/isophote.py
|
Isophote.fix_geometry
|
def fix_geometry(self, isophote):
"""
Fix the geometry of a problematic isophote to be identical to
the input isophote.
This method should be called when the fitting goes berserk and
delivers an isophote with bad geometry, such as ellipticity > 1
or another meaningless situation. This is not a problem in
itself when fitting any given isophote, but will create an error
when the affected isophote is used as starting guess for the
next fit.
Parameters
----------
isophote : `~photutils.isophote.Isophote` instance
The isophote from which to take the geometry information.
"""
self.sample.geometry.eps = isophote.sample.geometry.eps
self.sample.geometry.pa = isophote.sample.geometry.pa
self.sample.geometry.x0 = isophote.sample.geometry.x0
self.sample.geometry.y0 = isophote.sample.geometry.y0
|
python
|
def fix_geometry(self, isophote):
"""
Fix the geometry of a problematic isophote to be identical to
the input isophote.
This method should be called when the fitting goes berserk and
delivers an isophote with bad geometry, such as ellipticity > 1
or another meaningless situation. This is not a problem in
itself when fitting any given isophote, but will create an error
when the affected isophote is used as starting guess for the
next fit.
Parameters
----------
isophote : `~photutils.isophote.Isophote` instance
The isophote from which to take the geometry information.
"""
self.sample.geometry.eps = isophote.sample.geometry.eps
self.sample.geometry.pa = isophote.sample.geometry.pa
self.sample.geometry.x0 = isophote.sample.geometry.x0
self.sample.geometry.y0 = isophote.sample.geometry.y0
|
[
"def",
"fix_geometry",
"(",
"self",
",",
"isophote",
")",
":",
"self",
".",
"sample",
".",
"geometry",
".",
"eps",
"=",
"isophote",
".",
"sample",
".",
"geometry",
".",
"eps",
"self",
".",
"sample",
".",
"geometry",
".",
"pa",
"=",
"isophote",
".",
"sample",
".",
"geometry",
".",
"pa",
"self",
".",
"sample",
".",
"geometry",
".",
"x0",
"=",
"isophote",
".",
"sample",
".",
"geometry",
".",
"x0",
"self",
".",
"sample",
".",
"geometry",
".",
"y0",
"=",
"isophote",
".",
"sample",
".",
"geometry",
".",
"y0"
] |
Fix the geometry of a problematic isophote to be identical to
the input isophote.
This method should be called when the fitting goes berserk and
delivers an isophote with bad geometry, such as ellipticity > 1
or another meaningless situation. This is not a problem in
itself when fitting any given isophote, but will create an error
when the affected isophote is used as starting guess for the
next fit.
Parameters
----------
isophote : `~photutils.isophote.Isophote` instance
The isophote from which to take the geometry information.
|
[
"Fix",
"the",
"geometry",
"of",
"a",
"problematic",
"isophote",
"to",
"be",
"identical",
"to",
"the",
"input",
"isophote",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L297-L318
|
10,665
|
astropy/photutils
|
photutils/isophote/isophote.py
|
IsophoteList.get_closest
|
def get_closest(self, sma):
"""
Return the `~photutils.isophote.Isophote` instance that has the
closest semimajor axis length to the input semimajor axis.
Parameters
----------
sma : float
The semimajor axis length.
Returns
-------
isophote : `~photutils.isophote.Isophote` instance
The isophote with the closest semimajor axis value.
"""
index = (np.abs(self.sma - sma)).argmin()
return self._list[index]
|
python
|
def get_closest(self, sma):
"""
Return the `~photutils.isophote.Isophote` instance that has the
closest semimajor axis length to the input semimajor axis.
Parameters
----------
sma : float
The semimajor axis length.
Returns
-------
isophote : `~photutils.isophote.Isophote` instance
The isophote with the closest semimajor axis value.
"""
index = (np.abs(self.sma - sma)).argmin()
return self._list[index]
|
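The lookup is a plain argmin over the semimajor-axis array; a two-line illustration with stand-in values:

import numpy as np

sma_values = np.array([1.0, 2.5, 4.0, 6.3])   # stand-in for IsophoteList.sma
index = np.abs(sma_values - 4.2).argmin()
print(index, sma_values[index])  # 2 4.0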
[
"def",
"get_closest",
"(",
"self",
",",
"sma",
")",
":",
"index",
"=",
"(",
"np",
".",
"abs",
"(",
"self",
".",
"sma",
"-",
"sma",
")",
")",
".",
"argmin",
"(",
")",
"return",
"self",
".",
"_list",
"[",
"index",
"]"
] |
Return the `~photutils.isophote.Isophote` instance that has the
closest semimajor axis length to the input semimajor axis.
Parameters
----------
sma : float
The semimajor axis length.
Returns
-------
isophote : `~photutils.isophote.Isophote` instance
The isophote with the closest semimajor axis value.
|
[
"Return",
"the",
"~photutils",
".",
"isophote",
".",
"Isophote",
"instance",
"that",
"has",
"the",
"closest",
"semimajor",
"axis",
"length",
"to",
"the",
"input",
"semimajor",
"axis",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/isophote.py#L468-L485
|
10,666
|
astropy/photutils
|
photutils/utils/interpolation.py
|
interpolate_masked_data
|
def interpolate_masked_data(data, mask, error=None, background=None):
"""
Interpolate over masked pixels in data and optional error or
background images.
    The values of masked pixels are replaced by the mean value of the
connected neighboring non-masked pixels. This function is intended
for single, isolated masked pixels (e.g. hot/warm pixels).
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The data array.
mask : array_like (bool)
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
error : array_like or `~astropy.units.Quantity`, optional
The pixel-wise Gaussian 1-sigma errors of the input ``data``.
``error`` must have the same shape as ``data``.
background : array_like, or `~astropy.units.Quantity`, optional
The pixel-wise background level of the input ``data``.
``background`` must have the same shape as ``data``.
Returns
-------
data : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``data`` with interpolated masked pixels.
error : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``error`` with interpolated masked pixels. `None` if
        ``error`` is not input.
background : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``background`` with interpolated masked pixels. `None` if
        ``background`` is not input.
"""
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape')
data_out = np.copy(data) # do not alter input data
mask_idx = mask.nonzero()
if mask_idx[0].size == 0:
raise ValueError('All items in data are masked')
for x in zip(*mask_idx):
X = np.array([[max(x[i] - 1, 0), min(x[i] + 1, data.shape[i] - 1)]
for i in range(len(data.shape))])
goodpix = ~mask[X]
if not np.any(goodpix):
warnings.warn('The masked pixel at "{}" is completely '
'surrounded by (connected) masked pixels, '
'thus unable to interpolate'.format(x,),
AstropyUserWarning)
continue
data_out[x] = np.mean(data[X][goodpix])
if background is not None:
if background.shape != data.shape:
raise ValueError('background and data must have the same '
'shape')
background_out = np.copy(background)
background_out[x] = np.mean(background[X][goodpix])
else:
background_out = None
if error is not None:
if error.shape != data.shape:
raise ValueError('error and data must have the same '
'shape')
error_out = np.copy(error)
error_out[x] = np.sqrt(np.mean(error[X][goodpix]**2))
else:
error_out = None
return data_out, error_out, background_out
|
python
|
def interpolate_masked_data(data, mask, error=None, background=None):
"""
Interpolate over masked pixels in data and optional error or
background images.
    The values of masked pixels are replaced by the mean value of the
connected neighboring non-masked pixels. This function is intended
for single, isolated masked pixels (e.g. hot/warm pixels).
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The data array.
mask : array_like (bool)
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
error : array_like or `~astropy.units.Quantity`, optional
The pixel-wise Gaussian 1-sigma errors of the input ``data``.
``error`` must have the same shape as ``data``.
background : array_like, or `~astropy.units.Quantity`, optional
The pixel-wise background level of the input ``data``.
``background`` must have the same shape as ``data``.
Returns
-------
data : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``data`` with interpolated masked pixels.
error : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``error`` with interpolated masked pixels. `None` if
        ``error`` is not input.
background : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``background`` with interpolated masked pixels. `None` if
        ``background`` is not input.
"""
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape')
data_out = np.copy(data) # do not alter input data
mask_idx = mask.nonzero()
if mask_idx[0].size == 0:
raise ValueError('All items in data are masked')
for x in zip(*mask_idx):
X = np.array([[max(x[i] - 1, 0), min(x[i] + 1, data.shape[i] - 1)]
for i in range(len(data.shape))])
goodpix = ~mask[X]
if not np.any(goodpix):
warnings.warn('The masked pixel at "{}" is completely '
'surrounded by (connected) masked pixels, '
'thus unable to interpolate'.format(x,),
AstropyUserWarning)
continue
data_out[x] = np.mean(data[X][goodpix])
if background is not None:
if background.shape != data.shape:
raise ValueError('background and data must have the same '
'shape')
background_out = np.copy(background)
background_out[x] = np.mean(background[X][goodpix])
else:
background_out = None
if error is not None:
if error.shape != data.shape:
raise ValueError('error and data must have the same '
'shape')
error_out = np.copy(error)
error_out[x] = np.sqrt(np.mean(error[X][goodpix]**2))
else:
error_out = None
return data_out, error_out, background_out
|
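The core operation is replacing each isolated masked pixel with the mean of its valid neighbors. A self-contained sketch using an explicit 3x3 neighborhood (simpler than, and not identical to, the index construction above):

import numpy as np

data = np.array([[1., 1., 1.],
                 [1., 99., 1.],
                 [1., 1., 2.]])
mask = np.zeros_like(data, dtype=bool)
mask[1, 1] = True  # one isolated bad pixel (e.g. a hot pixel)

filled = data.copy()
for i, j in zip(*mask.nonzero()):
    # 3x3 neighborhood clipped to the array edges
    sl = (slice(max(i - 1, 0), i + 2), slice(max(j - 1, 0), j + 2))
    good = ~mask[sl]
    if good.any():
        filled[i, j] = data[sl][good].mean()
print(filled[1, 1])  # mean of the 8 good neighbors: 1.125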
[
"def",
"interpolate_masked_data",
"(",
"data",
",",
"mask",
",",
"error",
"=",
"None",
",",
"background",
"=",
"None",
")",
":",
"if",
"data",
".",
"shape",
"!=",
"mask",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'data and mask must have the same shape'",
")",
"data_out",
"=",
"np",
".",
"copy",
"(",
"data",
")",
"# do not alter input data",
"mask_idx",
"=",
"mask",
".",
"nonzero",
"(",
")",
"if",
"mask_idx",
"[",
"0",
"]",
".",
"size",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'All items in data are masked'",
")",
"for",
"x",
"in",
"zip",
"(",
"*",
"mask_idx",
")",
":",
"X",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"max",
"(",
"x",
"[",
"i",
"]",
"-",
"1",
",",
"0",
")",
",",
"min",
"(",
"x",
"[",
"i",
"]",
"+",
"1",
",",
"data",
".",
"shape",
"[",
"i",
"]",
"-",
"1",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"data",
".",
"shape",
")",
")",
"]",
")",
"goodpix",
"=",
"~",
"mask",
"[",
"X",
"]",
"if",
"not",
"np",
".",
"any",
"(",
"goodpix",
")",
":",
"warnings",
".",
"warn",
"(",
"'The masked pixel at \"{}\" is completely '",
"'surrounded by (connected) masked pixels, '",
"'thus unable to interpolate'",
".",
"format",
"(",
"x",
",",
")",
",",
"AstropyUserWarning",
")",
"continue",
"data_out",
"[",
"x",
"]",
"=",
"np",
".",
"mean",
"(",
"data",
"[",
"X",
"]",
"[",
"goodpix",
"]",
")",
"if",
"background",
"is",
"not",
"None",
":",
"if",
"background",
".",
"shape",
"!=",
"data",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'background and data must have the same '",
"'shape'",
")",
"background_out",
"=",
"np",
".",
"copy",
"(",
"background",
")",
"background_out",
"[",
"x",
"]",
"=",
"np",
".",
"mean",
"(",
"background",
"[",
"X",
"]",
"[",
"goodpix",
"]",
")",
"else",
":",
"background_out",
"=",
"None",
"if",
"error",
"is",
"not",
"None",
":",
"if",
"error",
".",
"shape",
"!=",
"data",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'error and data must have the same '",
"'shape'",
")",
"error_out",
"=",
"np",
".",
"copy",
"(",
"error",
")",
"error_out",
"[",
"x",
"]",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"error",
"[",
"X",
"]",
"[",
"goodpix",
"]",
"**",
"2",
")",
")",
"else",
":",
"error_out",
"=",
"None",
"return",
"data_out",
",",
"error_out",
",",
"background_out"
] |
Interpolate over masked pixels in data and optional error or
background images.
The values of masked pixels are replaced by the mean value of the
connected neighboring non-masked pixels. This function is intended
for single, isolated masked pixels (e.g. hot/warm pixels).
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The data array.
mask : array_like (bool)
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
error : array_like or `~astropy.units.Quantity`, optional
The pixel-wise Gaussian 1-sigma errors of the input ``data``.
``error`` must have the same shape as ``data``.
background : array_like, or `~astropy.units.Quantity`, optional
The pixel-wise background level of the input ``data``.
``background`` must have the same shape as ``data``.
Returns
-------
data : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``data`` with interpolated masked pixels.
error : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``error`` with interpolated masked pixels. `None` if
``error`` is not input.
background : `~numpy.ndarray` or `~astropy.units.Quantity`
Input ``background`` with interpolated masked pixels. `None` if
``background`` is not input.
|
[
"Interpolate",
"over",
"masked",
"pixels",
"in",
"data",
"and",
"optional",
"error",
"or",
"background",
"images",
"."
] |
cc9bb4534ab76bac98cb5f374a348a2573d10401
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/interpolation.py#L289-L370
|
10,667
|
google/pyringe
|
pyringe/plugins/inject_sentinel.py
|
SentinelInjectPlugin.ThreadsWithRunningExecServers
|
def ThreadsWithRunningExecServers(self):
"""Returns a list of tids of inferior threads with open exec servers."""
socket_dir = '/tmp/pyringe_%s' % self.inferior.pid
if os.path.isdir(socket_dir):
return [int(fname[:-9])
for fname in os.listdir(socket_dir)
if fname.endswith('.execsock')]
return []
|
python
|
def ThreadsWithRunningExecServers(self):
"""Returns a list of tids of inferior threads with open exec servers."""
socket_dir = '/tmp/pyringe_%s' % self.inferior.pid
if os.path.isdir(socket_dir):
return [int(fname[:-9])
for fname in os.listdir(socket_dir)
if fname.endswith('.execsock')]
return []
|
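The tid is recovered by stripping the 9-character '.execsock' suffix from each socket filename; a quick illustration of the parsing alone:

# len('.execsock') == 9, so fname[:-9] leaves just the numeric tid.
fnames = ['1234.execsock', '5678.execsock', 'stray.txt']
tids = [int(f[:-9]) for f in fnames if f.endswith('.execsock')]
print(tids)  # [1234, 5678]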
[
"def",
"ThreadsWithRunningExecServers",
"(",
"self",
")",
":",
"socket_dir",
"=",
"'/tmp/pyringe_%s'",
"%",
"self",
".",
"inferior",
".",
"pid",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"socket_dir",
")",
":",
"return",
"[",
"int",
"(",
"fname",
"[",
":",
"-",
"9",
"]",
")",
"for",
"fname",
"in",
"os",
".",
"listdir",
"(",
"socket_dir",
")",
"if",
"fname",
".",
"endswith",
"(",
"'.execsock'",
")",
"]",
"return",
"[",
"]"
] |
Returns a list of tids of inferior threads with open exec servers.
|
[
"Returns",
"a",
"list",
"of",
"tids",
"of",
"inferior",
"threads",
"with",
"open",
"exec",
"servers",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/inject_sentinel.py#L46-L53
|
10,668
|
google/pyringe
|
pyringe/plugins/inject_sentinel.py
|
SentinelInjectPlugin.SendToExecSocket
|
def SendToExecSocket(self, code, tid=None):
"""Inject python code into exec socket."""
response = self._SendToExecSocketRaw(json.dumps(code), tid)
return json.loads(response)
|
python
|
def SendToExecSocket(self, code, tid=None):
"""Inject python code into exec socket."""
response = self._SendToExecSocketRaw(json.dumps(code), tid)
return json.loads(response)
|
[
"def",
"SendToExecSocket",
"(",
"self",
",",
"code",
",",
"tid",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"_SendToExecSocketRaw",
"(",
"json",
".",
"dumps",
"(",
"code",
")",
",",
"tid",
")",
"return",
"json",
".",
"loads",
"(",
"response",
")"
] |
Inject python code into exec socket.
|
[
"Inject",
"python",
"code",
"into",
"exec",
"socket",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/inject_sentinel.py#L55-L58
|
10,669
|
google/pyringe
|
pyringe/plugins/inject_sentinel.py
|
SentinelInjectPlugin.CloseExecSocket
|
def CloseExecSocket(self, tid=None):
"""Send closing request to exec socket."""
response = self._SendToExecSocketRaw('__kill__', tid)
if response != '__kill_ack__':
logging.warning('May not have succeeded in closing socket, make sure '
'using execsocks().')
|
python
|
def CloseExecSocket(self, tid=None):
"""Send closing request to exec socket."""
response = self._SendToExecSocketRaw('__kill__', tid)
if response != '__kill_ack__':
logging.warning('May not have succeeded in closing socket, make sure '
'using execsocks().')
|
[
"def",
"CloseExecSocket",
"(",
"self",
",",
"tid",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"_SendToExecSocketRaw",
"(",
"'__kill__'",
",",
"tid",
")",
"if",
"response",
"!=",
"'__kill_ack__'",
":",
"logging",
".",
"warning",
"(",
"'May not have succeeded in closing socket, make sure '",
"'using execsocks().'",
")"
] |
Send closing request to exec socket.
|
[
"Send",
"closing",
"request",
"to",
"exec",
"socket",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/inject_sentinel.py#L79-L84
|
10,670
|
google/pyringe
|
pyringe/plugins/read_only.py
|
ReadonlyPlugin.Backtrace
|
def Backtrace(self, to_string=False):
"""Get a backtrace of the current position."""
if self.inferior.is_running:
res = self.inferior.Backtrace()
if to_string:
return res
print res
else:
logging.error('Not attached to any process.')
|
python
|
def Backtrace(self, to_string=False):
"""Get a backtrace of the current position."""
if self.inferior.is_running:
res = self.inferior.Backtrace()
if to_string:
return res
print res
else:
logging.error('Not attached to any process.')
|
[
"def",
"Backtrace",
"(",
"self",
",",
"to_string",
"=",
"False",
")",
":",
"if",
"self",
".",
"inferior",
".",
"is_running",
":",
"res",
"=",
"self",
".",
"inferior",
".",
"Backtrace",
"(",
")",
"if",
"to_string",
":",
"return",
"res",
"print",
"res",
"else",
":",
"logging",
".",
"error",
"(",
"'Not attached to any process.'",
")"
] |
Get a backtrace of the current position.
|
[
"Get",
"a",
"backtrace",
"of",
"the",
"current",
"position",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/read_only.py#L52-L60
|
10,671
|
google/pyringe
|
pyringe/plugins/read_only.py
|
ReadonlyPlugin.ListThreads
|
def ListThreads(self):
"""List the currently running python threads.
Returns:
    A list of the inferior's thread idents, or an empty list if the
    debugger is not attached to any process.
"""
if self.inferior.is_running:
return self.inferior.threads
logging.error('Not attached to any process.')
return []
|
python
|
def ListThreads(self):
"""List the currently running python threads.
Returns:
    A list of the inferior's thread idents, or an empty list if the
    debugger is not attached to any process.
"""
if self.inferior.is_running:
return self.inferior.threads
logging.error('Not attached to any process.')
return []
|
[
"def",
"ListThreads",
"(",
"self",
")",
":",
"if",
"self",
".",
"inferior",
".",
"is_running",
":",
"return",
"self",
".",
"inferior",
".",
"threads",
"logging",
".",
"error",
"(",
"'Not attached to any process.'",
")",
"return",
"[",
"]"
] |
List the currently running python threads.
Returns:
A list of the inferior's thread idents, or an empty list if the
debugger is not attached to any process.
|
[
"List",
"the",
"currently",
"running",
"python",
"threads",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/read_only.py#L86-L96
|
10,672
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
PyFrameObjectPtr.extract_filename
|
def extract_filename(self):
"""Alternative way of getting the executed file which inspects globals."""
globals_gdbval = self._gdbval['f_globals'].cast(GdbCache.DICT)
global_dict = libpython.PyDictObjectPtr(globals_gdbval)
for key, value in global_dict.iteritems():
if str(key.proxyval(set())) == '__file__':
return str(value.proxyval(set()))
|
python
|
def extract_filename(self):
"""Alternative way of getting the executed file which inspects globals."""
globals_gdbval = self._gdbval['f_globals'].cast(GdbCache.DICT)
global_dict = libpython.PyDictObjectPtr(globals_gdbval)
for key, value in global_dict.iteritems():
if str(key.proxyval(set())) == '__file__':
return str(value.proxyval(set()))
|
[
"def",
"extract_filename",
"(",
"self",
")",
":",
"globals_gdbval",
"=",
"self",
".",
"_gdbval",
"[",
"'f_globals'",
"]",
".",
"cast",
"(",
"GdbCache",
".",
"DICT",
")",
"global_dict",
"=",
"libpython",
".",
"PyDictObjectPtr",
"(",
"globals_gdbval",
")",
"for",
"key",
",",
"value",
"in",
"global_dict",
".",
"iteritems",
"(",
")",
":",
"if",
"str",
"(",
"key",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
")",
"==",
"'__file__'",
":",
"return",
"str",
"(",
"value",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
")"
] |
Alternative way of getting the executed file which inspects globals.
|
[
"Alternative",
"way",
"of",
"getting",
"the",
"executed",
"file",
"which",
"inspects",
"globals",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L157-L163
|
10,673
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService._UnserializableObjectFallback
|
def _UnserializableObjectFallback(self, obj):
"""Handles sanitizing of unserializable objects for Json.
For instances of heap types, we take the class dict, augment it with the
instance's __dict__, tag it and transmit it over to the RPC client to be
reconstructed there. (Works with both old and new style classes)
Args:
obj: The object to Json-serialize
Returns:
A Json-serializable version of the parameter
"""
if isinstance(obj, libpython.PyInstanceObjectPtr):
# old-style classes use 'classobj'/'instance'
# get class attribute dictionary
in_class = obj.pyop_field('in_class')
result_dict = in_class.pyop_field('cl_dict').proxyval(set())
# let libpython.py do the work of getting the instance dict
instanceproxy = obj.proxyval(set())
result_dict.update(instanceproxy.attrdict)
result_dict['__pyringe_type_name__'] = instanceproxy.cl_name
result_dict['__pyringe_address__'] = instanceproxy.address
return result_dict
if isinstance(obj, libpython.HeapTypeObjectPtr):
# interestingly enough, HeapTypeObjectPtr seems to handle all pointers to
# heap type PyObjects, not only pointers to PyHeapTypeObject. This
# corresponds to new-style class instances. However, as all instances of
# new-style classes are simple PyObject pointers to the interpreter,
# libpython.py tends to give us HeapTypeObjectPtrs for things we can't
# handle properly.
try:
# get class attribute dictionary
type_ptr = obj.field('ob_type')
tp_dict = type_ptr.cast(GdbCache.TYPE)['tp_dict'].cast(GdbCache.DICT)
result_dict = libpython.PyDictObjectPtr(tp_dict).proxyval(set())
except gdb.error:
# There was probably a type mismatch triggered by wrong assumptions in
# libpython.py
result_dict = {}
try:
# get instance attributes
result_dict.update(obj.get_attr_dict().proxyval(set()))
result_dict['__pyringe_type_name__'] = obj.safe_tp_name()
result_dict['__pyringe_address__'] = long(obj._gdbval) # pylint: disable=protected-access
return result_dict
except TypeError:
# This happens in the case where we're not really looking at a heap type
# instance. There isn't really anything we can do, so we fall back to
# the default handling.
pass
# Default handler -- this does not result in proxy objects or fancy dicts,
# but most of the time, we end up emitting strings of the format
# '<object at remote 0x345a235>'
try:
proxy = obj.proxyval(set())
# json doesn't accept non-strings as keys, so we're helping along
if isinstance(proxy, dict):
return {str(key): val for key, val in proxy.iteritems()}
return proxy
except AttributeError:
return str(obj)
|
python
|
def _UnserializableObjectFallback(self, obj):
"""Handles sanitizing of unserializable objects for Json.
For instances of heap types, we take the class dict, augment it with the
instance's __dict__, tag it and transmit it over to the RPC client to be
reconstructed there. (Works with both old and new style classes)
Args:
obj: The object to Json-serialize
Returns:
A Json-serializable version of the parameter
"""
if isinstance(obj, libpython.PyInstanceObjectPtr):
# old-style classes use 'classobj'/'instance'
# get class attribute dictionary
in_class = obj.pyop_field('in_class')
result_dict = in_class.pyop_field('cl_dict').proxyval(set())
# let libpython.py do the work of getting the instance dict
instanceproxy = obj.proxyval(set())
result_dict.update(instanceproxy.attrdict)
result_dict['__pyringe_type_name__'] = instanceproxy.cl_name
result_dict['__pyringe_address__'] = instanceproxy.address
return result_dict
if isinstance(obj, libpython.HeapTypeObjectPtr):
# interestingly enough, HeapTypeObjectPtr seems to handle all pointers to
# heap type PyObjects, not only pointers to PyHeapTypeObject. This
# corresponds to new-style class instances. However, as all instances of
# new-style classes are simple PyObject pointers to the interpreter,
# libpython.py tends to give us HeapTypeObjectPtrs for things we can't
# handle properly.
try:
# get class attribute dictionary
type_ptr = obj.field('ob_type')
tp_dict = type_ptr.cast(GdbCache.TYPE)['tp_dict'].cast(GdbCache.DICT)
result_dict = libpython.PyDictObjectPtr(tp_dict).proxyval(set())
except gdb.error:
# There was probably a type mismatch triggered by wrong assumptions in
# libpython.py
result_dict = {}
try:
# get instance attributes
result_dict.update(obj.get_attr_dict().proxyval(set()))
result_dict['__pyringe_type_name__'] = obj.safe_tp_name()
result_dict['__pyringe_address__'] = long(obj._gdbval) # pylint: disable=protected-access
return result_dict
except TypeError:
# This happens in the case where we're not really looking at a heap type
# instance. There isn't really anything we can do, so we fall back to
# the default handling.
pass
# Default handler -- this does not result in proxy objects or fancy dicts,
# but most of the time, we end up emitting strings of the format
# '<object at remote 0x345a235>'
try:
proxy = obj.proxyval(set())
# json doesn't accept non-strings as keys, so we're helping along
if isinstance(proxy, dict):
return {str(key): val for key, val in proxy.iteritems()}
return proxy
except AttributeError:
return str(obj)
|
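The same tagging idea can be expressed with `json.dumps(default=...)`: when an object is not natively serializable, emit its `__dict__` plus a type tag. A minimal Python 3 sketch of the pattern (the gdb service itself targets Python 2 and gdb value proxies, so this is an analogy, not its code):

import json

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

def fallback(obj):
    # Tag non-serializable instances with their type name, much as the
    # gdb service does with '__pyringe_type_name__'.
    d = dict(getattr(obj, '__dict__', {}))
    d['__pyringe_type_name__'] = type(obj).__name__
    return d

print(json.dumps(Point(1, 2), default=fallback))
# {"x": 1, "y": 2, "__pyringe_type_name__": "Point"}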
[
"def",
"_UnserializableObjectFallback",
"(",
"self",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"libpython",
".",
"PyInstanceObjectPtr",
")",
":",
"# old-style classes use 'classobj'/'instance'",
"# get class attribute dictionary",
"in_class",
"=",
"obj",
".",
"pyop_field",
"(",
"'in_class'",
")",
"result_dict",
"=",
"in_class",
".",
"pyop_field",
"(",
"'cl_dict'",
")",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
"# let libpython.py do the work of getting the instance dict",
"instanceproxy",
"=",
"obj",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
"result_dict",
".",
"update",
"(",
"instanceproxy",
".",
"attrdict",
")",
"result_dict",
"[",
"'__pyringe_type_name__'",
"]",
"=",
"instanceproxy",
".",
"cl_name",
"result_dict",
"[",
"'__pyringe_address__'",
"]",
"=",
"instanceproxy",
".",
"address",
"return",
"result_dict",
"if",
"isinstance",
"(",
"obj",
",",
"libpython",
".",
"HeapTypeObjectPtr",
")",
":",
"# interestingly enough, HeapTypeObjectPtr seems to handle all pointers to",
"# heap type PyObjects, not only pointers to PyHeapTypeObject. This",
"# corresponds to new-style class instances. However, as all instances of",
"# new-style classes are simple PyObject pointers to the interpreter,",
"# libpython.py tends to give us HeapTypeObjectPtrs for things we can't",
"# handle properly.",
"try",
":",
"# get class attribute dictionary",
"type_ptr",
"=",
"obj",
".",
"field",
"(",
"'ob_type'",
")",
"tp_dict",
"=",
"type_ptr",
".",
"cast",
"(",
"GdbCache",
".",
"TYPE",
")",
"[",
"'tp_dict'",
"]",
".",
"cast",
"(",
"GdbCache",
".",
"DICT",
")",
"result_dict",
"=",
"libpython",
".",
"PyDictObjectPtr",
"(",
"tp_dict",
")",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
"except",
"gdb",
".",
"error",
":",
"# There was probably a type mismatch triggered by wrong assumptions in",
"# libpython.py",
"result_dict",
"=",
"{",
"}",
"try",
":",
"# get instance attributes",
"result_dict",
".",
"update",
"(",
"obj",
".",
"get_attr_dict",
"(",
")",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
")",
"result_dict",
"[",
"'__pyringe_type_name__'",
"]",
"=",
"obj",
".",
"safe_tp_name",
"(",
")",
"result_dict",
"[",
"'__pyringe_address__'",
"]",
"=",
"long",
"(",
"obj",
".",
"_gdbval",
")",
"# pylint: disable=protected-access",
"return",
"result_dict",
"except",
"TypeError",
":",
"# This happens in the case where we're not really looking at a heap type",
"# instance. There isn't really anything we can do, so we fall back to",
"# the default handling.",
"pass",
"# Default handler -- this does not result in proxy objects or fancy dicts,",
"# but most of the time, we end up emitting strings of the format",
"# '<object at remote 0x345a235>'",
"try",
":",
"proxy",
"=",
"obj",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
"# json doesn't accept non-strings as keys, so we're helping along",
"if",
"isinstance",
"(",
"proxy",
",",
"dict",
")",
":",
"return",
"{",
"str",
"(",
"key",
")",
":",
"val",
"for",
"key",
",",
"val",
"in",
"proxy",
".",
"iteritems",
"(",
")",
"}",
"return",
"proxy",
"except",
"AttributeError",
":",
"return",
"str",
"(",
"obj",
")"
] |
Handles sanitizing of unserializable objects for Json.
For instances of heap types, we take the class dict, augment it with the
instance's __dict__, tag it and transmit it over to the RPC client to be
reconstructed there. (Works with both old and new style classes)
Args:
obj: The object to Json-serialize
Returns:
A Json-serializable version of the parameter
|
[
"Handles",
"sanitizing",
"of",
"unserializable",
"objects",
"for",
"Json",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L208-L271
|
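A note on the tagged-dict format the fallback above emits: an instance is
serialized as a plain dict carrying two reserved keys that the RPC client
uses to reconstruct a proxy. A minimal standalone sketch of that shape (the
class name, attribute, and address below are made up for illustration):

import json

def tag_instance_dict(attrs, type_name, address):
    # Builds the tagged dict shape _UnserializableObjectFallback emits.
    tagged = dict(attrs)
    tagged['__pyringe_type_name__'] = type_name
    tagged['__pyringe_address__'] = address
    return tagged

print(json.dumps(tag_instance_dict({'x': 1}, 'Foo', 0x345a235)))
# e.g. {"x": 1, "__pyringe_type_name__": "Foo", "__pyringe_address__": 54895157}
# (key order may vary)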
10,674
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService._AcceptRPC
|
def _AcceptRPC(self):
"""Reads RPC request from stdin and processes it, writing result to stdout.
Returns:
True as long as execution is to be continued, False otherwise.
Raises:
RpcException: if no function was specified in the RPC or no such API
function exists.
"""
request = self._ReadObject()
if request['func'] == '__kill__':
self.ClearBreakpoints()
self._WriteObject('__kill_ack__')
return False
if 'func' not in request or request['func'].startswith('_'):
raise RpcException('Not a valid public API function.')
rpc_result = getattr(self, request['func'])(*request['args'])
self._WriteObject(rpc_result)
return True
|
python
|
def _AcceptRPC(self):
"""Reads RPC request from stdin and processes it, writing result to stdout.
Returns:
True as long as execution is to be continued, False otherwise.
Raises:
RpcException: if no function was specified in the RPC or no such API
function exists.
"""
request = self._ReadObject()
if request['func'] == '__kill__':
self.ClearBreakpoints()
self._WriteObject('__kill_ack__')
return False
if 'func' not in request or request['func'].startswith('_'):
raise RpcException('Not a valid public API function.')
rpc_result = getattr(self, request['func'])(*request['args'])
self._WriteObject(rpc_result)
return True
|
[
"def",
"_AcceptRPC",
"(",
"self",
")",
":",
"request",
"=",
"self",
".",
"_ReadObject",
"(",
")",
"if",
"request",
"[",
"'func'",
"]",
"==",
"'__kill__'",
":",
"self",
".",
"ClearBreakpoints",
"(",
")",
"self",
".",
"_WriteObject",
"(",
"'__kill_ack__'",
")",
"return",
"False",
"if",
"'func'",
"not",
"in",
"request",
"or",
"request",
"[",
"'func'",
"]",
".",
"startswith",
"(",
"'_'",
")",
":",
"raise",
"RpcException",
"(",
"'Not a valid public API function.'",
")",
"rpc_result",
"=",
"getattr",
"(",
"self",
",",
"request",
"[",
"'func'",
"]",
")",
"(",
"*",
"request",
"[",
"'args'",
"]",
")",
"self",
".",
"_WriteObject",
"(",
"rpc_result",
")",
"return",
"True"
] |
Reads RPC request from stdin and processes it, writing result to stdout.
Returns:
True as long as execution is to be continued, False otherwise.
Raises:
RpcException: if no function was specified in the RPC or no such API
function exists.
|
[
"Reads",
"RPC",
"request",
"from",
"stdin",
"and",
"processes",
"it",
"writing",
"result",
"to",
"stdout",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L293-L311
|
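The loop above defines the service's wire protocol: one JSON object per
request, with a 'func' and an 'args' key. The special name '__kill__' shuts
the service down (acknowledged with '__kill_ack__'), and any name starting
with '_' is rejected; note that a request lacking 'func' entirely raises
KeyError on the first lookup, before the membership check runs. A hedged
client-side sketch of the request format, reusing ExecuteRaw from this same
service as the called function:

import json

def make_rpc_request(funcname, *args):
    # Builds the payload GdbService._AcceptRPC expects on its stdin.
    return json.dumps({'func': funcname, 'args': args})

print(make_rpc_request('ExecuteRaw', [1234, None, None], 'info threads'))
# e.g. {"func": "ExecuteRaw", "args": [[1234, null, null], "info threads"]}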
10,675
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService._UnpackGdbVal
|
def _UnpackGdbVal(self, gdb_value):
"""Unpacks gdb.Value objects and returns the best-matched python object."""
val_type = gdb_value.type.code
if val_type == gdb.TYPE_CODE_INT or val_type == gdb.TYPE_CODE_ENUM:
return int(gdb_value)
if val_type == gdb.TYPE_CODE_VOID:
return None
if val_type == gdb.TYPE_CODE_PTR:
return long(gdb_value)
if val_type == gdb.TYPE_CODE_ARRAY:
# This is probably a string
return str(gdb_value)
# I'm out of ideas, let's return it as a string
return str(gdb_value)
|
python
|
def _UnpackGdbVal(self, gdb_value):
"""Unpacks gdb.Value objects and returns the best-matched python object."""
val_type = gdb_value.type.code
if val_type == gdb.TYPE_CODE_INT or val_type == gdb.TYPE_CODE_ENUM:
return int(gdb_value)
if val_type == gdb.TYPE_CODE_VOID:
return None
if val_type == gdb.TYPE_CODE_PTR:
return long(gdb_value)
if val_type == gdb.TYPE_CODE_ARRAY:
# This is probably a string
return str(gdb_value)
# I'm out of ideas, let's return it as a string
return str(gdb_value)
|
[
"def",
"_UnpackGdbVal",
"(",
"self",
",",
"gdb_value",
")",
":",
"val_type",
"=",
"gdb_value",
".",
"type",
".",
"code",
"if",
"val_type",
"==",
"gdb",
".",
"TYPE_CODE_INT",
"or",
"val_type",
"==",
"gdb",
".",
"TYPE_CODE_ENUM",
":",
"return",
"int",
"(",
"gdb_value",
")",
"if",
"val_type",
"==",
"gdb",
".",
"TYPE_CODE_VOID",
":",
"return",
"None",
"if",
"val_type",
"==",
"gdb",
".",
"TYPE_CODE_PTR",
":",
"return",
"long",
"(",
"gdb_value",
")",
"if",
"val_type",
"==",
"gdb",
".",
"TYPE_CODE_ARRAY",
":",
"# This is probably a string",
"return",
"str",
"(",
"gdb_value",
")",
"# I'm out of ideas, let's return it as a string",
"return",
"str",
"(",
"gdb_value",
")"
] |
Unpacks gdb.Value objects and returns the best-matched python object.
|
[
"Unpacks",
"gdb",
".",
"Value",
"objects",
"and",
"returns",
"the",
"best",
"-",
"matched",
"python",
"object",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L313-L326
|
10,676
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService.EnsureGdbPosition
|
def EnsureGdbPosition(self, pid, tid, frame_depth):
"""Make sure our position matches the request.
Args:
pid: The process ID of the target process
tid: The python thread ident of the target thread
frame_depth: The 'depth' of the requested frame in the frame stack
Raises:
PositionUnavailableException: If the requested process, thread or frame
can't be found or accessed.
"""
position = [pid, tid, frame_depth]
if not pid:
return
if not self.IsAttached():
try:
self.Attach(position)
except gdb.error as exc:
raise PositionUnavailableException(exc.message)
if gdb.selected_inferior().pid != pid:
self.Detach()
try:
self.Attach(position)
except gdb.error as exc:
raise PositionUnavailableException(exc.message)
if tid:
tstate_head = GdbCache.INTERP_HEAD['tstate_head']
for tstate in self._IterateChainedList(tstate_head, 'next'):
if tid == tstate['thread_id']:
self.selected_tstate = tstate
break
else:
raise PositionUnavailableException('Thread %s does not exist.' %
str(tid))
stack_head = self.selected_tstate['frame']
if frame_depth is not None:
frames = list(self._IterateChainedList(stack_head, 'f_back'))
frames.reverse()
try:
self.selected_frame = frames[frame_depth]
except IndexError:
raise PositionUnavailableException('Stack is not %s frames deep' %
str(frame_depth + 1))
|
python
|
def EnsureGdbPosition(self, pid, tid, frame_depth):
"""Make sure our position matches the request.
Args:
pid: The process ID of the target process
tid: The python thread ident of the target thread
frame_depth: The 'depth' of the requested frame in the frame stack
Raises:
PositionUnavailableException: If the requested process, thread or frame
can't be found or accessed.
"""
position = [pid, tid, frame_depth]
if not pid:
return
if not self.IsAttached():
try:
self.Attach(position)
except gdb.error as exc:
raise PositionUnavailableException(exc.message)
if gdb.selected_inferior().pid != pid:
self.Detach()
try:
self.Attach(position)
except gdb.error as exc:
raise PositionUnavailableException(exc.message)
if tid:
tstate_head = GdbCache.INTERP_HEAD['tstate_head']
for tstate in self._IterateChainedList(tstate_head, 'next'):
if tid == tstate['thread_id']:
self.selected_tstate = tstate
break
else:
raise PositionUnavailableException('Thread %s does not exist.' %
str(tid))
stack_head = self.selected_tstate['frame']
if frame_depth is not None:
frames = list(self._IterateChainedList(stack_head, 'f_back'))
frames.reverse()
try:
self.selected_frame = frames[frame_depth]
except IndexError:
raise PositionUnavailableException('Stack is not %s frames deep' %
str(frame_depth + 1))
|
[
"def",
"EnsureGdbPosition",
"(",
"self",
",",
"pid",
",",
"tid",
",",
"frame_depth",
")",
":",
"position",
"=",
"[",
"pid",
",",
"tid",
",",
"frame_depth",
"]",
"if",
"not",
"pid",
":",
"return",
"if",
"not",
"self",
".",
"IsAttached",
"(",
")",
":",
"try",
":",
"self",
".",
"Attach",
"(",
"position",
")",
"except",
"gdb",
".",
"error",
"as",
"exc",
":",
"raise",
"PositionUnavailableException",
"(",
"exc",
".",
"message",
")",
"if",
"gdb",
".",
"selected_inferior",
"(",
")",
".",
"pid",
"!=",
"pid",
":",
"self",
".",
"Detach",
"(",
")",
"try",
":",
"self",
".",
"Attach",
"(",
"position",
")",
"except",
"gdb",
".",
"error",
"as",
"exc",
":",
"raise",
"PositionUnavailableException",
"(",
"exc",
".",
"message",
")",
"if",
"tid",
":",
"tstate_head",
"=",
"GdbCache",
".",
"INTERP_HEAD",
"[",
"'tstate_head'",
"]",
"for",
"tstate",
"in",
"self",
".",
"_IterateChainedList",
"(",
"tstate_head",
",",
"'next'",
")",
":",
"if",
"tid",
"==",
"tstate",
"[",
"'thread_id'",
"]",
":",
"self",
".",
"selected_tstate",
"=",
"tstate",
"break",
"else",
":",
"raise",
"PositionUnavailableException",
"(",
"'Thread %s does not exist.'",
"%",
"str",
"(",
"tid",
")",
")",
"stack_head",
"=",
"self",
".",
"selected_tstate",
"[",
"'frame'",
"]",
"if",
"frame_depth",
"is",
"not",
"None",
":",
"frames",
"=",
"list",
"(",
"self",
".",
"_IterateChainedList",
"(",
"stack_head",
",",
"'f_back'",
")",
")",
"frames",
".",
"reverse",
"(",
")",
"try",
":",
"self",
".",
"selected_frame",
"=",
"frames",
"[",
"frame_depth",
"]",
"except",
"IndexError",
":",
"raise",
"PositionUnavailableException",
"(",
"'Stack is not %s frames deep'",
"%",
"str",
"(",
"frame_depth",
"+",
"1",
")",
")"
] |
Make sure our position matches the request.
Args:
pid: The process ID of the target process
tid: The python thread ident of the target thread
frame_depth: The 'depth' of the requested frame in the frame stack
Raises:
PositionUnavailableException: If the requested process, thread or frame
can't be found or accessed.
|
[
"Make",
"sure",
"our",
"position",
"matches",
"the",
"request",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L335-L378
|
10,677
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService.IsSymbolFileSane
|
def IsSymbolFileSane(self, position):
"""Performs basic sanity check by trying to look up a bunch of symbols."""
pos = [position[0], None, None]
self.EnsureGdbPosition(*pos)
try:
if GdbCache.DICT and GdbCache.TYPE and GdbCache.INTERP_HEAD:
# pylint: disable=pointless-statement
tstate = GdbCache.INTERP_HEAD['tstate_head']
tstate['thread_id']
frame = tstate['frame']
frame_attrs = ['f_back',
'f_locals',
'f_localsplus',
'f_globals',
'f_builtins',
'f_lineno',
'f_lasti']
for attr_name in frame_attrs:
# This lookup shouldn't throw an exception
frame[attr_name]
code = frame['f_code']
code_attrs = ['co_name',
'co_filename',
'co_nlocals',
'co_varnames',
'co_lnotab',
'co_firstlineno']
for attr_name in code_attrs:
# Same as above, just checking whether the lookup succeeds.
code[attr_name]
# if we've gotten this far, we should be fine, as it means gdb managed
# to look up values for all of these. They might still be null, the
# symbol file might still be bogus, but making gdb check for null values
# and letting it run into access violations is the best we can do. We
# haven't checked any of the python types (dict, etc.), but this symbol
# file seems to be useful for some things, so let's give it our seal of
# approval.
return True
except gdb.error:
return False
# looks like the initial GdbCache refresh failed. That's no good.
return False
|
python
|
def IsSymbolFileSane(self, position):
"""Performs basic sanity check by trying to look up a bunch of symbols."""
pos = [position[0], None, None]
self.EnsureGdbPosition(*pos)
try:
if GdbCache.DICT and GdbCache.TYPE and GdbCache.INTERP_HEAD:
# pylint: disable=pointless-statement
tstate = GdbCache.INTERP_HEAD['tstate_head']
tstate['thread_id']
frame = tstate['frame']
frame_attrs = ['f_back',
'f_locals',
'f_localsplus',
'f_globals',
'f_builtins',
'f_lineno',
'f_lasti']
for attr_name in frame_attrs:
# This lookup shouldn't throw an exception
frame[attr_name]
code = frame['f_code']
code_attrs = ['co_name',
'co_filename',
'co_nlocals',
'co_varnames',
'co_lnotab',
'co_firstlineno']
for attr_name in code_attrs:
# Same as above, just checking whether the lookup succeeds.
code[attr_name]
# if we've gotten this far, we should be fine, as it means gdb managed
# to look up values for all of these. They might still be null, the
# symbol file might still be bogus, but making gdb check for null values
# and letting it run into access violations is the best we can do. We
# haven't checked any of the python types (dict, etc.), but this symbol
# file seems to be useful for some things, so let's give it our seal of
# approval.
return True
except gdb.error:
return False
# looks like the initial GdbCache refresh failed. That's no good.
return False
|
[
"def",
"IsSymbolFileSane",
"(",
"self",
",",
"position",
")",
":",
"pos",
"=",
"[",
"position",
"[",
"0",
"]",
",",
"None",
",",
"None",
"]",
"self",
".",
"EnsureGdbPosition",
"(",
"*",
"pos",
")",
"try",
":",
"if",
"GdbCache",
".",
"DICT",
"and",
"GdbCache",
".",
"TYPE",
"and",
"GdbCache",
".",
"INTERP_HEAD",
":",
"# pylint: disable=pointless-statement",
"tstate",
"=",
"GdbCache",
".",
"INTERP_HEAD",
"[",
"'tstate_head'",
"]",
"tstate",
"[",
"'thread_id'",
"]",
"frame",
"=",
"tstate",
"[",
"'frame'",
"]",
"frame_attrs",
"=",
"[",
"'f_back'",
",",
"'f_locals'",
",",
"'f_localsplus'",
",",
"'f_globals'",
",",
"'f_builtins'",
",",
"'f_lineno'",
",",
"'f_lasti'",
"]",
"for",
"attr_name",
"in",
"frame_attrs",
":",
"# This lookup shouldn't throw an exception",
"frame",
"[",
"attr_name",
"]",
"code",
"=",
"frame",
"[",
"'f_code'",
"]",
"code_attrs",
"=",
"[",
"'co_name'",
",",
"'co_filename'",
",",
"'co_nlocals'",
",",
"'co_varnames'",
",",
"'co_lnotab'",
",",
"'co_firstlineno'",
"]",
"for",
"attr_name",
"in",
"code_attrs",
":",
"# Same as above, just checking whether the lookup succeeds.",
"code",
"[",
"attr_name",
"]",
"# if we've gotten this far, we should be fine, as it means gdb managed",
"# to look up values for all of these. They might still be null, the",
"# symbol file might still be bogus, but making gdb check for null values",
"# and letting it run into access violations is the best we can do. We",
"# haven't checked any of the python types (dict, etc.), but this symbol",
"# file seems to be useful for some things, so let's give it our seal of",
"# approval.",
"return",
"True",
"except",
"gdb",
".",
"error",
":",
"return",
"False",
"# looks like the initial GdbCache refresh failed. That's no good.",
"return",
"False"
] |
Performs basic sanity check by trying to look up a bunch of symbols.
|
[
"Performs",
"basic",
"sanity",
"check",
"by",
"trying",
"to",
"look",
"up",
"a",
"bunch",
"of",
"symbols",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L392-L433
|
10,678
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService.Detach
|
def Detach(self):
"""Detaches from the inferior. If not attached, this is a no-op."""
# We have to work around the python APIs weirdness :\
if not self.IsAttached():
return None
# Gdb doesn't drain any pending SIGINTs it may have sent to the inferior
# when it simply detaches. We can do this by letting the inferior continue,
# and gdb will intercept any SIGINT that's still to-be-delivered; as soon as
# we do so however, we may lose control of gdb (if we're running in
# synchronous mode). So we queue an interruption and continue gdb right
# afterwards, it will waitpid() for its inferior and collect all signals
# that may have been queued.
pid = gdb.selected_inferior().pid
self.Interrupt([pid, None, None])
self.Continue([pid, None, None])
result = gdb.execute('detach', to_string=True)
if not result:
return None
return result
|
python
|
def Detach(self):
"""Detaches from the inferior. If not attached, this is a no-op."""
# We have to work around the python APIs weirdness :\
if not self.IsAttached():
return None
# Gdb doesn't drain any pending SIGINTs it may have sent to the inferior
# when it simply detaches. We can do this by letting the inferior continue,
# and gdb will intercept any SIGINT that's still to-be-delivered; as soon as
# we do so however, we may lose control of gdb (if we're running in
# synchronous mode). So we queue an interruption and continue gdb right
# afterwards, it will waitpid() for its inferior and collect all signals
# that may have been queued.
pid = gdb.selected_inferior().pid
self.Interrupt([pid, None, None])
self.Continue([pid, None, None])
result = gdb.execute('detach', to_string=True)
if not result:
return None
return result
|
[
"def",
"Detach",
"(",
"self",
")",
":",
"# We have to work around the python APIs weirdness :\\",
"if",
"not",
"self",
".",
"IsAttached",
"(",
")",
":",
"return",
"None",
"# Gdb doesn't drain any pending SIGINTs it may have sent to the inferior",
"# when it simply detaches. We can do this by letting the inferior continue,",
"# and gdb will intercept any SIGINT that's still to-be-delivered; as soon as",
"# we do so however, we may lose control of gdb (if we're running in",
"# synchronous mode). So we queue an interruption and continue gdb right",
"# afterwards, it will waitpid() for its inferior and collect all signals",
"# that may have been queued.",
"pid",
"=",
"gdb",
".",
"selected_inferior",
"(",
")",
".",
"pid",
"self",
".",
"Interrupt",
"(",
"[",
"pid",
",",
"None",
",",
"None",
"]",
")",
"self",
".",
"Continue",
"(",
"[",
"pid",
",",
"None",
",",
"None",
"]",
")",
"result",
"=",
"gdb",
".",
"execute",
"(",
"'detach'",
",",
"to_string",
"=",
"True",
")",
"if",
"not",
"result",
":",
"return",
"None",
"return",
"result"
] |
Detaches from the inferior. If not attached, this is a no-op.
|
[
"Detaches",
"from",
"the",
"inferior",
".",
"If",
"not",
"attached",
"this",
"is",
"a",
"no",
"-",
"op",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L449-L467
|
10,679
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService.Call
|
def Call(self, position, function_call):
"""Perform a function call in the inferior.
WARNING: Since Gdb's concept of threads can't be directly identified with
python threads, the function call will be made from what has to be assumed
is an arbitrary thread. This *will* interrupt the inferior. Continuing it
after the call is the responsibility of the caller.
Args:
position: the context of the inferior to call the function from.
function_call: A string corresponding to a function call. Format:
'foo(0,0)'
Returns:
The return value of the called function.
"""
self.EnsureGdbPosition(position[0], None, None)
if not gdb.selected_thread().is_stopped():
self.Interrupt(position)
result_value = gdb.parse_and_eval(function_call)
return self._UnpackGdbVal(result_value)
|
python
|
def Call(self, position, function_call):
"""Perform a function call in the inferior.
WARNING: Since Gdb's concept of threads can't be directly identified with
python threads, the function call will be made from what has to be assumed
is an arbitrary thread. This *will* interrupt the inferior. Continuing it
after the call is the responsibility of the caller.
Args:
position: the context of the inferior to call the function from.
function_call: A string corresponding to a function call. Format:
'foo(0,0)'
Returns:
The return value of the called function.
"""
self.EnsureGdbPosition(position[0], None, None)
if not gdb.selected_thread().is_stopped():
self.Interrupt(position)
result_value = gdb.parse_and_eval(function_call)
return self._UnpackGdbVal(result_value)
|
[
"def",
"Call",
"(",
"self",
",",
"position",
",",
"function_call",
")",
":",
"self",
".",
"EnsureGdbPosition",
"(",
"position",
"[",
"0",
"]",
",",
"None",
",",
"None",
")",
"if",
"not",
"gdb",
".",
"selected_thread",
"(",
")",
".",
"is_stopped",
"(",
")",
":",
"self",
".",
"Interrupt",
"(",
"position",
")",
"result_value",
"=",
"gdb",
".",
"parse_and_eval",
"(",
"function_call",
")",
"return",
"self",
".",
"_UnpackGdbVal",
"(",
"result_value",
")"
] |
Perform a function call in the inferior.
WARNING: Since Gdb's concept of threads can't be directly identified with
python threads, the function call will be made from what has to be assumed
is an arbitrary thread. This *will* interrupt the inferior. Continuing it
after the call is the responsibility of the caller.
Args:
position: the context of the inferior to call the function from.
function_call: A string corresponding to a function call. Format:
'foo(0,0)'
Returns:
The return value of the called function.
|
[
"Perform",
"a",
"function",
"call",
"in",
"the",
"inferior",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L492-L511
|
10,680
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService.ExecuteRaw
|
def ExecuteRaw(self, position, command):
"""Send a command string to gdb."""
self.EnsureGdbPosition(position[0], None, None)
return gdb.execute(command, to_string=True)
|
python
|
def ExecuteRaw(self, position, command):
"""Send a command string to gdb."""
self.EnsureGdbPosition(position[0], None, None)
return gdb.execute(command, to_string=True)
|
[
"def",
"ExecuteRaw",
"(",
"self",
",",
"position",
",",
"command",
")",
":",
"self",
".",
"EnsureGdbPosition",
"(",
"position",
"[",
"0",
"]",
",",
"None",
",",
"None",
")",
"return",
"gdb",
".",
"execute",
"(",
"command",
",",
"to_string",
"=",
"True",
")"
] |
Send a command string to gdb.
|
[
"Send",
"a",
"command",
"string",
"to",
"gdb",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L513-L516
|
10,681
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService._GetGdbThreadMapping
|
def _GetGdbThreadMapping(self, position):
"""Gets a mapping from python tid to gdb thread num.
There's no way to get the thread ident from a gdb thread. We only get the
"ID of the thread, as assigned by GDB", which is completely useless for
everything except talking to gdb. So in order to translate between these
two, we have to execute 'info threads' and parse its output. Note that this
may only work on linux, and only when python was compiled to use pthreads.
It may work elsewhere, but we won't guarantee it.
Args:
position: array of pid, tid, framedepth specifying the requested position.
Returns:
A dictionary of the form {python_tid: gdb_threadnum}.
"""
if len(gdb.selected_inferior().threads()) == 1:
# gdb's output for info threads changes and only displays PID. We cheat.
return {position[1]: 1}
# example:
# 8 Thread 0x7f0a637fe700 (LWP 11894) "test.py" 0x00007f0a69563e63 in
# select () from /usr/lib64/libc.so.6
thread_line_regexp = r'\s*\**\s*([0-9]+)\s+[a-zA-Z]+\s+([x0-9a-fA-F]+)\s.*'
output = gdb.execute('info threads', to_string=True)
matches = [re.match(thread_line_regexp, line) for line
in output.split('\n')[1:]]
return {int(match.group(2), 16): int(match.group(1))
for match in matches if match}
|
python
|
def _GetGdbThreadMapping(self, position):
"""Gets a mapping from python tid to gdb thread num.
There's no way to get the thread ident from a gdb thread. We only get the
"ID of the thread, as assigned by GDB", which is completely useless for
everything except talking to gdb. So in order to translate between these
two, we have to execute 'info threads' and parse its output. Note that this
may only work on linux, and only when python was compiled to use pthreads.
It may work elsewhere, but we won't guarantee it.
Args:
position: array of pid, tid, framedepth specifying the requested position.
Returns:
A dictionary of the form {python_tid: gdb_threadnum}.
"""
if len(gdb.selected_inferior().threads()) == 1:
# gdb's output for info threads changes and only displays PID. We cheat.
return {position[1]: 1}
# example:
# 8 Thread 0x7f0a637fe700 (LWP 11894) "test.py" 0x00007f0a69563e63 in
# select () from /usr/lib64/libc.so.6
thread_line_regexp = r'\s*\**\s*([0-9]+)\s+[a-zA-Z]+\s+([x0-9a-fA-F]+)\s.*'
output = gdb.execute('info threads', to_string=True)
matches = [re.match(thread_line_regexp, line) for line
in output.split('\n')[1:]]
return {int(match.group(2), 16): int(match.group(1))
for match in matches if match}
|
[
"def",
"_GetGdbThreadMapping",
"(",
"self",
",",
"position",
")",
":",
"if",
"len",
"(",
"gdb",
".",
"selected_inferior",
"(",
")",
".",
"threads",
"(",
")",
")",
"==",
"1",
":",
"# gdb's output for info threads changes and only displays PID. We cheat.",
"return",
"{",
"position",
"[",
"1",
"]",
":",
"1",
"}",
"# example:",
"# 8 Thread 0x7f0a637fe700 (LWP 11894) \"test.py\" 0x00007f0a69563e63 in",
"# select () from /usr/lib64/libc.so.6",
"thread_line_regexp",
"=",
"r'\\s*\\**\\s*([0-9]+)\\s+[a-zA-Z]+\\s+([x0-9a-fA-F]+)\\s.*'",
"output",
"=",
"gdb",
".",
"execute",
"(",
"'info threads'",
",",
"to_string",
"=",
"True",
")",
"matches",
"=",
"[",
"re",
".",
"match",
"(",
"thread_line_regexp",
",",
"line",
")",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"'\\n'",
")",
"[",
"1",
":",
"]",
"]",
"return",
"{",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
",",
"16",
")",
":",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"for",
"match",
"in",
"matches",
"if",
"match",
"}"
] |
Gets a mapping from python tid to gdb thread num.
There's no way to get the thread ident from a gdb thread. We only get the
"ID of the thread, as assigned by GDB", which is completely useless for
everything except talking to gdb. So in order to translate between these
two, we have to execute 'info threads' and parse its output. Note that this
may only work on linux, and only when python was compiled to use pthreads.
It may work elsewhere, but we won't guarantee it.
Args:
position: array of pid, tid, framedepth specifying the requested position.
Returns:
A dictionary of the form {python_tid: gdb_threadnum}.
|
[
"Gets",
"a",
"mapping",
"from",
"python",
"tid",
"to",
"gdb",
"thread",
"num",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L518-L545
|
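To make the parsing concrete, here is the regex above applied to the sample
'info threads' line quoted in the code comments; this is a standalone check,
not part of the service:

import re

thread_line_regexp = r'\s*\**\s*([0-9]+)\s+[a-zA-Z]+\s+([x0-9a-fA-F]+)\s.*'
line = ('  8    Thread 0x7f0a637fe700 (LWP 11894) "test.py" '
        '0x00007f0a69563e63 in select () from /usr/lib64/libc.so.6')
match = re.match(thread_line_regexp, line)
gdb_threadnum = int(match.group(1))    # 8, gdb's own thread number
python_tid = int(match.group(2), 16)   # 0x7f0a637fe700, the pthread address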
10,682
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService._Inject
|
def _Inject(self, position, call):
"""Injects evaluation of 'call' in a safe location in the inferior.
Due to the way these injected function calls work, gdb must not be killed
until the call has returned. If that happens, the inferior will be sent
SIGTRAP upon attempting to return from the dummy frame gdb constructs for
us, and will most probably crash.
Args:
position: array of pid, tid, framedepth specifying the requested position.
call: Any expression gdb can evaluate. Usually a function call.
Raises:
RuntimeError: if gdb is not being run in synchronous exec mode.
"""
self.EnsureGdbPosition(position[0], position[1], None)
self.ClearBreakpoints()
self._AddThreadSpecificBreakpoint(position)
gdb.parse_and_eval('%s = 1' % GdbCache.PENDINGCALLS_TO_DO)
gdb.parse_and_eval('%s = 1' % GdbCache.PENDINGBUSY)
try:
# We're "armed", risk the blocking call to Continue
self.Continue(position)
# Breakpoint was hit!
if not gdb.selected_thread().is_stopped():
# This should not happen. Depending on how gdb is being used, the
# semantics of self.Continue change, so I'd rather leave this check in
# here, in case we ever *do* end up changing to async mode.
raise RuntimeError('Gdb is not acting as expected, is it being run in '
'async mode?')
finally:
gdb.parse_and_eval('%s = 0' % GdbCache.PENDINGBUSY)
self.Call(position, call)
|
python
|
def _Inject(self, position, call):
"""Injects evaluation of 'call' in a safe location in the inferior.
Due to the way these injected function calls work, gdb must not be killed
until the call has returned. If that happens, the inferior will be sent
SIGTRAP upon attempting to return from the dummy frame gdb constructs for
us, and will most probably crash.
Args:
position: array of pid, tid, framedepth specifying the requested position.
call: Any expression gdb can evaluate. Usually a function call.
Raises:
RuntimeError: if gdb is not being run in synchronous exec mode.
"""
self.EnsureGdbPosition(position[0], position[1], None)
self.ClearBreakpoints()
self._AddThreadSpecificBreakpoint(position)
gdb.parse_and_eval('%s = 1' % GdbCache.PENDINGCALLS_TO_DO)
gdb.parse_and_eval('%s = 1' % GdbCache.PENDINGBUSY)
try:
# We're "armed", risk the blocking call to Continue
self.Continue(position)
# Breakpoint was hit!
if not gdb.selected_thread().is_stopped():
# This should not happen. Depending on how gdb is being used, the
# semantics of self.Continue change, so I'd rather leave this check in
# here, in case we ever *do* end up changing to async mode.
raise RuntimeError('Gdb is not acting as expected, is it being run in '
'async mode?')
finally:
gdb.parse_and_eval('%s = 0' % GdbCache.PENDINGBUSY)
self.Call(position, call)
|
[
"def",
"_Inject",
"(",
"self",
",",
"position",
",",
"call",
")",
":",
"self",
".",
"EnsureGdbPosition",
"(",
"position",
"[",
"0",
"]",
",",
"position",
"[",
"1",
"]",
",",
"None",
")",
"self",
".",
"ClearBreakpoints",
"(",
")",
"self",
".",
"_AddThreadSpecificBreakpoint",
"(",
"position",
")",
"gdb",
".",
"parse_and_eval",
"(",
"'%s = 1'",
"%",
"GdbCache",
".",
"PENDINGCALLS_TO_DO",
")",
"gdb",
".",
"parse_and_eval",
"(",
"'%s = 1'",
"%",
"GdbCache",
".",
"PENDINGBUSY",
")",
"try",
":",
"# We're \"armed\", risk the blocking call to Continue",
"self",
".",
"Continue",
"(",
"position",
")",
"# Breakpoint was hit!",
"if",
"not",
"gdb",
".",
"selected_thread",
"(",
")",
".",
"is_stopped",
"(",
")",
":",
"# This should not happen. Depending on how gdb is being used, the",
"# semantics of self.Continue change, so I'd rather leave this check in",
"# here, in case we ever *do* end up changing to async mode.",
"raise",
"RuntimeError",
"(",
"'Gdb is not acting as expected, is it being run in '",
"'async mode?'",
")",
"finally",
":",
"gdb",
".",
"parse_and_eval",
"(",
"'%s = 0'",
"%",
"GdbCache",
".",
"PENDINGBUSY",
")",
"self",
".",
"Call",
"(",
"position",
",",
"call",
")"
] |
Injects evaluation of 'call' in a safe location in the inferior.
Due to the way these injected function calls work, gdb must not be killed
until the call has returned. If that happens, the inferior will be sent
SIGTRAP upon attempting to return from the dummy frame gdb constructs for
us, and will most probably crash.
Args:
position: array of pid, tid, framedepth specifying the requested position.
call: Any expression gdb can evaluate. Usually a function call.
Raises:
RuntimeError: if gdb is not being run in synchronous exec mode.
|
[
"Injects",
"evaluation",
"of",
"call",
"in",
"a",
"safe",
"location",
"in",
"the",
"inferior",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L557-L587
|
10,683
|
google/pyringe
|
pyringe/payload/gdb_service.py
|
GdbService._BacktraceFromFramePtr
|
def _BacktraceFromFramePtr(self, frame_ptr):
"""Assembles and returns what looks exactly like python's backtraces."""
# expects frame_ptr to be a gdb.Value
frame_objs = [PyFrameObjectPtr(frame) for frame
in self._IterateChainedList(frame_ptr, 'f_back')]
# We want to output tracebacks in the same format python uses, so we have to
# reverse the stack
frame_objs.reverse()
tb_strings = ['Traceback (most recent call last):']
for frame in frame_objs:
line_string = (' File "%s", line %s, in %s' %
(frame.filename(),
str(frame.current_line_num()),
frame.co_name.proxyval(set())))
tb_strings.append(line_string)
line_string = ' %s' % frame.current_line().strip()
tb_strings.append(line_string)
return '\n'.join(tb_strings)
|
python
|
def _BacktraceFromFramePtr(self, frame_ptr):
"""Assembles and returns what looks exactly like python's backtraces."""
# expects frame_ptr to be a gdb.Value
frame_objs = [PyFrameObjectPtr(frame) for frame
in self._IterateChainedList(frame_ptr, 'f_back')]
# We want to output tracebacks in the same format python uses, so we have to
# reverse the stack
frame_objs.reverse()
tb_strings = ['Traceback (most recent call last):']
for frame in frame_objs:
line_string = (' File "%s", line %s, in %s' %
(frame.filename(),
str(frame.current_line_num()),
frame.co_name.proxyval(set())))
tb_strings.append(line_string)
line_string = ' %s' % frame.current_line().strip()
tb_strings.append(line_string)
return '\n'.join(tb_strings)
|
[
"def",
"_BacktraceFromFramePtr",
"(",
"self",
",",
"frame_ptr",
")",
":",
"# expects frame_ptr to be a gdb.Value",
"frame_objs",
"=",
"[",
"PyFrameObjectPtr",
"(",
"frame",
")",
"for",
"frame",
"in",
"self",
".",
"_IterateChainedList",
"(",
"frame_ptr",
",",
"'f_back'",
")",
"]",
"# We want to output tracebacks in the same format python uses, so we have to",
"# reverse the stack",
"frame_objs",
".",
"reverse",
"(",
")",
"tb_strings",
"=",
"[",
"'Traceback (most recent call last):'",
"]",
"for",
"frame",
"in",
"frame_objs",
":",
"line_string",
"=",
"(",
"' File \"%s\", line %s, in %s'",
"%",
"(",
"frame",
".",
"filename",
"(",
")",
",",
"str",
"(",
"frame",
".",
"current_line_num",
"(",
")",
")",
",",
"frame",
".",
"co_name",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
")",
")",
"tb_strings",
".",
"append",
"(",
"line_string",
")",
"line_string",
"=",
"' %s'",
"%",
"frame",
".",
"current_line",
"(",
")",
".",
"strip",
"(",
")",
"tb_strings",
".",
"append",
"(",
"line_string",
")",
"return",
"'\\n'",
".",
"join",
"(",
"tb_strings",
")"
] |
Assembles and returns what looks exactly like python's backtraces.
|
[
"Assembles",
"and",
"returns",
"what",
"looks",
"exactly",
"like",
"python",
"s",
"backtraces",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L597-L615
|
10,684
|
google/pyringe
|
pyringe/inferior.py
|
GdbProxy.Kill
|
def Kill(self):
"""Send death pill to Gdb and forcefully kill it if that doesn't work."""
try:
if self.is_running:
self.Detach()
if self._Execute('__kill__') == '__kill_ack__':
# acknowledged, let's give it some time to die in peace
time.sleep(0.1)
except (TimeoutError, ProxyError):
logging.debug('Termination request not acknowledged, killing gdb.')
if self.is_running:
# death pill didn't seem to work. We don't want the inferior to get killed
# the next time it hits a dangling breakpoint, so we send a SIGINT to gdb,
# which makes it disable instruction breakpoints for the time being.
os.kill(self._process.pid, signal.SIGINT)
# Since SIGINT has higher priority (with signal number 2) than SIGTERM
# (signal 15), SIGTERM cannot preempt the signal handler for SIGINT.
self._process.terminate()
self._process.wait()
self._errfile_r.close()
self._outfile_r.close()
|
python
|
def Kill(self):
"""Send death pill to Gdb and forcefully kill it if that doesn't work."""
try:
if self.is_running:
self.Detach()
if self._Execute('__kill__') == '__kill_ack__':
# acknowledged, let's give it some time to die in peace
time.sleep(0.1)
except (TimeoutError, ProxyError):
logging.debug('Termination request not acknowledged, killing gdb.')
if self.is_running:
# death pill didn't seem to work. We don't want the inferior to get killed
# the next time it hits a dangling breakpoint, so we send a SIGINT to gdb,
# which makes it disable instruction breakpoints for the time being.
os.kill(self._process.pid, signal.SIGINT)
# Since SIGINT has higher priority (with signal number 2) than SIGTERM
# (signal 15), SIGTERM cannot preempt the signal handler for SIGINT.
self._process.terminate()
self._process.wait()
self._errfile_r.close()
self._outfile_r.close()
|
[
"def",
"Kill",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"is_running",
":",
"self",
".",
"Detach",
"(",
")",
"if",
"self",
".",
"_Execute",
"(",
"'__kill__'",
")",
"==",
"'__kill_ack__'",
":",
"# acknowledged, let's give it some time to die in peace",
"time",
".",
"sleep",
"(",
"0.1",
")",
"except",
"(",
"TimeoutError",
",",
"ProxyError",
")",
":",
"logging",
".",
"debug",
"(",
"'Termination request not acknowledged, killing gdb.'",
")",
"if",
"self",
".",
"is_running",
":",
"# death pill didn't seem to work. We don't want the inferior to get killed",
"# the next time it hits a dangling breakpoint, so we send a SIGINT to gdb,",
"# which makes it disable instruction breakpoints for the time being.",
"os",
".",
"kill",
"(",
"self",
".",
"_process",
".",
"pid",
",",
"signal",
".",
"SIGINT",
")",
"# Since SIGINT has higher priority (with signal number 2) than SIGTERM",
"# (signal 15), SIGTERM cannot preempt the signal handler for SIGINT.",
"self",
".",
"_process",
".",
"terminate",
"(",
")",
"self",
".",
"_process",
".",
"wait",
"(",
")",
"self",
".",
"_errfile_r",
".",
"close",
"(",
")",
"self",
".",
"_outfile_r",
".",
"close",
"(",
")"
] |
Send death pill to Gdb and forcefully kill it if that doesn't work.
|
[
"Send",
"death",
"pill",
"to",
"Gdb",
"and",
"forcefully",
"kill",
"it",
"if",
"that",
"doesn",
"t",
"work",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L202-L222
|
10,685
|
google/pyringe
|
pyringe/inferior.py
|
GdbProxy.Version
|
def Version():
"""Gets the version of gdb as a 3-tuple.
The gdb devs seem to think it's a good idea to make --version
output multiple lines of welcome text instead of just the actual version,
so we ignore everything it outputs after the first line.
Returns:
The installed version of gdb in the form
(<major>, <minor or None>, <micro or None>)
gdb 7.7 would hence show up as version (7,7)
"""
output = subprocess.check_output(['gdb', '--version']).split('\n')[0]
# Example output (Arch linux):
# GNU gdb (GDB) 7.7
# Example output (Debian sid):
# GNU gdb (GDB) 7.6.2 (Debian 7.6.2-1)
# Example output (Debian wheezy):
# GNU gdb (GDB) 7.4.1-debian
# Example output (centos 2.6.32):
# GNU gdb (GDB) Red Hat Enterprise Linux (7.2-56.el6)
# As we've seen in the examples above, versions may be named very liberally
# So we assume every part of that string may be the "real" version string
# and try to parse them all. This too isn't perfect (later strings will
# overwrite information gathered from previous ones), but it should be
# flexible enough for everything out there.
major = None
minor = None
micro = None
for potential_versionstring in output.split():
version = re.split('[^0-9]', potential_versionstring)
try:
major = int(version[0])
except (IndexError, ValueError):
pass
try:
minor = int(version[1])
except (IndexError, ValueError):
pass
try:
micro = int(version[2])
except (IndexError, ValueError):
pass
return (major, minor, micro)
|
python
|
def Version():
"""Gets the version of gdb as a 3-tuple.
The gdb devs seem to think it's a good idea to make --version
output multiple lines of welcome text instead of just the actual version,
so we ignore everything it outputs after the first line.
Returns:
The installed version of gdb in the form
(<major>, <minor or None>, <micro or None>)
gdb 7.7 would hence show up as version (7, 7, None)
"""
output = subprocess.check_output(['gdb', '--version']).split('\n')[0]
# Example output (Arch linux):
# GNU gdb (GDB) 7.7
# Example output (Debian sid):
# GNU gdb (GDB) 7.6.2 (Debian 7.6.2-1)
# Example output (Debian wheezy):
# GNU gdb (GDB) 7.4.1-debian
# Example output (centos 2.6.32):
# GNU gdb (GDB) Red Hat Enterprise Linux (7.2-56.el6)
# As we've seen in the examples above, versions may be named very liberally
# So we assume every part of that string may be the "real" version string
# and try to parse them all. This too isn't perfect (later strings will
# overwrite information gathered from previous ones), but it should be
# flexible enough for everything out there.
major = None
minor = None
micro = None
for potential_versionstring in output.split():
version = re.split('[^0-9]', potential_versionstring)
try:
major = int(version[0])
except (IndexError, ValueError):
pass
try:
minor = int(version[1])
except (IndexError, ValueError):
pass
try:
micro = int(version[2])
except (IndexError, ValueError):
pass
return (major, minor, micro)
|
[
"def",
"Version",
"(",
")",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'gdb'",
",",
"'--version'",
"]",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
"0",
"]",
"# Example output (Arch linux):",
"# GNU gdb (GDB) 7.7",
"# Example output (Debian sid):",
"# GNU gdb (GDB) 7.6.2 (Debian 7.6.2-1)",
"# Example output (Debian wheezy):",
"# GNU gdb (GDB) 7.4.1-debian",
"# Example output (centos 2.6.32):",
"# GNU gdb (GDB) Red Hat Enterprise Linux (7.2-56.el6)",
"# As we've seen in the examples above, versions may be named very liberally",
"# So we assume every part of that string may be the \"real\" version string",
"# and try to parse them all. This too isn't perfect (later strings will",
"# overwrite information gathered from previous ones), but it should be",
"# flexible enough for everything out there.",
"major",
"=",
"None",
"minor",
"=",
"None",
"micro",
"=",
"None",
"for",
"potential_versionstring",
"in",
"output",
".",
"split",
"(",
")",
":",
"version",
"=",
"re",
".",
"split",
"(",
"'[^0-9]'",
",",
"potential_versionstring",
")",
"try",
":",
"major",
"=",
"int",
"(",
"version",
"[",
"0",
"]",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"pass",
"try",
":",
"minor",
"=",
"int",
"(",
"version",
"[",
"1",
"]",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"pass",
"try",
":",
"micro",
"=",
"int",
"(",
"version",
"[",
"2",
"]",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"pass",
"return",
"(",
"major",
",",
"minor",
",",
"micro",
")"
] |
Gets the version of gdb as a 3-tuple.
The gdb devs seem to think it's a good idea to make --version
output multiple lines of welcome text instead of just the actual version,
so we ignore everything it outputs after the first line.
Returns:
The installed version of gdb in the form
(<major>, <minor or None>, <micro or None>)
gdb 7.7 would hence show up as version (7, 7, None)
|
[
"Gets",
"the",
"version",
"of",
"gdb",
"as",
"a",
"3",
"-",
"tuple",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L229-L272
|
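The lenient token-by-token parsing above can be exercised in isolation. This
sketch mirrors the loop body and reproduces the documented behaviour on two
of the sample version lines (later numeric tokens overwrite earlier ones, as
the comments warn):

import re

def parse_gdb_version(first_line):
    # Mirrors GdbProxy.Version's lenient parsing of one output line.
    major = minor = micro = None
    for token in first_line.split():
        parts = re.split('[^0-9]', token)
        try:
            major = int(parts[0])
        except (IndexError, ValueError):
            pass
        try:
            minor = int(parts[1])
        except (IndexError, ValueError):
            pass
        try:
            micro = int(parts[2])
        except (IndexError, ValueError):
            pass
    return (major, minor, micro)

print(parse_gdb_version('GNU gdb (GDB) 7.7'))                     # (7, 7, None)
print(parse_gdb_version('GNU gdb (GDB) 7.6.2 (Debian 7.6.2-1)'))  # (7, 6, 2)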
10,686
|
google/pyringe
|
pyringe/inferior.py
|
GdbProxy._JsonDecodeDict
|
def _JsonDecodeDict(self, data):
"""Json object decode hook that automatically converts unicode objects."""
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = self._TryStr(key)
if isinstance(value, unicode):
value = self._TryStr(value)
elif isinstance(value, list):
value = self._JsonDecodeList(value)
rv[key] = value
if '__pyringe_type_name__' in data:
# We're looking at a proxyobject
rv = ProxyObject(rv)
return rv
|
python
|
def _JsonDecodeDict(self, data):
"""Json object decode hook that automatically converts unicode objects."""
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = self._TryStr(key)
if isinstance(value, unicode):
value = self._TryStr(value)
elif isinstance(value, list):
value = self._JsonDecodeList(value)
rv[key] = value
if '__pyringe_type_name__' in data:
# We're looking at a proxyobject
rv = ProxyObject(rv)
return rv
|
[
"def",
"_JsonDecodeDict",
"(",
"self",
",",
"data",
")",
":",
"rv",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"data",
".",
"iteritems",
"(",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"unicode",
")",
":",
"key",
"=",
"self",
".",
"_TryStr",
"(",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"unicode",
")",
":",
"value",
"=",
"self",
".",
"_TryStr",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"self",
".",
"_JsonDecodeList",
"(",
"value",
")",
"rv",
"[",
"key",
"]",
"=",
"value",
"if",
"'__pyringe_type_name__'",
"in",
"data",
":",
"# We're looking at a proxyobject",
"rv",
"=",
"ProxyObject",
"(",
"rv",
")",
"return",
"rv"
] |
Json object decode hook that automatically converts unicode objects.
|
[
"Json",
"object",
"decode",
"hook",
"that",
"automatically",
"converts",
"unicode",
"objects",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L305-L319
|
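For readers unfamiliar with object_hook: json.loads calls the hook once per
decoded JSON object, innermost first, so a hook like the one above can
rewrite unicode keys and values and detect the '__pyringe_type_name__' tag
in a single pass. A minimal Python 2 sketch, using encode() as a stand-in
for the module's _TryStr helper:

import json

def decode_hook(data):
    # Runs once per decoded JSON object; nested objects arrive first.
    rv = {}
    for key, value in data.items():
        if isinstance(key, unicode):  # Python 2 only
            key = key.encode('ascii', 'replace')
        if isinstance(value, unicode):
            value = value.encode('ascii', 'replace')
        rv[key] = value
    return rv

print(json.loads('{"a": "x", "b": {"c": "y"}}', object_hook=decode_hook))
# {'a': 'x', 'b': {'c': 'y'}}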
10,687
|
google/pyringe
|
pyringe/inferior.py
|
GdbProxy._Execute
|
def _Execute(self, funcname, *args, **kwargs):
"""Send an RPC request to the gdb-internal python.
Blocks for 3 seconds by default and returns any results.
Args:
funcname: the name of the function to call.
*args: the function's arguments.
**kwargs: Only the key 'wait_for_completion' is inspected, which decides
whether to wait forever for completion or just 3 seconds.
Returns:
The result of the function call.
"""
wait_for_completion = kwargs.get('wait_for_completion', False)
rpc_dict = {'func': funcname, 'args': args}
self._Send(json.dumps(rpc_dict))
timeout = TIMEOUT_FOREVER if wait_for_completion else TIMEOUT_DEFAULT
result_string = self._Recv(timeout)
try:
result = json.loads(result_string, object_hook=self._JsonDecodeDict)
if isinstance(result, unicode):
result = self._TryStr(result)
elif isinstance(result, list):
result = self._JsonDecodeList(result)
except ValueError:
raise ValueError('Response JSON invalid: ' + str(result_string))
except TypeError:
raise ValueError('Response JSON invalid: ' + str(result_string))
return result
|
python
|
def _Execute(self, funcname, *args, **kwargs):
"""Send an RPC request to the gdb-internal python.
Blocks for 3 seconds by default and returns any results.
Args:
funcname: the name of the function to call.
*args: the function's arguments.
**kwargs: Only the key 'wait_for_completion' is inspected, which decides
whether to wait forever for completion or just 3 seconds.
Returns:
The result of the function call.
"""
wait_for_completion = kwargs.get('wait_for_completion', False)
rpc_dict = {'func': funcname, 'args': args}
self._Send(json.dumps(rpc_dict))
timeout = TIMEOUT_FOREVER if wait_for_completion else TIMEOUT_DEFAULT
result_string = self._Recv(timeout)
try:
result = json.loads(result_string, object_hook=self._JsonDecodeDict)
if isinstance(result, unicode):
result = self._TryStr(result)
elif isinstance(result, list):
result = self._JsonDecodeList(result)
except ValueError:
raise ValueError('Response JSON invalid: ' + str(result_string))
except TypeError:
raise ValueError('Response JSON invalid: ' + str(result_string))
return result
|
[
"def",
"_Execute",
"(",
"self",
",",
"funcname",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"wait_for_completion",
"=",
"kwargs",
".",
"get",
"(",
"'wait_for_completion'",
",",
"False",
")",
"rpc_dict",
"=",
"{",
"'func'",
":",
"funcname",
",",
"'args'",
":",
"args",
"}",
"self",
".",
"_Send",
"(",
"json",
".",
"dumps",
"(",
"rpc_dict",
")",
")",
"timeout",
"=",
"TIMEOUT_FOREVER",
"if",
"wait_for_completion",
"else",
"TIMEOUT_DEFAULT",
"result_string",
"=",
"self",
".",
"_Recv",
"(",
"timeout",
")",
"try",
":",
"result",
"=",
"json",
".",
"loads",
"(",
"result_string",
",",
"object_hook",
"=",
"self",
".",
"_JsonDecodeDict",
")",
"if",
"isinstance",
"(",
"result",
",",
"unicode",
")",
":",
"result",
"=",
"self",
".",
"_TryStr",
"(",
"result",
")",
"elif",
"isinstance",
"(",
"result",
",",
"list",
")",
":",
"result",
"=",
"self",
".",
"_JsonDecodeList",
"(",
"result",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'Response JSON invalid: '",
"+",
"str",
"(",
"result_string",
")",
")",
"except",
"TypeError",
":",
"raise",
"ValueError",
"(",
"'Response JSON invalid: '",
"+",
"str",
"(",
"result_string",
")",
")",
"return",
"result"
] |
Send an RPC request to the gdb-internal python.
Blocks for 3 seconds by default and returns any results.
Args:
funcname: the name of the function to call.
*args: the function's arguments.
**kwargs: Only the key 'wait_for_completion' is inspected, which decides
whether to wait forever for completion or just 3 seconds.
Returns:
The result of the function call.
|
[
"Send",
"an",
"RPC",
"request",
"to",
"the",
"gdb",
"-",
"internal",
"python",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L325-L355
|
10,688
|
google/pyringe
|
pyringe/inferior.py
|
GdbProxy._Recv
|
def _Recv(self, timeout):
"""Receive output from gdb.
This reads gdb's stdout and stderr streams, returns a single line of gdb's
stdout or rethrows any exceptions thrown from within gdb as well as it can.
Args:
timeout: floating point number of seconds after which to abort.
A value of None or TIMEOUT_FOREVER means "there is no timeout", i.e.
this might block forever.
Raises:
ProxyError: All exceptions received from the gdb service are generically
reraised as this.
TimeoutError: Raised if no answer is received from gdb within the
specified time.
Returns:
The current contents of gdb's stdout buffer, read until the next newline,
or `None`, should the read fail or timeout.
"""
buf = ''
# The messiness of this stems from the "duck-typiness" of this function.
# The timeout parameter of poll has different semantics depending on whether
# it's <=0, >0, or None. Yay.
wait_for_line = timeout is TIMEOUT_FOREVER
deadline = time.time() + (timeout if not wait_for_line else 0)
def TimeLeft():
return max(1000 * (deadline - time.time()), 0)
continue_reading = True
while continue_reading:
poll_timeout = None if wait_for_line else TimeLeft()
fd_list = [event[0] for event in self._poller.poll(poll_timeout)
if event[1] & (select.POLLIN | select.POLLPRI)]
if not wait_for_line and TimeLeft() == 0:
continue_reading = False
if self._outfile_r.fileno() in fd_list:
buf += self._outfile_r.readline()
if buf.endswith('\n'):
return buf
# GDB-internal exception passing
if self._errfile_r.fileno() in fd_list:
exc = self._errfile_r.readline()
if exc:
exc_text = '\n-----------------------------------\n'
exc_text += 'Error occurred within GdbService:\n'
try:
exc_text += json.loads(exc)
except ValueError:
# whatever we got back wasn't valid JSON.
# This usually means we've run into an exception before the special
# exception handling was turned on. The first line we read up there
# will have been "Traceback (most recent call last):". Obviously, we
# want the rest, too, so we wait a bit and read it.
deadline = time.time() + 0.5
while self.is_running and TimeLeft() > 0:
exc += self._errfile_r.read()
try:
exc_text += json.loads(exc)
except ValueError:
exc_text = exc
raise ProxyError(exc_text)
# timeout
raise TimeoutError()
|
python
|
def _Recv(self, timeout):
"""Receive output from gdb.
This reads gdb's stdout and stderr streams, returns a single line of gdb's
stdout or rethrows any exceptions thrown from within gdb as well as it can.
Args:
timeout: floating point number of seconds after which to abort.
A value of None or TIMEOUT_FOREVER means "there is no timeout", i.e.
this might block forever.
Raises:
ProxyError: All exceptions received from the gdb service are generically
reraised as this.
TimeoutError: Raised if no answer is received from gdb within the
specified time.
Returns:
The current contents of gdb's stdout buffer, read until the next newline,
or `None`, should the read fail or timeout.
"""
buf = ''
# The messiness of this stems from the "duck-typiness" of this function.
# The timeout parameter of poll has different semantics depending on whether
# it's <=0, >0, or None. Yay.
wait_for_line = timeout is TIMEOUT_FOREVER
deadline = time.time() + (timeout if not wait_for_line else 0)
def TimeLeft():
return max(1000 * (deadline - time.time()), 0)
continue_reading = True
while continue_reading:
poll_timeout = None if wait_for_line else TimeLeft()
fd_list = [event[0] for event in self._poller.poll(poll_timeout)
if event[1] & (select.POLLIN | select.POLLPRI)]
if not wait_for_line and TimeLeft() == 0:
continue_reading = False
if self._outfile_r.fileno() in fd_list:
buf += self._outfile_r.readline()
if buf.endswith('\n'):
return buf
# GDB-internal exception passing
if self._errfile_r.fileno() in fd_list:
exc = self._errfile_r.readline()
if exc:
exc_text = '\n-----------------------------------\n'
exc_text += 'Error occurred within GdbService:\n'
try:
exc_text += json.loads(exc)
except ValueError:
# whatever we got back wasn't valid JSON.
# This usually means we've run into an exception before the special
# exception handling was turned on. The first line we read up there
# will have been "Traceback (most recent call last):". Obviously, we
# want the rest, too, so we wait a bit and read it.
deadline = time.time() + 0.5
while self.is_running and TimeLeft() > 0:
exc += self._errfile_r.read()
try:
exc_text += json.loads(exc)
except ValueError:
exc_text = exc
raise ProxyError(exc_text)
# timeout
raise TimeoutError()
|
[
"def",
"_Recv",
"(",
"self",
",",
"timeout",
")",
":",
"buf",
"=",
"''",
"# The messiness of this stems from the \"duck-typiness\" of this function.",
"# The timeout parameter of poll has different semantics depending on whether",
"# it's <=0, >0, or None. Yay.",
"wait_for_line",
"=",
"timeout",
"is",
"TIMEOUT_FOREVER",
"deadline",
"=",
"time",
".",
"time",
"(",
")",
"+",
"(",
"timeout",
"if",
"not",
"wait_for_line",
"else",
"0",
")",
"def",
"TimeLeft",
"(",
")",
":",
"return",
"max",
"(",
"1000",
"*",
"(",
"deadline",
"-",
"time",
".",
"time",
"(",
")",
")",
",",
"0",
")",
"continue_reading",
"=",
"True",
"while",
"continue_reading",
":",
"poll_timeout",
"=",
"None",
"if",
"wait_for_line",
"else",
"TimeLeft",
"(",
")",
"fd_list",
"=",
"[",
"event",
"[",
"0",
"]",
"for",
"event",
"in",
"self",
".",
"_poller",
".",
"poll",
"(",
"poll_timeout",
")",
"if",
"event",
"[",
"1",
"]",
"&",
"(",
"select",
".",
"POLLIN",
"|",
"select",
".",
"POLLPRI",
")",
"]",
"if",
"not",
"wait_for_line",
"and",
"TimeLeft",
"(",
")",
"==",
"0",
":",
"continue_reading",
"=",
"False",
"if",
"self",
".",
"_outfile_r",
".",
"fileno",
"(",
")",
"in",
"fd_list",
":",
"buf",
"+=",
"self",
".",
"_outfile_r",
".",
"readline",
"(",
")",
"if",
"buf",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"return",
"buf",
"# GDB-internal exception passing",
"if",
"self",
".",
"_errfile_r",
".",
"fileno",
"(",
")",
"in",
"fd_list",
":",
"exc",
"=",
"self",
".",
"_errfile_r",
".",
"readline",
"(",
")",
"if",
"exc",
":",
"exc_text",
"=",
"'\\n-----------------------------------\\n'",
"exc_text",
"+=",
"'Error occurred within GdbService:\\n'",
"try",
":",
"exc_text",
"+=",
"json",
".",
"loads",
"(",
"exc",
")",
"except",
"ValueError",
":",
"# whatever we got back wasn't valid JSON.",
"# This usually means we've run into an exception before the special",
"# exception handling was turned on. The first line we read up there",
"# will have been \"Traceback (most recent call last):\". Obviously, we",
"# want the rest, too, so we wait a bit and read it.",
"deadline",
"=",
"time",
".",
"time",
"(",
")",
"+",
"0.5",
"while",
"self",
".",
"is_running",
"and",
"TimeLeft",
"(",
")",
">",
"0",
":",
"exc",
"+=",
"self",
".",
"_errfile_r",
".",
"read",
"(",
")",
"try",
":",
"exc_text",
"+=",
"json",
".",
"loads",
"(",
"exc",
")",
"except",
"ValueError",
":",
"exc_text",
"=",
"exc",
"raise",
"ProxyError",
"(",
"exc_text",
")",
"# timeout",
"raise",
"TimeoutError",
"(",
")"
] |
Receive output from gdb.
This reads gdb's stdout and stderr streams, returns a single line of gdb's
stdout or rethrows any exceptions thrown from within gdb as well as it can.
Args:
timeout: floating point number of seconds after which to abort.
A value of None or TIMEOUT_FOREVER means "there is no timeout", i.e.
this might block forever.
Raises:
ProxyError: All exceptions received from the gdb service are generically
reraised as this.
TimeoutError: Raised if no answer is received from gdb within the
specified time.
Returns:
The current contents of gdb's stdout buffer, read until the next newline,
or `None`, should the read fail or timeout.
|
[
"Receive",
"output",
"from",
"gdb",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L361-L429
|
10,689
|
google/pyringe
|
pyringe/inferior.py
|
Inferior.needsattached
|
def needsattached(func):
"""Decorator to prevent commands from being used when not attached."""
@functools.wraps(func)
def wrap(self, *args, **kwargs):
if not self.attached:
raise PositionError('Not attached to any process.')
return func(self, *args, **kwargs)
return wrap
|
python
|
def needsattached(func):
"""Decorator to prevent commands from being used when not attached."""
@functools.wraps(func)
def wrap(self, *args, **kwargs):
if not self.attached:
raise PositionError('Not attached to any process.')
return func(self, *args, **kwargs)
return wrap
|
[
"def",
"needsattached",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrap",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"attached",
":",
"raise",
"PositionError",
"(",
"'Not attached to any process.'",
")",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrap"
] |
Decorator to prevent commands from being used when not attached.
|
[
"Decorator",
"to",
"prevent",
"commands",
"from",
"being",
"used",
"when",
"not",
"attached",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L458-L466
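The decorator is self-contained, so a usage sketch needs only a PositionError and a class with an attached flag (FakeInferior below is a hypothetical stand-in for Inferior):

import functools

class PositionError(Exception):
    pass

def needsattached(func):
    # Same decorator as above: refuse the call unless self.attached is set.
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.attached:
            raise PositionError('Not attached to any process.')
        return func(self, *args, **kwargs)
    return wrap

class FakeInferior:
    attached = False

    @needsattached
    def backtrace(self):
        return ['frame 0', 'frame 1']

inf = FakeInferior()
try:
    inf.backtrace()
except PositionError as err:
    print(err)          # Not attached to any process.
inf.attached = True
print(inf.backtrace())  # ['frame 0', 'frame 1']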
|
10,690
|
google/pyringe
|
pyringe/inferior.py
|
Inferior.Reinit
|
def Reinit(self, pid, auto_symfile_loading=True):
"""Reinitializes the object with a new pid.
Since all modes might need access to this object at any time, this object
needs to be long-lived. To make this clear in the API, this shorthand is
supplied.
Args:
pid: the pid of the target process
auto_symfile_loading: whether the symbol file should automatically be
loaded by gdb.
"""
self.ShutDownGdb()
self.__init__(pid, auto_symfile_loading, architecture=self.arch)
|
python
|
def Reinit(self, pid, auto_symfile_loading=True):
"""Reinitializes the object with a new pid.
Since all modes might need access to this object at any time, this object
needs to be long-lived. To make this clear in the API, this shorthand is
supplied.
Args:
pid: the pid of the target process
auto_symfile_loading: whether the symbol file should automatically be
loaded by gdb.
"""
self.ShutDownGdb()
self.__init__(pid, auto_symfile_loading, architecture=self.arch)
|
[
"def",
"Reinit",
"(",
"self",
",",
"pid",
",",
"auto_symfile_loading",
"=",
"True",
")",
":",
"self",
".",
"ShutDownGdb",
"(",
")",
"self",
".",
"__init__",
"(",
"pid",
",",
"auto_symfile_loading",
",",
"architecture",
"=",
"self",
".",
"arch",
")"
] |
Reinitializes the object with a new pid.
Since all modes might need access to this object at any time, this object
needs to be long-lived. To make this clear in the API, this shorthand is
supplied.
Args:
pid: the pid of the target process
auto_symfile_loading: whether the symbol file should automatically be
loaded by gdb.
|
[
"Reinitializes",
"the",
"object",
"with",
"a",
"new",
"pid",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/inferior.py#L472-L484
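The self.__init__(...) call is the interesting idiom: it re-runs the constructor on the existing instance, so every mode holding a reference to the long-lived object sees the new state. A toy demonstration (Connection is hypothetical):

class Connection:
    def __init__(self, pid):
        self.pid = pid

    def shutdown(self):
        print('detached from %d' % self.pid)

    def reinit(self, pid):
        # Same idiom as Inferior.Reinit: tear down, then re-run __init__
        # in place rather than constructing a replacement object.
        self.shutdown()
        self.__init__(pid)

conn = Connection(100)
handle = conn             # another mode holds this reference
conn.reinit(200)          # prints: detached from 100
assert handle.pid == 200  # the shared reference sees the new pid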
|
10,691
|
google/pyringe
|
pyringe/plugins/inject.py
|
InjectPlugin.InjectString
|
def InjectString(self, codestring, wait_for_completion=True):
"""Try to inject python code into current thread.
Args:
codestring: Python snippet to execute in inferior. (may contain newlines)
wait_for_completion: Block until execution of snippet has completed.
"""
if self.inferior.is_running and self.inferior.gdb.IsAttached():
try:
self.inferior.gdb.InjectString(
self.inferior.position,
codestring,
wait_for_completion=wait_for_completion)
except RuntimeError:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
else:
logging.error('Not attached to any process.')
|
python
|
def InjectString(self, codestring, wait_for_completion=True):
"""Try to inject python code into current thread.
Args:
codestring: Python snippet to execute in inferior. (may contain newlines)
wait_for_completion: Block until execution of snippet has completed.
"""
if self.inferior.is_running and self.inferior.gdb.IsAttached():
try:
self.inferior.gdb.InjectString(
self.inferior.position,
codestring,
wait_for_completion=wait_for_completion)
except RuntimeError:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
else:
logging.error('Not attached to any process.')
|
[
"def",
"InjectString",
"(",
"self",
",",
"codestring",
",",
"wait_for_completion",
"=",
"True",
")",
":",
"if",
"self",
".",
"inferior",
".",
"is_running",
"and",
"self",
".",
"inferior",
".",
"gdb",
".",
"IsAttached",
"(",
")",
":",
"try",
":",
"self",
".",
"inferior",
".",
"gdb",
".",
"InjectString",
"(",
"self",
".",
"inferior",
".",
"position",
",",
"codestring",
",",
"wait_for_completion",
"=",
"wait_for_completion",
")",
"except",
"RuntimeError",
":",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"traceback",
".",
"print_exception",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
"else",
":",
"logging",
".",
"error",
"(",
"'Not attached to any process.'",
")"
] |
Try to inject python code into current thread.
Args:
codestring: Python snippet to execute in inferior. (may contain newlines)
wait_for_completion: Block until execution of snippet has completed.
|
[
"Try",
"to",
"inject",
"python",
"code",
"into",
"current",
"thread",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/inject.py#L40-L57
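The control flow reduces to guard-then-swallow: refuse when not attached, and print (rather than propagate) RuntimeErrors raised during injection. A hypothetical reduction (guarded_call and failing are illustrative names, not pyringe API):

import logging
import sys
import traceback

def guarded_call(attached, action):
    if attached:
        try:
            action()
        except RuntimeError:
            # Print the traceback instead of letting it escape the REPL.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
    else:
        logging.error('Not attached to any process.')

def failing():
    raise RuntimeError('injection failed')

guarded_call(True, failing)   # traceback printed, not raised
guarded_call(False, failing)  # logs: Not attached to any process.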
|
10,692
|
google/pyringe
|
pyringe/payload/libpython.py
|
PyObjectPtr.field
|
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located inside the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
try:
# Python 2:
return self._gdbval.dereference()[name]
except RuntimeError:
# Python 3:
return self._gdbval.dereference()['ob_base'][name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
|
python
|
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located inside the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
try:
# Python 2:
return self._gdbval.dereference()[name]
except RuntimeError:
# Python 3:
return self._gdbval.dereference()['ob_base'][name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
|
[
"def",
"field",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"is_null",
"(",
")",
":",
"raise",
"NullPyObjectPtr",
"(",
"self",
")",
"if",
"name",
"==",
"'ob_type'",
":",
"pyo_ptr",
"=",
"self",
".",
"_gdbval",
".",
"cast",
"(",
"PyObjectPtr",
".",
"get_gdb_type",
"(",
")",
")",
"return",
"pyo_ptr",
".",
"dereference",
"(",
")",
"[",
"name",
"]",
"if",
"name",
"==",
"'ob_size'",
":",
"try",
":",
"# Python 2:",
"return",
"self",
".",
"_gdbval",
".",
"dereference",
"(",
")",
"[",
"name",
"]",
"except",
"RuntimeError",
":",
"# Python 3:",
"return",
"self",
".",
"_gdbval",
".",
"dereference",
"(",
")",
"[",
"'ob_base'",
"]",
"[",
"name",
"]",
"# General case: look it up inside the object:",
"return",
"self",
".",
"_gdbval",
".",
"dereference",
"(",
")",
"[",
"name",
"]"
] |
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located inside the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
|
[
"Get",
"the",
"gdb",
".",
"Value",
"for",
"the",
"given",
"field",
"within",
"the",
"PyObject",
"coping",
"with",
"some",
"python",
"2",
"versus",
"python",
"3",
"differences",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L131-L163
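The try/except probe is the portable part: attempt the Python 2 layout first, then fall back to the ob_base indirection. A gdb-free sketch with plain dicts standing in for gdb.Value (gdb itself raises RuntimeError where these dicts raise KeyError):

py2_obj = {'ob_type': 'list', 'ob_size': 3}
py3_obj = {'ob_type': 'list', 'ob_base': {'ob_size': 3}}

def ob_size(obj):
    try:
        return obj['ob_size']             # Python 2: field on the object
    except KeyError:                      # gdb: RuntimeError
        return obj['ob_base']['ob_size']  # Python 3: nested in ob_base

print(ob_size(py2_obj), ob_size(py3_obj))  # 3 3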
|
10,693
|
google/pyringe
|
pyringe/payload/libpython.py
|
PyObjectPtr.write_repr
|
def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited)))
|
python
|
def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited)))
|
[
"def",
"write_repr",
"(",
"self",
",",
"out",
",",
"visited",
")",
":",
"# Default implementation: generate a proxy value and write its repr",
"# However, this could involve a lot of work for complicated objects,",
"# so for derived classes we specialize this",
"return",
"out",
".",
"write",
"(",
"repr",
"(",
"self",
".",
"proxyval",
"(",
"visited",
")",
")",
")"
] |
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
|
[
"Write",
"a",
"string",
"representation",
"of",
"the",
"value",
"scraped",
"from",
"the",
"inferior",
"process",
"to",
"out",
"a",
"file",
"-",
"like",
"object",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L262-L270
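Because the default implementation just reprs the proxy value, it can be exercised with any file-like object; a sketch with a hypothetical Proxy class:

import io

class Proxy:
    def __init__(self, value):
        self.value = value

    def proxyval(self, visited):
        return self.value

    def write_repr(self, out, visited):
        # Default implementation: repr() of the proxied value.
        return out.write(repr(self.proxyval(visited)))

out = io.StringIO()
Proxy([1, 'two']).write_repr(out, visited=set())
print(out.getvalue())  # [1, 'two']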
|
10,694
|
google/pyringe
|
pyringe/payload/libpython.py
|
PyObjectPtr.from_pyobject_ptr
|
def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval)
|
python
|
def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval)
|
[
"def",
"from_pyobject_ptr",
"(",
"cls",
",",
"gdbval",
")",
":",
"try",
":",
"p",
"=",
"PyObjectPtr",
"(",
"gdbval",
")",
"cls",
"=",
"cls",
".",
"subclass_from_type",
"(",
"p",
".",
"type",
"(",
")",
")",
"return",
"cls",
"(",
"gdbval",
",",
"cast_to",
"=",
"cls",
".",
"get_gdb_type",
"(",
")",
")",
"except",
"RuntimeError",
":",
"# Handle any kind of error e.g. NULL ptrs by simply using the base",
"# class",
"pass",
"return",
"cls",
"(",
"gdbval",
")"
] |
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
|
[
"Try",
"to",
"locate",
"the",
"appropriate",
"derived",
"class",
"dynamically",
"and",
"cast",
"the",
"pointer",
"accordingly",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L340-L353
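The shape is a classmethod factory with a blanket fallback: pick the most specific subclass, and on any failure hand back the base class. A registry-based sketch (Base, DictPtr, and from_raw are hypothetical; gdb's failure mode is RuntimeError, mimicked here with KeyError/TypeError):

class Base:
    _registry = {}

    def __init__(self, raw):
        self.raw = raw

    @classmethod
    def from_raw(cls, raw):
        try:
            return cls._registry[raw['type']](raw)
        except (KeyError, TypeError):
            # e.g. NULL or unknown input: fall back to the base class
            return cls(raw)

class DictPtr(Base):
    pass

Base._registry['dict'] = DictPtr

print(type(Base.from_raw({'type': 'dict'})).__name__)  # DictPtr
print(type(Base.from_raw(None)).__name__)              # Base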
|
10,695
|
google/pyringe
|
pyringe/payload/libpython.py
|
HeapTypeObjectPtr.proxyval
|
def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
|
python
|
def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
|
[
"def",
"proxyval",
"(",
"self",
",",
"visited",
")",
":",
"# Guard against infinite loops:",
"if",
"self",
".",
"as_address",
"(",
")",
"in",
"visited",
":",
"return",
"ProxyAlreadyVisited",
"(",
"'<...>'",
")",
"visited",
".",
"add",
"(",
"self",
".",
"as_address",
"(",
")",
")",
"pyop_attr_dict",
"=",
"self",
".",
"get_attr_dict",
"(",
")",
"if",
"pyop_attr_dict",
":",
"attr_dict",
"=",
"pyop_attr_dict",
".",
"proxyval",
"(",
"visited",
")",
"else",
":",
"attr_dict",
"=",
"{",
"}",
"tp_name",
"=",
"self",
".",
"safe_tp_name",
"(",
")",
"# New-style class:",
"return",
"InstanceProxy",
"(",
"tp_name",
",",
"attr_dict",
",",
"long",
"(",
"self",
".",
"_gdbval",
")",
")"
] |
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
|
[
"Support",
"for",
"new",
"-",
"style",
"classes",
"."
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L459-L479
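The visited set is the part worth isolating: it converts a cyclic object graph into a finite proxy by emitting '<...>' at the first repeat. A minimal sketch over plain dicts:

def proxy(obj, visited=None):
    visited = set() if visited is None else visited
    if id(obj) in visited:
        return '<...>'        # cycle guard, as in proxyval
    visited.add(id(obj))
    if isinstance(obj, dict):
        return {k: proxy(v, visited) for k, v in obj.items()}
    return obj

d = {'name': 'node'}
d['me'] = d                   # self-reference
print(proxy(d))               # {'name': 'node', 'me': '<...>'}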
|
10,696
|
google/pyringe
|
pyringe/payload/libpython.py
|
PyCodeObjectPtr.addr2line
|
def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno
|
python
|
def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno
|
[
"def",
"addr2line",
"(",
"self",
",",
"addrq",
")",
":",
"co_lnotab",
"=",
"self",
".",
"pyop_field",
"(",
"'co_lnotab'",
")",
".",
"proxyval",
"(",
"set",
"(",
")",
")",
"# Initialize lineno to co_firstlineno as per PyCode_Addr2Line",
"# not 0, as lnotab_notes.txt has it:",
"lineno",
"=",
"int_from_int",
"(",
"self",
".",
"field",
"(",
"'co_firstlineno'",
")",
")",
"addr",
"=",
"0",
"for",
"addr_incr",
",",
"line_incr",
"in",
"zip",
"(",
"co_lnotab",
"[",
":",
":",
"2",
"]",
",",
"co_lnotab",
"[",
"1",
":",
":",
"2",
"]",
")",
":",
"addr",
"+=",
"ord",
"(",
"addr_incr",
")",
"if",
"addr",
">",
"addrq",
":",
"return",
"lineno",
"lineno",
"+=",
"ord",
"(",
"line_incr",
")",
"return",
"lineno"
] |
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
|
[
"Get",
"the",
"line",
"number",
"for",
"a",
"given",
"bytecode",
"offset"
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L592-L611
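A worked example of the lnotab walk, runnable outside gdb. This assumes the pre-3.6 co_lnotab format of (bytecode delta, line delta) byte pairs; later CPythons moved to co_linetable, and iterating a bytes object already yields ints, so the ord() calls are dropped:

def addr2line(co_lnotab, co_firstlineno, addrq):
    lineno = co_firstlineno   # start at co_firstlineno, not 0
    addr = 0
    for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
        addr += addr_incr
        if addr > addrq:
            return lineno
        lineno += line_incr
    return lineno

# Two increments: line 10 spans addrs 0-3, line 11 spans 4-7, then line 12.
lnotab = bytes([4, 1, 4, 1])
print(addr2line(lnotab, 10, 0))   # 10
print(addr2line(lnotab, 10, 5))   # 11
print(addr2line(lnotab, 10, 11))  # 12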
|
10,697
|
google/pyringe
|
pyringe/payload/libpython.py
|
PyFrameObjectPtr.current_line
|
def current_line(self):
'''Get the text of the current source line as a string, with a trailing
newline character'''
if self.is_optimized_out():
return '(frame information optimized out)'
with open(self.filename(), 'r') as f:
all_lines = f.readlines()
# Convert from 1-based current_line_num to 0-based list offset:
return all_lines[self.current_line_num()-1]
|
python
|
def current_line(self):
'''Get the text of the current source line as a string, with a trailing
newline character'''
if self.is_optimized_out():
return '(frame information optimized out)'
with open(self.filename(), 'r') as f:
all_lines = f.readlines()
# Convert from 1-based current_line_num to 0-based list offset:
return all_lines[self.current_line_num()-1]
|
[
"def",
"current_line",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_optimized_out",
"(",
")",
":",
"return",
"'(frame information optimized out)'",
"with",
"open",
"(",
"self",
".",
"filename",
"(",
")",
",",
"'r'",
")",
"as",
"f",
":",
"all_lines",
"=",
"f",
".",
"readlines",
"(",
")",
"# Convert from 1-based current_line_num to 0-based list offset:",
"return",
"all_lines",
"[",
"self",
".",
"current_line_num",
"(",
")",
"-",
"1",
"]"
] |
Get the text of the current source line as a string, with a trailing
newline character
|
[
"Get",
"the",
"text",
"of",
"the",
"current",
"source",
"line",
"as",
"a",
"string",
"with",
"a",
"trailing",
"newline",
"character"
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L889-L897
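The off-by-one is the whole subtlety: gdb-side line numbers are 1-based while readlines() indexing is 0-based. A quick check against a hypothetical temp file:

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
    f.write('first\nsecond\nthird\n')
    path = f.name

current_line_num = 2                    # 1-based, as reported for a frame
with open(path, 'r') as f:
    all_lines = f.readlines()
print(all_lines[current_line_num - 1])  # second
os.remove(path)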
|
10,698
|
google/pyringe
|
pyringe/payload/libpython.py
|
Frame.select
|
def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True
|
python
|
def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True
|
[
"def",
"select",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
".",
"_gdbframe",
",",
"'select'",
")",
":",
"print",
"(",
"'Unable to select frame: '",
"'this build of gdb does not expose a gdb.Frame.select method'",
")",
"return",
"False",
"self",
".",
"_gdbframe",
".",
"select",
"(",
")",
"return",
"True"
] |
If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot
|
[
"If",
"supported",
"select",
"this",
"frame",
"and",
"return",
"True",
";",
"return",
"False",
"if",
"unsupported"
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L1173-L1183
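The hasattr probe is an ordinary capability check and can be exercised against stand-ins for the two gdb builds (OldGdbFrame and NewGdbFrame are hypothetical):

class OldGdbFrame:                # build without gdb.Frame.select
    pass

class NewGdbFrame:                # build with gdb.Frame.select
    def select(self):
        print('frame selected')

def try_select(gdbframe):
    if not hasattr(gdbframe, 'select'):
        print('Unable to select frame: '
              'this build of gdb does not expose a gdb.Frame.select method')
        return False
    gdbframe.select()
    return True

print(try_select(OldGdbFrame()))  # prints the warning, then False
print(try_select(NewGdbFrame()))  # frame selected, then True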
|
10,699
|
google/pyringe
|
pyringe/payload/libpython.py
|
Frame.get_index
|
def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index
|
python
|
def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index
|
[
"def",
"get_index",
"(",
"self",
")",
":",
"index",
"=",
"0",
"# Go down until you reach the newest frame:",
"iter_frame",
"=",
"self",
"while",
"iter_frame",
".",
"newer",
"(",
")",
":",
"index",
"+=",
"1",
"iter_frame",
"=",
"iter_frame",
".",
"newer",
"(",
")",
"return",
"index"
] |
Calculate index of frame, starting at 0 for the newest frame within
this thread
|
[
"Calculate",
"index",
"of",
"frame",
"starting",
"at",
"0",
"for",
"the",
"newest",
"frame",
"within",
"this",
"thread"
] |
76dff5d1ac29cd5e7bf32677654a83291a15ad8a
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L1185-L1194
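The walk only needs a newer() link, so it can be checked with a toy frame chain (Frame here is a hypothetical stand-in for the gdb-backed class):

class Frame:
    def __init__(self, newer=None):
        self._newer = newer

    def newer(self):
        return self._newer

    def get_index(self):
        index = 0
        iter_frame = self         # walk toward the newest frame
        while iter_frame.newer():
            index += 1
            iter_frame = iter_frame.newer()
        return index

newest = Frame()
middle = Frame(newer=newest)
oldest = Frame(newer=middle)
print(newest.get_index(), middle.get_index(), oldest.get_index())  # 0 1 2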
|