| sentence1 (string, 52–3.87M chars) | sentence2 (string, 1–47.2k chars) | label (1 class) |
|---|---|---|
def extract_flatfield(prihdr, scihdu):
"""Extract flatfield data from ``PFLTFILE``.
Parameters
----------
prihdr : obj
FITS primary header HDU.
scihdu : obj
Extension HDU of the science image.
This is only used to extract subarray data.
Returns
-------
invflat : ndarray or `None`
Inverse flatfield, if any. Multiply this to apply ``FLATCORR``.
"""
for ff in ['DFLTFILE', 'LFLTFILE']:
vv = prihdr.get(ff, 'N/A')
if vv != 'N/A':
warnings.warn('{0}={1} is not accounted for'.format(ff, vv),
AstropyUserWarning)
flatfile = prihdr.get('PFLTFILE', 'N/A')
if flatfile == 'N/A':
return None
flatfile = from_irafpath(flatfile)
ampstring = prihdr['CCDAMP']
with fits.open(flatfile) as hduflat:
if ampstring == 'ABCD':
invflat = np.concatenate(
(1 / hduflat['sci', 1].data,
1 / hduflat['sci', 2].data[::-1, :]), axis=1)
elif ampstring in ('A', 'B', 'AB'):
invflat = 1 / extract_ref(scihdu, hduflat['sci', 2])
else:
invflat = 1 / extract_ref(scihdu, hduflat['sci', 1])
return invflat
|
Extract flatfield data from ``PFLTFILE``.
Parameters
----------
prihdr : obj
FITS primary header HDU.
scihdu : obj
Extension HDU of the science image.
This is only used to extract subarray data.
Returns
-------
invflat : ndarray or `None`
Inverse flatfield, if any. Multiply this to apply ``FLATCORR``.
|
entailment
|
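A minimal usage sketch for `extract_flatfield`, assuming the functions in this section are in scope and the reference files exist locally; the rootname and paths are hypothetical, and a single-amp subarray is used so the inverse flat matches the science array shape:

```python
import os
from astropy.io import fits

os.environ['jref'] = '/my/refs'  # resolves 'jref$...' in PFLTFILE (path assumed)

with fits.open('jxxxxxxxq_blv_tmp.fits') as pf:  # hypothetical subarray exposure
    invflat = extract_flatfield(pf[0].header, pf['sci', 1])
    if invflat is not None:
        flattened = pf['sci', 1].data * invflat  # multiplying applies FLATCORR
```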
def from_irafpath(irafpath):
"""Resolve IRAF path like ``jref$`` into actual file path.
Parameters
----------
irafpath : str
Path containing IRAF syntax.
Returns
-------
realpath : str
Actual file path. If input does not follow ``path$filename``
format, then this is the same as input.
Raises
------
ValueError
The required environment variable is undefined.
"""
s = irafpath.split('$')
if len(s) != 2:
return irafpath
if len(s[0]) == 0:
return irafpath
try:
refdir = os.environ[s[0]]
except KeyError:
raise ValueError('{0} environment variable undefined'.format(s[0]))
return os.path.join(refdir, s[1])
|
Resolve IRAF path like ``jref$`` into actual file path.
Parameters
----------
irafpath : str
Path containing IRAF syntax.
Returns
-------
realpath : str
Actual file path. If input does not follow ``path$filename``
format, then this is the same as input.
Raises
------
ValueError
The required environment variable is undefined.
|
entailment
|
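A worked example of the resolution rule; the environment variable value and filenames below are made up:

```python
import os

os.environ['jref'] = '/grp/hst/cdbs/jref'   # hypothetical location

from_irafpath('jref$q9e1206kj_pfl.fits')    # hypothetical file
# -> '/grp/hst/cdbs/jref/q9e1206kj_pfl.fits'

from_irafpath('no_dollar_here.fits')        # not 'path$filename' format
# -> 'no_dollar_here.fits' (returned unchanged)
```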
def extract_ref(scihdu, refhdu):
"""Extract section of the reference image that
corresponds to the given science image.
This only returns a view, not a copy of the
reference image's array.
Parameters
----------
scihdu, refhdu : obj
Extension HDU's of the science and reference image,
respectively.
Returns
-------
refdata : array-like
Section of the relevant reference image.
Raises
------
NotImplementedError
Either science or reference data are binned.
ValueError
Extracted section size mismatch.
"""
same_size, rx, ry, x0, y0 = find_line(scihdu, refhdu)
# Use the whole reference image
if same_size:
return refhdu.data
# Binned data
if rx != 1 or ry != 1:
raise NotImplementedError(
'Either science or reference data are binned')
# Extract a view of the sub-section
ny, nx = scihdu.data.shape
refdata = refhdu.data[y0:y0+ny, x0:x0+nx]
if refdata.shape != (ny, nx):
raise ValueError('Extracted reference image is {0} but science image '
'is {1}'.format(refdata.shape, (ny, nx)))
return refdata
|
Extract section of the reference image that
corresponds to the given science image.
This only returns a view, not a copy of the
reference image's array.
Parameters
----------
scihdu, refhdu : obj
Extension HDU's of the science and reference image,
respectively.
Returns
-------
refdata : array-like
Section of the relevant reference image.
Raises
------
NotImplementedError
Either science or reference data are binned.
ValueError
Extracted section size mismatch.
|
entailment
|
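A self-contained sketch with synthetic HDUs, assuming `extract_ref` and the helpers it relies on (`find_line`, `get_corner`, `get_lt`, `from_lt`, defined in the rows that follow) are in scope; the corner values are arbitrary:

```python
import numpy as np
from astropy.io import fits

# Full-frame reference image with the default (zero) corner.
ref = fits.ImageHDU(data=np.arange(1024.0 * 1024.0).reshape(1024, 1024))
# 512x512 subarray science image whose corner is (x, y) = (100, 200).
sci = fits.ImageHDU(data=np.zeros((512, 512)))
sci.header['LTV1'] = -100.0
sci.header['LTV2'] = -200.0

section = extract_ref(sci, ref)
assert section.shape == (512, 512)          # == ref.data[200:712, 100:612]
assert np.shares_memory(section, ref.data)  # a view, not a copy
```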
def find_line(scihdu, refhdu):
"""Obtain bin factors and corner location to extract
and bin the appropriate subset of a reference image to
match a science image.
If the science image has zero offset and is the same size and
binning as the reference image, ``same_size`` will be set to
`True`. Otherwise, the values of ``rx``, ``ry``, ``x0``, and
``y0`` will be assigned.
Normally the science image will be binned the same or more
than the reference image. In that case, ``rx`` and ``ry``
will be the bin size of the science image divided by the
bin size of the reference image.
If the binning of the reference image is greater than the
binning of the science image, the ratios (``rx`` and ``ry``)
of the bin sizes will be the reference image size divided by
the science image bin size. This is not necessarily an error.
.. note:: Translated from ``calacs/lib/findbin.c``.
Parameters
----------
scihdu, refhdu : obj
Extension HDU's of the science and reference image,
respectively.
Returns
-------
same_size : bool
`True` if zero offset and same size and binning.
rx, ry : int
Ratio of bin sizes.
x0, y0 : int
Location of start of subimage in reference image.
Raises
------
ValueError
Science and reference data size mismatch.
"""
sci_bin, sci_corner = get_corner(scihdu.header)
ref_bin, ref_corner = get_corner(refhdu.header)
# We can use the reference image directly, without binning
# and without extracting a subset.
if (sci_corner[0] == ref_corner[0] and sci_corner[1] == ref_corner[1] and
sci_bin[0] == ref_bin[0] and sci_bin[1] == ref_bin[1] and
scihdu.data.shape[1] == refhdu.data.shape[1]):
same_size = True
rx = 1
ry = 1
x0 = 0
y0 = 0
# Reference image is binned more than the science image.
elif ref_bin[0] > sci_bin[0] or ref_bin[1] > sci_bin[1]:
same_size = False
rx = ref_bin[0] / sci_bin[0]
ry = ref_bin[1] / sci_bin[1]
x0 = (sci_corner[0] - ref_corner[0]) / ref_bin[0]
y0 = (sci_corner[1] - ref_corner[1]) / ref_bin[1]
# For subarray input images, whether they are binned or not.
else:
same_size = False
# Ratio of bin sizes.
ratiox = sci_bin[0] / ref_bin[0]
ratioy = sci_bin[1] / ref_bin[1]
if (ratiox * ref_bin[0] != sci_bin[0] or
ratioy * ref_bin[1] != sci_bin[1]):
raise ValueError('Science and reference data size mismatch')
# cshift is the offset in units of unbinned pixels.
# Divide by ref_bin to convert to units of pixels in the ref image.
cshift = (sci_corner[0] - ref_corner[0], sci_corner[1] - ref_corner[1])
xzero = cshift[0] / ref_bin[0]
yzero = cshift[1] / ref_bin[1]
if (xzero * ref_bin[0] != cshift[0] or
yzero * ref_bin[1] != cshift[1]):
warnings.warn('Subimage offset not divisible by bin size',
AstropyUserWarning)
rx = ratiox
ry = ratioy
x0 = xzero
y0 = yzero
# Ensure integer index
x0 = int(x0)
y0 = int(y0)
return same_size, rx, ry, x0, y0
|
Obtain bin factors and corner location to extract
and bin the appropriate subset of a reference image to
match a science image.
If the science image has zero offset and is the same size and
binning as the reference image, ``same_size`` will be set to
`True`. Otherwise, the values of ``rx``, ``ry``, ``x0``, and
``y0`` will be assigned.
Normally the science image will be binned the same or more
than the reference image. In that case, ``rx`` and ``ry``
will be the bin size of the science image divided by the
bin size of the reference image.
If the binning of the reference image is greater than the
binning of the science image, the ratios (``rx`` and ``ry``)
of the bin sizes will be the reference image size divided by
the science image bin size. This is not necessarily an error.
.. note:: Translated from ``calacs/lib/findbin.c``.
Parameters
----------
scihdu, refhdu : obj
Extension HDU's of the science and reference image,
respectively.
Returns
-------
same_size : bool
`True` if zero offset and same size and binning.
rx, ry : int
Ratio of bin sizes.
x0, y0 : int
Location of start of subimage in reference image.
Raises
------
ValueError
Science and reference data size mismatch.
|
entailment
|
def get_corner(hdr, rsize=1):
"""Obtain bin and corner information for a subarray.
``LTV1``, ``LTV2``, ``LTM1_1``, and ``LTM2_2`` keywords
are extracted from the given extension header and converted
to bin and corner values (0-indexed).
``LTV1`` for the CCD uses the beginning of the illuminated
portion as the origin, not the beginning of the overscan region.
Thus, the computed X-corner has the same origin as ``LTV1``,
which is what we want, but it differs from the ``CENTERA1``
header keyword, which has the beginning of the overscan region
as origin.
.. note:: Translated from ``calacs/lib/getcorner.c``.
Parameters
----------
hdr : obj
Extension header.
rsize : int, optional
Size of reference pixel in units of high-res pixels.
Returns
-------
bin : tuple of int
Pixel size in X and Y.
corner : tuple of int
Corner of subarray in X and Y.
"""
ltm, ltv = get_lt(hdr)
return from_lt(rsize, ltm, ltv)
|
Obtain bin and corner information for a subarray.
``LTV1``, ``LTV2``, ``LTM1_1``, and ``LTM2_2`` keywords
are extracted from the given extension header and converted
to bin and corner values (0-indexed).
``LTV1`` for the CCD uses the beginning of the illuminated
portion as the origin, not the beginning of the overscan region.
Thus, the computed X-corner has the same origin as ``LTV1``,
which is what we want, but it differs from the ``CENTERA1``
header keyword, which has the beginning of the overscan region
as origin.
.. note:: Translated from ``calacs/lib/getcorner.c``.
Parameters
----------
hdr : obj
Extension header.
rsize : int, optional
Size of reference pixel in units of high-res pixels.
Returns
-------
bin : tuple of int
Pixel size in X and Y.
corner : tuple of int
Corner of subarray in X and Y.
|
entailment
|
def get_lt(hdr):
"""Obtain the LTV and LTM keyword values.
Note that this returns the values just as read from the header,
which means in particular that the LTV values are for one-indexed
pixel coordinates.
LTM keywords are the diagonal elements of MWCS linear
transformation matrix, while LTV's are MWCS linear transformation
vector (1-indexed).
.. note:: Translated from ``calacs/lib/getlt.c``.
Parameters
----------
hdr : obj
Extension header.
Returns
-------
ltm, ltv : tuple of float
``(LTM1_1, LTM2_2)`` and ``(LTV1, LTV2)``.
Values are ``(1, 1)`` and ``(0, 0)`` if not found,
        to accommodate reference files with missing info.
Raises
------
ValueError
Invalid LTM* values.
"""
ltm = (hdr.get('LTM1_1', 1.0), hdr.get('LTM2_2', 1.0))
if ltm[0] <= 0 or ltm[1] <= 0:
raise ValueError('(LTM1_1, LTM2_2) = {0} is invalid'.format(ltm))
ltv = (hdr.get('LTV1', 0.0), hdr.get('LTV2', 0.0))
return ltm, ltv
|
Obtain the LTV and LTM keyword values.
Note that this returns the values just as read from the header,
which means in particular that the LTV values are for one-indexed
pixel coordinates.
LTM keywords are the diagonal elements of MWCS linear
transformation matrix, while LTV's are MWCS linear transformation
vector (1-indexed).
.. note:: Translated from ``calacs/lib/getlt.c``.
Parameters
----------
hdr : obj
Extension header.
Returns
-------
ltm, ltv : tuple of float
``(LTM1_1, LTM2_2)`` and ``(LTV1, LTV2)``.
Values are ``(1, 1)`` and ``(0, 0)`` if not found,
to accommodate reference files with missing info.
Raises
------
ValueError
Invalid LTM* values.
|
entailment
|
def from_lt(rsize, ltm, ltv):
"""Compute the corner location and pixel size in units
of unbinned pixels.
.. note:: Translated from ``calacs/lib/fromlt.c``.
Parameters
----------
rsize : int
Reference pixel size. Usually 1.
ltm, ltv : tuple of float
See :func:`get_lt`.
Returns
-------
bin : tuple of int
Pixel size in X and Y.
corner : tuple of int
Corner of subarray in X and Y.
"""
dbinx = rsize / ltm[0]
dbiny = rsize / ltm[1]
dxcorner = (dbinx - rsize) - dbinx * ltv[0]
dycorner = (dbiny - rsize) - dbiny * ltv[1]
# Round off to the nearest integer.
bin = (_nint(dbinx), _nint(dbiny))
corner = (_nint(dxcorner), _nint(dycorner))
return bin, corner
|
Compute the corner location and pixel size in units
of unbinned pixels.
.. note:: Translated from ``calacs/lib/fromlt.c``.
Parameters
----------
rsize : int
Reference pixel size. Usually 1.
ltm, ltv : tuple of float
See :func:`get_lt`.
Returns
-------
bin : tuple of int
Pixel size in X and Y.
corner : tuple of int
Corner of subarray in X and Y.
|
entailment
|
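The LT arithmetic is easiest to see on a small worked example; the keyword values below are invented, and `get_corner` plus the `_nint` rounding helper are assumed in scope:

```python
from astropy.io import fits

hdr = fits.Header()
hdr['LTM1_1'] = 1.0    # unbinned
hdr['LTM2_2'] = 1.0
hdr['LTV1'] = -24.0    # hypothetical subarray offset in X
hdr['LTV2'] = 0.0

bin_, corner = get_corner(hdr)
# dbinx    = rsize / LTM1_1 = 1
# dxcorner = (dbinx - rsize) - dbinx * LTV1 = 0 - (-24) = 24
assert bin_ == (1, 1) and corner == (24, 0)
```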
def hdr_vals_for_overscan(root):
"""Retrieve header keyword values from RAW and SPT
FITS files to pass on to :func:`check_oscntab` and
:func:`check_overscan`.
Parameters
----------
root : str
Rootname of the observation. Can be relative path
to the file excluding the type of FITS file and
extension, e.g., '/my/path/jxxxxxxxq'.
Returns
-------
ccdamp : str
Amplifiers used to read out the CCDs.
xstart : int
Starting column of the readout in detector
coordinates.
ystart : int
Starting row of the readout in detector
coordinates.
xsize : int
Number of columns in the readout.
ysize : int
Number of rows in the readout.
"""
with fits.open(root + '_spt.fits') as hdu:
spthdr = hdu[0].header
with fits.open(root + '_raw.fits') as hdu:
prihdr = hdu[0].header
xstart = spthdr['SS_A1CRN']
ystart = spthdr['SS_A2CRN']
xsize = spthdr['SS_A1SZE']
ysize = spthdr['SS_A2SZE']
ccdamp = prihdr['CCDAMP']
return ccdamp, xstart, ystart, xsize, ysize
|
Retrieve header keyword values from RAW and SPT
FITS files to pass on to :func:`check_oscntab` and
:func:`check_overscan`.
Parameters
----------
root : str
Rootname of the observation. Can be relative path
to the file excluding the type of FITS file and
extension, e.g., '/my/path/jxxxxxxxq'.
Returns
-------
ccdamp : str
Amplifiers used to read out the CCDs.
xstart : int
Starting column of the readout in detector
coordinates.
ystart : int
Starting row of the readout in detector
coordinates.
xsize : int
Number of columns in the readout.
ysize : int
Number of rows in the readout.
|
entailment
|
def check_oscntab(oscntab, ccdamp, xsize, ysize, leading, trailing):
"""Check if the supplied parameters are in the
``OSCNTAB`` reference file.
.. note:: Even if an entry does not exist in ``OSCNTAB``,
as long as the subarray does not have any overscan,
it should not be a problem for CALACS.
.. note:: This function does not check the virtual bias rows.
Parameters
----------
oscntab : str
Path to the ``OSCNTAB`` reference file being checked against.
ccdamp : str
Amplifier(s) used to read out the CCDs.
xsize : int
Number of columns in the readout.
ysize : int
Number of rows in the readout.
leading : int
Number of columns in the bias section ("TRIMX1" to be trimmed off
by ``BLEVCORR``) on the A/C amplifiers side of the CCDs.
trailing : int
Number of columns in the bias section ("TRIMX2" to be trimmed off
by ``BLEVCORR``) on the B/D amplifiers side of the CCDs.
Returns
-------
supported : bool
Result of test if input parameters are in ``OSCNTAB``.
"""
tab = Table.read(oscntab)
ccdamp = ccdamp.lower().rstrip()
for row in tab:
if (row['CCDAMP'].lower().rstrip() in ccdamp and
row['NX'] == xsize and row['NY'] == ysize and
row['TRIMX1'] == leading and row['TRIMX2'] == trailing):
return True
return False
|
Check if the supplied parameters are in the
``OSCNTAB`` reference file.
.. note:: Even if an entry does not exist in ``OSCNTAB``,
as long as the subarray does not have any overscan,
it should not be a problem for CALACS.
.. note:: This function does not check the virtual bias rows.
Parameters
----------
oscntab : str
Path to the ``OSCNTAB`` reference file being checked against.
ccdamp : str
Amplifier(s) used to read out the CCDs.
xsize : int
Number of columns in the readout.
ysize : int
Number of rows in the readout.
leading : int
Number of columns in the bias section ("TRIMX1" to be trimmed off
by ``BLEVCORR``) on the A/C amplifiers side of the CCDs.
trailing : int
Number of columns in the bias section ("TRIMX2" to be trimmed off
by ``BLEVCORR``) on the B/D amplifiers side of the CCDs.
Returns
-------
supported : bool
Result of test if input parameters are in ``OSCNTAB``.
|
entailment
|
def check_overscan(xstart, xsize, total_prescan_pixels=24,
total_science_pixels=4096):
"""Check image for bias columns.
Parameters
----------
xstart : int
Starting column of the readout in detector coordinates.
xsize : int
Number of columns in the readout.
total_prescan_pixels : int
Total prescan pixels for a single amplifier on a detector.
Default is 24 for WFC.
total_science_pixels : int
Total science pixels across a detector.
Default is 4096 for WFC (across two amplifiers).
Returns
-------
hasoverscan : bool
Indication if there are bias columns in the image.
leading : int
Number of bias columns on the A/C amplifiers
side of the CCDs ("TRIMX1" in ``OSCNTAB``).
trailing : int
Number of bias columns on the B/D amplifiers
side of the CCDs ("TRIMX2" in ``OSCNTAB``).
"""
hasoverscan = False
leading = 0
trailing = 0
if xstart < total_prescan_pixels:
hasoverscan = True
leading = abs(xstart - total_prescan_pixels)
if (xstart + xsize) > total_science_pixels:
hasoverscan = True
trailing = abs(total_science_pixels -
(xstart + xsize - total_prescan_pixels))
return hasoverscan, leading, trailing
|
Check image for bias columns.
Parameters
----------
xstart : int
Starting column of the readout in detector coordinates.
xsize : int
Number of columns in the readout.
total_prescan_pixels : int
Total prescan pixels for a single amplifier on a detector.
Default is 24 for WFC.
total_science_pixels : int
Total science pixels across a detector.
Default is 4096 for WFC (across two amplifiers).
Returns
-------
hasoverscan : bool
Indication if there are bias columns in the image.
leading : int
Number of bias columns on the A/C amplifiers
side of the CCDs ("TRIMX1" in ``OSCNTAB``).
trailing : int
Number of bias columns on the B/D amplifiers
side of the CCDs ("TRIMX2" in ``OSCNTAB``).
|
entailment
|
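The three helpers above chain together: read the readout geometry from the RAW/SPT headers, check for bias columns, then validate against ``OSCNTAB``. The rootname and reference-file path below are hypothetical:

```python
# Expects jxxxxxxxq_raw.fits and jxxxxxxxq_spt.fits in the working directory.
ccdamp, xstart, ystart, xsize, ysize = hdr_vals_for_overscan('jxxxxxxxq')

hasoverscan, leading, trailing = check_overscan(xstart, xsize)
# e.g., a full-frame WFC readout (xstart=0, xsize=4144) gives
#   leading  = abs(0 - 24) = 24
#   trailing = abs(4096 - (0 + 4144 - 24)) = 24

if hasoverscan:
    # OSCNTAB path is hypothetical; use the file named in the primary header.
    supported = check_oscntab('/my/refs/oscn.fits', ccdamp,
                              xsize, ysize, leading, trailing)
```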
def process_request(self, request):
"""
Ignore unnecessary actions for static file requests, posts, or ajax
requests. We're only interested in redirecting following a 'natural'
request redirection to the `wagtailadmin_explore_root` or
`wagtailadmin_explore` views.
"""
referer_url = request.META.get('HTTP_REFERER')
return_to_index_url = request.session.get('return_to_index_url')
try:
if all((
return_to_index_url,
referer_url,
request.method == 'GET',
not request.is_ajax(),
resolve(request.path).url_name in ('wagtailadmin_explore_root',
'wagtailadmin_explore'),
)):
perform_redirection = False
referer_match = resolve(urlparse(referer_url).path)
if all((
referer_match.namespace == 'wagtailadmin_pages',
referer_match.url_name in (
'add',
'edit',
'delete',
'unpublish',
'copy'
),
)):
perform_redirection = True
elif all((
not referer_match.namespace,
referer_match.url_name in (
'wagtailadmin_pages_create',
'wagtailadmin_pages_edit',
'wagtailadmin_pages_delete',
'wagtailadmin_pages_unpublish'
),
)):
perform_redirection = True
if perform_redirection:
del request.session['return_to_index_url']
return HttpResponseRedirect(return_to_index_url)
except Resolver404:
pass
return None
|
Ignore unnecessary actions for static file requests, posts, or ajax
requests. We're only interested in redirecting following a 'natural'
request redirection to the `wagtailadmin_explore_root` or
`wagtailadmin_explore` views.
|
entailment
|
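For this middleware to see the requests it inspects, it has to be registered in Django's settings; a sketch assuming the class is exposed as ``wagtailmodeladmin.middleware.ModelAdminMiddleware`` (dotted path assumed):

```python
# settings.py
MIDDLEWARE_CLASSES = (
    # ... Django and Wagtail middleware ...
    'wagtailmodeladmin.middleware.ModelAdminMiddleware',  # path assumed
)
```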
def acs2d(input, exec_path='', time_stamps=False, verbose=False, quiet=False,
exe_args=None):
r"""
Run the acs2d.e executable as from the shell.
Output is automatically named based on input suffix:
+--------------------+----------------+------------------------------+
| INPUT | OUTPUT | EXPECTED DATA |
+====================+================+==============================+
| ``*_raw.fits`` | ``*_flt.fits`` | SBC image. |
+--------------------+----------------+------------------------------+
| ``*_blv_tmp.fits`` | ``*_flt.fits`` | ACSCCD output. |
+--------------------+----------------+------------------------------+
| ``*_blc_tmp.fits`` | ``*_flc.fits`` | ACSCCD output with PCTECORR. |
+--------------------+----------------+------------------------------+
| ``*_crj_tmp.fits`` | ``*_crj.fits`` | ACSREJ output. |
+--------------------+----------------+------------------------------+
| ``*_crc_tmp.fits`` | ``*_crc.fits`` | ACSREJ output with PCTECORR. |
+--------------------+----------------+------------------------------+
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a single filename ('j1234567q_blv_tmp.fits')
* a Python list of filenames
* a partial filename with wildcards ('\*blv_tmp.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
exec_path : str, optional
The complete path to ACS2D executable.
If not given, run ACS2D given by 'acs2d.e'.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
verbose : bool, optional
Set to True for verbose output.
quiet : bool, optional
Set to True for quiet output.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
        Note: Implementation uses subprocess.check_call and whitespace is not
        permitted. E.g. use exe_args=['--nThreads', '1']
"""
from stsci.tools import parseinput # Optional package dependency
if exec_path:
if not os.path.exists(exec_path):
raise OSError('Executable not found: ' + exec_path)
call_list = [exec_path]
else:
call_list = ['acs2d.e']
# Parse input to get list of filenames to process.
# acs2d.e only takes 'file1,file2,...'
infiles, dummy_out = parseinput.parseinput(input)
call_list.append(','.join(infiles))
if time_stamps:
call_list.append('-t')
if verbose:
call_list.append('-v')
if quiet:
call_list.append('-q')
if exe_args:
call_list.extend(exe_args)
subprocess.check_call(call_list)
|
r"""
Run the acs2d.e executable as from the shell.
Output is automatically named based on input suffix:
+--------------------+----------------+------------------------------+
| INPUT | OUTPUT | EXPECTED DATA |
+====================+================+==============================+
| ``*_raw.fits`` | ``*_flt.fits`` | SBC image. |
+--------------------+----------------+------------------------------+
| ``*_blv_tmp.fits`` | ``*_flt.fits`` | ACSCCD output. |
+--------------------+----------------+------------------------------+
| ``*_blc_tmp.fits`` | ``*_flc.fits`` | ACSCCD output with PCTECORR. |
+--------------------+----------------+------------------------------+
| ``*_crj_tmp.fits`` | ``*_crj.fits`` | ACSREJ output. |
+--------------------+----------------+------------------------------+
| ``*_crc_tmp.fits`` | ``*_crc.fits`` | ACSREJ output with PCTECORR. |
+--------------------+----------------+------------------------------+
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a single filename ('j1234567q_blv_tmp.fits')
* a Python list of filenames
* a partial filename with wildcards ('\*blv_tmp.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
exec_path : str, optional
The complete path to ACS2D executable.
If not given, run ACS2D given by 'acs2d.e'.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
verbose : bool, optional
Set to True for verbose output.
quiet : bool, optional
Set to True for quiet output.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.check_call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
|
entailment
|
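A typical invocation, assuming this is the wrapper shipped in acstools (module path assumed) and the CALACS executables such as ``acs2d.e`` are on ``PATH``:

```python
from acstools import acs2d

# Process every ACSCCD output in the current directory.
acs2d.acs2d('*_blv_tmp.fits', time_stamps=True, verbose=True)
```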
def run(configobj=None):
"""
TEAL interface for the `acs2d` function.
"""
acs2d(configobj['input'],
exec_path=configobj['exec_path'],
time_stamps=configobj['time_stamps'],
verbose=configobj['verbose'],
quiet=configobj['quiet']
)
|
TEAL interface for the `acs2d` function.
|
entailment
|
def run(configobj=None):
"""
TEAL interface for the `acscte` function.
"""
acscte(configobj['input'],
exec_path=configobj['exec_path'],
time_stamps=configobj['time_stamps'],
verbose=configobj['verbose'],
quiet=configobj['quiet'],
single_core=configobj['single_core']
)
|
TEAL interface for the `acscte` function.
|
entailment
|
def _check_inputs(self):
"""Check the inputs to ensure they are valid.
Returns
-------
status : bool
True if all inputs are valid, False if one is not.
"""
valid_detector = True
valid_filter = True
valid_date = True
# Determine the submitted detector is valid
if self.detector not in self._valid_detectors:
msg = ('{} is not a valid detector option.\n'
'Please choose one of the following:\n{}\n'
'{}'.format(self.detector,
'\n'.join(self._valid_detectors),
self._msg_div))
LOG.error(msg)
valid_detector = False
# Determine if the submitted filter is valid
if (self.filt is not None and valid_detector and
self.filt not in self.valid_filters[self.detector]):
msg = ('{} is not a valid filter for {}\n'
'Please choose one of the following:\n{}\n'
'{}'.format(self.filt, self.detector,
'\n'.join(self.valid_filters[self.detector]),
self._msg_div))
LOG.error(msg)
valid_filter = False
# Determine if the submitted date is valid
date_check = self._check_date()
if date_check is not None:
LOG.error('{}\n{}'.format(date_check, self._msg_div))
valid_date = False
if not valid_detector or not valid_filter or not valid_date:
return False
return True
|
Check the inputs to ensure they are valid.
Returns
-------
status : bool
True if all inputs are valid, False if one is not.
|
entailment
|
def _check_date(self, fmt='%Y-%m-%d'):
"""Convenience method for determining if the input date is valid.
Parameters
----------
fmt : str
The format of the date string. The default is ``%Y-%m-%d``, which
corresponds to ``YYYY-MM-DD``.
Returns
-------
status : str or `None`
If the date is valid, returns `None`. If the date is invalid,
returns a message explaining the issue.
"""
result = None
try:
dt_obj = dt.datetime.strptime(self.date, fmt)
except ValueError:
result = '{} does not match YYYY-MM-DD format'.format(self.date)
else:
if dt_obj < self._acs_installation_date:
result = ('The observation date cannot occur '
'before ACS was installed ({})'
.format(self._acs_installation_date.strftime(fmt)))
elif dt_obj > self._extrapolation_date:
result = ('The observation date cannot occur after the '
'maximum allowable date, {}. Extrapolations of the '
'instrument throughput after this date lead to '
'high uncertainties and are therefore invalid.'
.format(self._extrapolation_date.strftime(fmt)))
finally:
return result
|
Convenience method for determining if the input date is valid.
Parameters
----------
fmt : str
The format of the date string. The default is ``%Y-%m-%d``, which
corresponds to ``YYYY-MM-DD``.
Returns
-------
status : str or `None`
If the date is valid, returns `None`. If the date is invalid,
returns a message explaining the issue.
|
entailment
|
def _submit_request(self):
"""Submit a request to the ACS Zeropoint Calculator.
If an exception is raised during the request, an error message is
given. Otherwise, the response is saved in the corresponding
attribute.
"""
try:
self._response = urlopen(self._url)
except URLError as e:
msg = ('{}\n{}\nThe query failed! Please check your inputs. '
'If the error persists, submit a ticket to the '
'ACS Help Desk at hsthelp.stsci.edu with the error message '
'displayed above.'.format(str(e), self._msg_div))
LOG.error(msg)
self._failed = True
else:
self._failed = False
|
Submit a request to the ACS Zeropoint Calculator.
If an exception is raised during the request, an error message is
given. Otherwise, the response is saved in the corresponding
attribute.
|
entailment
|
def _parse_and_format(self):
""" Parse and format the results returned by the ACS Zeropoint Calculator.
Using ``beautifulsoup4``, find all the ``<tb> </tb>`` tags present in
the response. Format the results into an astropy.table.QTable with
corresponding units and assign it to the zpt_table attribute.
"""
soup = BeautifulSoup(self._response.read(), 'html.parser')
# Grab all elements in the table returned by the ZPT calc.
td = soup.find_all('td')
# Remove the units attached to PHOTFLAM and PHOTPLAM column names.
td = [val.text.split(' ')[0] for val in td]
# Turn the single list into a 2-D numpy array
data = np.reshape(td,
(int(len(td) / self._block_size), self._block_size))
# Create the QTable, note that sometimes self._response will be empty
# even though the return was successful; hence the try/except to catch
# any potential index errors. Provide the user with a message and
# set the zpt_table to None.
try:
tab = QTable(data[1:, :],
names=data[0],
dtype=[str, float, float, float, float, float])
except IndexError as e:
msg = ('{}\n{}\n There was an issue parsing the request. '
'Try resubmitting the query. If this issue persists, please '
                   'submit a ticket to the Help Desk at '
'https://stsci.service-now.com/hst'
.format(e, self._msg_div))
LOG.info(msg)
self._zpt_table = None
else:
# If and only if no exception was raised, attach the units to each
# column of the QTable. Note we skip the FILTER column because
# Quantity objects in astropy must be numerical (i.e. not str)
for col in tab.colnames:
if col.lower() == 'filter':
continue
tab[col].unit = self._data_units[col]
self._zpt_table = tab
|
Parse and format the results returned by the ACS Zeropoint Calculator.
Using ``beautifulsoup4``, find all the ``<tb> </tb>`` tags present in
the response. Format the results into an astropy.table.QTable with
corresponding units and assign it to the zpt_table attribute.
|
entailment
|
def fetch(self):
"""Submit the request to the ACS Zeropoints Calculator.
This method will:
* submit the request
* parse the response
* format the results into a table with the correct units
Returns
-------
tab : `astropy.table.QTable` or `None`
If the request was successful, returns a table; otherwise, `None`.
"""
LOG.info('Checking inputs...')
valid_inputs = self._check_inputs()
if valid_inputs:
LOG.info('Submitting request to {}'.format(self._url))
self._submit_request()
if self._failed:
return
LOG.info('Parsing the response and formatting the results...')
self._parse_and_format()
return self.zpt_table
LOG.error('Please fix the incorrect input(s)')
|
Submit the request to the ACS Zeropoints Calculator.
This method will:
* submit the request
* parse the response
* format the results into a table with the correct units
Returns
-------
tab : `astropy.table.QTable` or `None`
If the request was successful, returns a table; otherwise, `None`.
|
entailment
|
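These methods belong to the zeropoint query class; in acstools that class is ``acszpt.Query`` (name assumed here, since the class definition is not part of this excerpt). A hedged usage sketch:

```python
from acstools import acszpt

q = acszpt.Query(date='2017-01-01', detector='WFC', filt='F555W')
zpt_table = q.fetch()  # QTable on success, None on failure
if zpt_table is not None:
    print(zpt_table)
```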
def get_queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site.
"""
qs = self.model._default_manager.get_queryset()
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
|
Returns a QuerySet of all model instances that can be edited by the
admin site.
|
entailment
|
def index_view(self, request):
"""
Instantiates a class-based view to provide listing functionality for
the assigned model. The view class used can be overridden by changing
the 'index_view_class' attribute.
"""
kwargs = {'model_admin': self}
view_class = self.index_view_class
return view_class.as_view(**kwargs)(request)
|
Instantiates a class-based view to provide listing functionality for
the assigned model. The view class used can be overridden by changing
the 'index_view_class' attribute.
|
entailment
|
def create_view(self, request):
"""
Instantiates a class-based view to provide 'creation' functionality for
the assigned model, or redirect to Wagtail's create view if the
assigned model extends 'Page'. The view class used can be overridden by
changing the 'create_view_class' attribute.
"""
kwargs = {'model_admin': self}
view_class = self.create_view_class
return view_class.as_view(**kwargs)(request)
|
Instantiates a class-based view to provide 'creation' functionality for
the assigned model, or redirect to Wagtail's create view if the
assigned model extends 'Page'. The view class used can be overridden by
changing the 'create_view_class' attribute.
|
entailment
|
def choose_parent_view(self, request):
"""
Instantiates a class-based view to provide a view that allows a parent
page to be chosen for a new object, where the assigned model extends
Wagtail's Page model, and there is more than one potential parent for
new instances. The view class used can be overridden by changing the
'choose_parent_view_class' attribute.
"""
kwargs = {'model_admin': self}
view_class = self.choose_parent_view_class
return view_class.as_view(**kwargs)(request)
|
Instantiates a class-based view to provide a view that allows a parent
page to be chosen for a new object, where the assigned model extends
Wagtail's Page model, and there is more than one potential parent for
new instances. The view class used can be overridden by changing the
'choose_parent_view_class' attribute.
|
entailment
|
def edit_view(self, request, object_id):
"""
Instantiates a class-based view to provide 'edit' functionality for the
assigned model, or redirect to Wagtail's edit view if the assigned
model extends 'Page'. The view class used can be overridden by changing
the 'edit_view_class' attribute.
"""
kwargs = {'model_admin': self, 'object_id': object_id}
view_class = self.edit_view_class
return view_class.as_view(**kwargs)(request)
|
Instantiates a class-based view to provide 'edit' functionality for the
assigned model, or redirect to Wagtail's edit view if the assigned
model extends 'Page'. The view class used can be overridden by changing
the 'edit_view_class' attribute.
|
entailment
|
def confirm_delete_view(self, request, object_id):
"""
Instantiates a class-based view to provide 'delete confirmation'
functionality for the assigned model, or redirect to Wagtail's delete
confirmation view if the assigned model extends 'Page'. The view class
used can be overridden by changing the 'confirm_delete_view_class'
attribute.
"""
kwargs = {'model_admin': self, 'object_id': object_id}
view_class = self.confirm_delete_view_class
return view_class.as_view(**kwargs)(request)
|
Instantiates a class-based view to provide 'delete confirmation'
functionality for the assigned model, or redirect to Wagtail's delete
confirmation view if the assigned model extends 'Page'. The view class
used can be overridden by changing the 'confirm_delete_view_class'
attribute.
|
entailment
|
def unpublish_view(self, request, object_id):
"""
Instantiates a class-based view that redirects to Wagtail's 'unpublish'
view for models that extend 'Page' (if the user has sufficient
permissions). We do this via our own view so that we can reliably
control redirection of the user back to the index_view once the action
is completed. The view class used can be overridden by changing the
'unpublish_view_class' attribute.
"""
kwargs = {'model_admin': self, 'object_id': object_id}
view_class = self.unpublish_view_class
return view_class.as_view(**kwargs)(request)
|
Instantiates a class-based view that redirects to Wagtail's 'unpublish'
view for models that extend 'Page' (if the user has sufficient
permissions). We do this via our own view so that we can reliably
control redirection of the user back to the index_view once the action
is completed. The view class used can be overridden by changing the
'unpublish_view_class' attribute.
|
entailment
|
def copy_view(self, request, object_id):
"""
Instantiates a class-based view that redirects to Wagtail's 'copy'
view for models that extend 'Page' (if the user has sufficient
permissions). We do this via our own view so that we can reliably
control redirection of the user back to the index_view once the action
is completed. The view class used can be overridden by changing the
'copy_view_class' attribute.
"""
kwargs = {'model_admin': self, 'object_id': object_id}
view_class = self.copy_view_class
return view_class.as_view(**kwargs)(request)
|
Instantiates a class-based view that redirects to Wagtail's 'copy'
view for models that extend 'Page' (if the user has sufficient
permissions). We do this via our own view so that we can reliably
control redirection of the user back to the index_view once the action
is completed. The view class used can be overridden by changing the
'copy_view_class' attribute.
|
entailment
|
def get_templates(self, action='index'):
"""
Utility function that provides a list of templates to try for a given
view, when the template isn't overridden by one of the template
attributes on the class.
"""
app = self.opts.app_label
model_name = self.opts.model_name
return [
'wagtailmodeladmin/%s/%s/%s.html' % (app, model_name, action),
'wagtailmodeladmin/%s/%s.html' % (app, action),
'wagtailmodeladmin/%s.html' % (action,),
]
|
Utility function that provides a list of templates to try for a given
view, when the template isn't overridden by one of the template
attributes on the class.
|
entailment
|
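For example, for a hypothetical app ``events`` with model ``EventPage``, the index view would try templates in this order:

```python
model_admin.get_templates('index')
# ['wagtailmodeladmin/events/eventpage/index.html',
#  'wagtailmodeladmin/events/index.html',
#  'wagtailmodeladmin/index.html']
```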
def get_permissions_for_registration(self):
"""
Utilised by Wagtail's 'register_permissions' hook to allow permissions
for a model to be assigned to groups in settings. This is only required
if the model isn't a Page model, and isn't registered as a Snippet
"""
from wagtail.wagtailsnippets.models import SNIPPET_MODELS
if not self.is_pagemodel and self.model not in SNIPPET_MODELS:
return self.permission_helper.get_all_model_permissions()
return Permission.objects.none()
|
Utilised by Wagtail's 'register_permissions' hook to allow permissions
for a model to be assigned to groups in settings. This is only required
if the model isn't a Page model, and isn't registered as a Snippet
|
entailment
|
def get_admin_urls_for_registration(self):
"""
        Utilised by Wagtail's 'register_admin_urls' hook to register URLs for
        the views that this class offers.
"""
urls = (
url(get_url_pattern(self.opts),
self.index_view, name=get_url_name(self.opts)),
url(get_url_pattern(self.opts, 'create'),
self.create_view, name=get_url_name(self.opts, 'create')),
url(get_object_specific_url_pattern(self.opts, 'edit'),
self.edit_view, name=get_url_name(self.opts, 'edit')),
url(get_object_specific_url_pattern(self.opts, 'confirm_delete'),
self.confirm_delete_view,
name=get_url_name(self.opts, 'confirm_delete')),
)
if self.inspect_view_enabled:
urls = urls + (
url(get_object_specific_url_pattern(self.opts, 'inspect'),
self.inspect_view,
name=get_url_name(self.opts, 'inspect')),
)
if self.is_pagemodel:
urls = urls + (
url(get_url_pattern(self.opts, 'choose_parent'),
self.choose_parent_view,
name=get_url_name(self.opts, 'choose_parent')),
url(get_object_specific_url_pattern(self.opts, 'unpublish'),
self.unpublish_view,
name=get_url_name(self.opts, 'unpublish')),
url(get_object_specific_url_pattern(self.opts, 'copy'),
self.copy_view,
name=get_url_name(self.opts, 'copy')),
)
return urls
def construct_main_menu(self, request, menu_items):
warnings.warn((
"The 'construct_main_menu' method is now deprecated. You "
"should also remove the construct_main_menu hook from "
"wagtail_hooks.py in your app folder."), DeprecationWarning)
return menu_items
|
Utilised by Wagtail's 'register_admin_urls' hook to register URLs for
the views that this class offers.
|
entailment
|
def get_menu_item(self):
"""
Utilised by Wagtail's 'register_menu_item' hook to create a menu
for this group with a SubMenu linking to listing pages for any
associated ModelAdmin instances
"""
if self.modeladmin_instances:
submenu = SubMenu(self.get_submenu_items())
return GroupMenuItem(self, self.get_menu_order(), submenu)
|
Utilised by Wagtail's 'register_menu_item' hook to create a menu
for this group with a SubMenu linking to listing pages for any
associated ModelAdmin instances
|
entailment
|
def get_permissions_for_registration(self):
"""
Utilised by Wagtail's 'register_permissions' hook to allow permissions
        for all models grouped by this class to be assigned to Groups in
settings.
"""
qs = Permission.objects.none()
for instance in self.modeladmin_instances:
qs = qs | instance.get_permissions_for_registration()
return qs
|
Utilised by Wagtail's 'register_permissions' hook to allow permissions
for all models grouped by this class to be assigned to Groups in
settings.
|
entailment
|
def get_admin_urls_for_registration(self):
"""
        Utilised by Wagtail's 'register_admin_urls' hook to register the URLs
        used by any associated ModelAdmin instances.
"""
urls = []
for instance in self.modeladmin_instances:
urls.extend(instance.get_admin_urls_for_registration())
return urls
|
Utilised by Wagtail's 'register_admin_urls' hook to register the URLs
used by any associated ModelAdmin instances.
|
entailment
|
def is_shown(self, request):
"""
If there aren't any visible items in the submenu, don't bother to show
this menu item
"""
for menuitem in self.menu._registered_menu_items:
if menuitem.is_shown(request):
return True
return False
|
If there aren't any visible items in the submenu, don't bother to show
this menu item
|
entailment
|
def run(configobj=None):
"""
TEAL interface for the `acscteforwardmodel` function.
"""
acscteforwardmodel(configobj['input'],
exec_path=configobj['exec_path'],
time_stamps=configobj['time_stamps'],
verbose=configobj['verbose'],
quiet=configobj['quiet'],
single_core=configobj['single_core']
)
|
TEAL interface for the `acscteforwardmodel` function.
|
entailment
|
def run(configobj=None):
"""
TEAL interface for the `acsccd` function.
"""
acsccd(configobj['input'],
exec_path=configobj['exec_path'],
time_stamps=configobj['time_stamps'],
verbose=configobj['verbose'],
quiet=configobj['quiet'] #,
#dqicorr=configobj['dqicorr'],
#atodcorr=configobj['atodcorr'],
#blevcorr=configobj['blevcorr'],
#biascorr=configobj['biascorr']
)
|
TEAL interface for the `acsccd` function.
|
entailment
|
def destripe_plus(inputfile, suffix='strp', stat='pmode1', maxiter=15,
sigrej=2.0, lower=None, upper=None, binwidth=0.3,
scimask1=None, scimask2=None,
dqbits=None, rpt_clean=0, atol=0.01,
cte_correct=True, clobber=False, verbose=True):
r"""Calibrate post-SM4 ACS/WFC exposure(s) and use
standalone :ref:`acsdestripe`.
This takes a RAW image and generates a FLT file containing
its calibrated and destriped counterpart.
If CTE correction is performed, FLC will also be present.
Parameters
----------
inputfile : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*raw.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product of ``acs_destripe``.
This only affects the intermediate output file that will
be automatically renamed to ``*blv_tmp.fits`` during the processing.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's `imagestats` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
        This parameter sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
    binwidth : float (Default = 0.3)
        Histogram's bin width, in sigma units, used to sample the
        distribution of pixel brightness values in order to compute the
        background statistics. This parameter is applicable *only* to *stat*
        parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
scimask1 : str or list of str
Mask images for *calibrated* ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
scimask2 : str or list of str
Mask images for *calibrated* ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2,4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
        | Set `dqbits` to 0 to treat *all* non-zero pixels in the DQ
          mask as "bad" pixels and to exclude the corresponding image
          pixels from de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
        enter -(4+8+1)=-13. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used), *will be* combined with user masks specified
in the `scimask1` and `scimask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
cte_correct : bool
Perform CTE correction.
verbose : bool
Print informational messages. Default = True.
Raises
------
ImportError
``stsci.tools`` not found.
IOError
Input file does not exist.
ValueError
Invalid header values or CALACS version.
"""
# Optional package dependencies
from stsci.tools import parseinput
try:
from stsci.tools.bitmask import interpret_bit_flags
except ImportError:
from stsci.tools.bitmask import (
interpret_bits_value as interpret_bit_flags
)
# process input file(s) and if we have multiple input files - recursively
# call acs_destripe_plus for each input image:
flist = parseinput.parseinput(inputfile)[0]
if isinstance(scimask1, str):
mlist1 = parseinput.parseinput(scimask1)[0]
elif isinstance(scimask1, np.ndarray):
mlist1 = [scimask1.copy()]
elif scimask1 is None:
mlist1 = []
elif isinstance(scimask1, list):
mlist1 = []
for m in scimask1:
if isinstance(m, np.ndarray):
mlist1.append(m.copy())
elif isinstance(m, str):
mlist1 += parseinput.parseinput(m)[0]
else:
raise TypeError("'scimask1' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'scimask1' must be either a str, or a "
"numpy.ndarray, or a list of the two type of "
"values.")
if isinstance(scimask2, str):
mlist2 = parseinput.parseinput(scimask2)[0]
elif isinstance(scimask2, np.ndarray):
mlist2 = [scimask2.copy()]
elif scimask2 is None:
mlist2 = []
elif isinstance(scimask2, list):
mlist2 = []
for m in scimask2:
if isinstance(m, np.ndarray):
mlist2.append(m.copy())
elif isinstance(m, str):
mlist2 += parseinput.parseinput(m)[0]
else:
raise TypeError("'scimask2' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'scimask2' must be either a str, or a "
"numpy.ndarray, or a list of the two type of "
"values.")
n_input = len(flist)
n_mask1 = len(mlist1)
n_mask2 = len(mlist2)
if n_input == 0:
raise ValueError(
'No input file(s) provided or the file(s) do not exist')
if n_mask1 == 0:
mlist1 = [None] * n_input
elif n_mask1 != n_input:
raise ValueError('Insufficient masks for [SCI,1]')
if n_mask2 == 0:
mlist2 = [None] * n_input
elif n_mask2 != n_input:
raise ValueError('Insufficient masks for [SCI,2]')
if n_input > 1:
for img, mf1, mf2 in zip(flist, mlist1, mlist2):
destripe_plus(
inputfile=img, suffix=suffix, stat=stat,
lower=lower, upper=upper, binwidth=binwidth,
maxiter=maxiter, sigrej=sigrej,
                scimask1=mf1, scimask2=mf2, dqbits=dqbits,  # per-file masks
                rpt_clean=rpt_clean, atol=atol,
cte_correct=cte_correct, clobber=clobber, verbose=verbose
)
return
inputfile = flist[0]
scimask1 = mlist1[0]
scimask2 = mlist2[0]
# verify that the RAW image exists in cwd
cwddir = os.getcwd()
if not os.path.exists(os.path.join(cwddir, inputfile)):
raise IOError("{0} does not exist.".format(inputfile))
# get image's primary header:
header = fits.getheader(inputfile)
# verify masks defined (or not) simultaneously:
if (header['CCDAMP'] == 'ABCD' and
((scimask1 is not None and scimask2 is None) or
(scimask1 is None and scimask2 is not None))):
raise ValueError("Both 'scimask1' and 'scimask2' must be specified "
"or not specified together.")
calacs_str = subprocess.check_output(['calacs.e', '--version']).split()[0]
calacs_ver = [int(x) for x in calacs_str.decode().split('.')]
if calacs_ver < [8, 3, 1]:
        raise ValueError('CALACS {0} is incompatible. '
'Must be 8.3.1 or later.'.format(calacs_str))
# check date for post-SM4 and if supported subarray or full frame
is_subarray = False
ctecorr = header['PCTECORR']
aperture = header['APERTURE']
detector = header['DETECTOR']
date_obs = Time(header['DATE-OBS'])
# intermediate filenames
blvtmp_name = inputfile.replace('raw', 'blv_tmp')
blctmp_name = inputfile.replace('raw', 'blc_tmp')
# output filenames
tra_name = inputfile.replace('_raw.fits', '.tra')
flt_name = inputfile.replace('raw', 'flt')
flc_name = inputfile.replace('raw', 'flc')
if detector != 'WFC':
raise ValueError("{0} is not a WFC image, please check the 'DETECTOR'"
" keyword.".format(inputfile))
if date_obs < SM4_DATE:
raise ValueError(
"{0} is a pre-SM4 image.".format(inputfile))
if header['SUBARRAY'] and cte_correct:
if aperture in SUBARRAY_LIST:
is_subarray = True
else:
LOG.warning('Using non-supported subarray, '
'turning CTE correction off')
cte_correct = False
# delete files from previous CALACS runs
if clobber:
for tmpfilename in [blvtmp_name, blctmp_name, flt_name, flc_name,
tra_name]:
if os.path.exists(tmpfilename):
os.remove(tmpfilename)
# run ACSCCD on RAW
acsccd.acsccd(inputfile)
# modify user mask with DQ masks if requested
dqbits = interpret_bit_flags(dqbits)
if dqbits is not None:
# save 'tra' file in memory to trick the log file
# not to save first acs2d log as this is done only
# for the purpose of obtaining DQ masks.
    # WISH: it would have been nice if there were an easy way of obtaining
# just the DQ masks as if data were calibrated but without
# having to recalibrate them with acs2d.
if os.path.isfile(tra_name):
with open(tra_name) as fh:
tra_lines = fh.readlines()
else:
tra_lines = None
# apply flats, etc.
acs2d.acs2d(blvtmp_name, verbose=False, quiet=True)
# extract DQ arrays from the FLT image:
dq1, dq2 = _read_DQ_arrays(flt_name)
mask1 = _get_mask(scimask1, 1)
scimask1 = acs_destripe._mergeUserMaskAndDQ(dq1, mask1, dqbits)
mask2 = _get_mask(scimask2, 2)
if dq2 is not None:
scimask2 = acs_destripe._mergeUserMaskAndDQ(dq2, mask2, dqbits)
elif mask2 is None:
scimask2 = None
# reconstruct trailer file:
if tra_lines is not None:
with open(tra_name, mode='w') as fh:
fh.writelines(tra_lines)
# delete temporary FLT image:
if os.path.isfile(flt_name):
os.remove(flt_name)
# execute destriping (post-SM4 data only)
acs_destripe.clean(
blvtmp_name, suffix, stat=stat, maxiter=maxiter, sigrej=sigrej,
lower=lower, upper=upper, binwidth=binwidth,
mask1=scimask1, mask2=scimask2, dqbits=dqbits,
rpt_clean=rpt_clean, atol=atol, clobber=clobber, verbose=verbose)
blvtmpsfx = 'blv_tmp_{0}'.format(suffix)
os.rename(inputfile.replace('raw', blvtmpsfx), blvtmp_name)
# update subarray header
if is_subarray and cte_correct:
fits.setval(blvtmp_name, 'PCTECORR', value='PERFORM')
ctecorr = 'PERFORM'
# perform CTE correction on destriped image
if cte_correct:
if ctecorr == 'PERFORM':
acscte.acscte(blvtmp_name)
else:
LOG.warning(
"PCTECORR={0}, cannot run CTE correction".format(ctecorr))
cte_correct = False
# run ACS2D to get FLT and FLC images
acs2d.acs2d(blvtmp_name)
if cte_correct:
acs2d.acs2d(blctmp_name)
# delete intermediate files
os.remove(blvtmp_name)
if cte_correct and os.path.isfile(blctmp_name):
os.remove(blctmp_name)
info_str = 'Done.\nFLT: {0}\n'.format(flt_name)
if cte_correct:
info_str += 'FLC: {0}\n'.format(flc_name)
LOG.info(info_str)
|
r"""Calibrate post-SM4 ACS/WFC exposure(s) and use
standalone :ref:`acsdestripe`.
This takes a RAW image and generates a FLT file containing
its calibrated and destriped counterpart.
If CTE correction is performed, FLC will also be present.
Parameters
----------
inputfile : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*raw.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product of ``acs_destripe``.
This only affects the intermediate output file that will
be automatically renamed to ``*blv_tmp.fits`` during the processing.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's `imagestats` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameter sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
binwidth : float (Default = 0.3)
Histogram's bin width, in sigma units, used to sample the
distribution of pixel brightness values in order to compute the
background statistics. This parameter is applicable *only* to *stat*
parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
scimask1 : str or list of str
Mask images for *calibrated* ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
scimask2 : str or list of str
Mask images for *calibrated* ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2,4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to make *all* non-zero pixels in the DQ
mask to be considered "bad" pixels, and the corresponding image
pixels not to be used for de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-13. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used), *will be* combined with user masks specified
in the `scimask1` and `scimask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
cte_correct : bool
Perform CTE correction.
verbose : bool
Print informational messages. Default = True.
Raises
------
ImportError
``stsci.tools`` not found.
IOError
Input file does not exist.
ValueError
Invalid header values or CALACS version.
|
entailment
|
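A minimal sketch of the `dqbits` convention documented above, for illustration only. The helper name `interpret_dqbits` is hypothetical (production code would delegate to a library helper such as `astropy.nddata.bitmask.interpret_bit_flags`), but the parsing rules follow the docstring: flags are summed, a leading '~' flips "good" flags to "bad" flags, and `None` or '~0' disables DQ masking.

import numpy as np

def interpret_dqbits(dqbits):
    """Hypothetical helper mirroring the `dqbits` rules above.

    Returns (flags, invert): `flags` is the integer sum of the listed
    bit flags and `invert` says whether those flags mark "bad" pixels.
    (None, False) means DQ masking is disabled entirely.
    """
    if dqbits is None:
        return None, False
    if isinstance(dqbits, int):
        if dqbits >= 0:
            return dqbits, False
        return -dqbits - 1, True          # -(sum+1) lists the "bad" flags
    s = dqbits.strip()
    invert = s.startswith('~')
    if invert:
        s = s[1:]
    flags = sum(int(f) for f in s.replace('+', ',').split(',') if f.strip())
    if invert and flags == 0:             # '~0' is the same as None
        return None, False
    return flags, invert

dq = np.array([0, 2, 4, 6, 3, 12])
flags, invert = interpret_dqbits('2,4')   # -> (6, False)
if invert:
    good = (dq & flags) == 0              # listed flags are "bad"
else:
    good = (dq & ~flags) == 0             # only listed flags tolerated
print(good)                               # [ True  True  True  True False False]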
def run(configobj=None):
"""TEAL interface for :func:`destripe_plus`."""
destripe_plus(
configobj['input'],
suffix=configobj['suffix'],
stat=configobj['stat'],
maxiter=configobj['maxiter'],
sigrej=configobj['sigrej'],
lower=configobj['lower'],
upper=configobj['upper'],
binwidth=configobj['binwidth'],
scimask1=configobj['scimask1'],
scimask2=configobj['scimask2'],
dqbits=configobj['dqbits'],
rpt_clean=configobj['rpt_clean'],
atol=configobj['atol'],
cte_correct=configobj['cte_correct'],
clobber=configobj['clobber'],
verbose=configobj['verbose'])
|
TEAL interface for :func:`destripe_plus`.
|
entailment
|
def main():
"""Command line driver."""
import argparse
# Parse input parameters
parser = argparse.ArgumentParser(
prog=__taskname__,
description=(
'Run CALACS and standalone acs_destripe script on given post-SM4 '
'ACS/WFC RAW full-frame or supported subarray image.'))
parser.add_argument(
'arg0', metavar='input', type=str, help='Input file')
parser.add_argument(
'--suffix', type=str, default='strp',
help='Output suffix for acs_destripe')
parser.add_argument(
'--stat', type=str, default='pmode1', help='Background statistics')
parser.add_argument(
'--maxiter', type=int, default=15, help='Max #iterations')
parser.add_argument(
'--sigrej', type=float, default=2.0, help='Rejection sigma')
parser.add_argument(
'--lower', nargs='?', type=float, default=None,
help='Lower limit for "good" pixels.')
parser.add_argument(
'--upper', nargs='?', type=float, default=None,
help='Upper limit for "good" pixels.')
parser.add_argument(
'--binwidth', type=float, default=0.1,
help='Bin width for distribution sampling.')
parser.add_argument(
'--sci1_mask', nargs=1, type=str, default=None,
help='Mask image for calibrated [SCI,1]')
parser.add_argument(
'--sci2_mask', nargs=1, type=str, default=None,
help='Mask image for calibrated [SCI,2]')
parser.add_argument(
'--dqbits', nargs='?', type=str, default=None,
help='DQ bits to be considered "good".')
parser.add_argument(
'--rpt_clean', type=int, default=0,
help='Number of *repeated* bias de-stripes to perform.')
parser.add_argument(
'--atol', nargs='?', type=float, default=0.01,
help='Absolute tolerance to stop *repeated* bias de-stripes.')
parser.add_argument(
'--nocte', action='store_true', help='Turn off CTE correction.')
parser.add_argument(
'--clobber', action='store_true', help='Clobber output')
parser.add_argument(
'-q', '--quiet', action="store_true",
help='Do not print informational messages')
parser.add_argument(
'--version', action='version',
version='{0} v{1} ({2})'.format(__taskname__, __version__, __vdate__))
options = parser.parse_args()
if options.sci1_mask:
mask1 = options.sci1_mask[0]
else:
mask1 = options.sci1_mask
if options.sci2_mask:
mask2 = options.sci2_mask[0]
else:
mask2 = options.sci2_mask
    destripe_plus(options.arg0, suffix=options.suffix,
                  stat=options.stat,
                  maxiter=options.maxiter, sigrej=options.sigrej,
lower=options.lower, upper=options.upper,
binwidth=options.binwidth,
scimask1=mask1, scimask2=mask2, dqbits=options.dqbits,
rpt_clean=options.rpt_clean, atol=options.atol,
cte_correct=not options.nocte, clobber=options.clobber,
verbose=not options.quiet)
|
Command line driver.
|
entailment
|
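For completeness, a hedged sketch of driving `main()` from Python by faking the command line; the RAW file name is made up, and in practice one would invoke the installed console script with the same flags.

import sys

# Every flag below maps onto an argparse option defined in main() above.
sys.argv = [
    'acs_destripe_plus',       # program name
    'jabc01abq_raw.fits',      # hypothetical input RAW file
    '--suffix', 'strp',
    '--maxiter', '15',
    '--sigrej', '2.0',
    '--nocte',                 # skip CTE correction
    '--clobber',
]
main()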
def get_search_results(self, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
if self.search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in self.search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
|
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
|
entailment
|
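A standalone illustration of the `search_fields` prefix convention used by `construct_search` above: '^' for a prefix match, '=' for an exact match, '@' for full-text search, and no marker for a substring match.

def construct_search(field_name):
    # Same prefix convention as in get_search_results above
    if field_name.startswith('^'):
        return "%s__istartswith" % field_name[1:]
    elif field_name.startswith('='):
        return "%s__iexact" % field_name[1:]
    elif field_name.startswith('@'):
        return "%s__search" % field_name[1:]
    return "%s__icontains" % field_name

for spec in ('^title', '=sku', '@body', 'name'):
    print(spec, '->', construct_search(spec))
# ^title -> title__istartswith
# =sku -> sku__iexact
# @body -> body__search
# name -> name__icontains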
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
|
Returns all params except IGNORED_PARAMS
|
entailment
|
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.opts.get_field(field_name)
return field.name
except FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
|
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
|
entailment
|
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.get_default_ordering(request))
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
# reverse order if order_field already has "-" as its prefix
if order_field.startswith('-') and pfx == "-":
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.opts.pk.name
if not (set(ordering) & {'pk', '-pk', pk_name, '-' + pk_name}):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
|
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
|
entailment
|
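The query-string ordering above follows the Django changelist convention: the `ORDER_VAR` parameter holds dot-separated `list_display` column indices, each optionally prefixed with '-' for descending order. A minimal sketch of that parsing step (the `list_display` tuple is hypothetical):

list_display = ('title', 'author', 'created')   # hypothetical columns

def parse_order_param(order_param):
    """Parse an ordering value such as '2.-1' into field prefixes."""
    ordering = []
    for p in order_param.split('.'):
        _, pfx, idx = p.rpartition('-')   # pfx is '-' for descending
        try:
            field_name = list_display[int(idx)]
        except (IndexError, ValueError):
            continue                       # invalid spec, skip it
        ordering.append(pfx + field_name)
    return ordering

print(parse_order_param('2.-1'))   # ['created', '-author']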
def get_ordering_field_columns(self):
"""
Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying
# sort field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if ORDER_VAR not in self.params:
# for ordering specified on model_admin or model Meta, we don't
# know the right column numbers absolutely, because there might be
# more than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
|
Returns an OrderedDict of ordering field column numbers and asc/desc
|
entailment
|
def get_field_label(self, field_name, field=None):
""" Return a label to display for a field """
label = None
if field is not None:
label = getattr(field, 'verbose_name', None)
if label is None:
label = getattr(field, 'name', None)
if label is None:
label = field_name
return label.capitalize()
|
Return a label to display for a field
|
entailment
|
def get_field_display_value(self, field_name, field=None):
""" Return a display value for a field """
"""
Firstly, check for a 'get_fieldname_display' property/method on
the model, and return the value of that, if present.
"""
val_funct = getattr(self.instance, 'get_%s_display' % field_name, None)
if val_funct is not None:
if callable(val_funct):
return val_funct()
return val_funct
"""
Secondly, if we have a real field, we can try to display something
more useful for it.
"""
if field is not None:
try:
field_type = field.get_internal_type()
if (
field_type == 'ForeignKey' and
field.related_model == get_image_model()
):
# The field is an image
return self.get_image_field_display(field_name, field)
if (
field_type == 'ForeignKey' and
field.related_model == Document
):
# The field is a document
return self.get_document_field_display(field_name, field)
except AttributeError:
pass
"""
Resort to getting the value of 'field_name' from the instance.
"""
return getattr(self.instance, field_name,
self.model_admin.get_empty_value_display())
|
Return a display value for a field
|
entailment
|
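The first lookup in `get_field_display_value` above relies on Django's `get_<field>_display` naming convention. A minimal, framework-free sketch of that fallback chain (the `Article` class is a made-up stand-in for a model instance):

class Article:
    """Hypothetical stand-in for a model instance."""
    status = 1

    def get_status_display(self):
        return {0: 'Draft', 1: 'Published'}[self.status]

def display_value(instance, field_name, empty='-'):
    # Same order as get_field_display_value above: prefer the
    # get_<field>_display hook, then fall back to the raw attribute.
    val_funct = getattr(instance, 'get_%s_display' % field_name, None)
    if val_funct is not None:
        return val_funct() if callable(val_funct) else val_funct
    return getattr(instance, field_name, empty)

art = Article()
print(display_value(art, 'status'))    # Published
print(display_value(art, 'missing'))   # -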
def get_image_field_display(self, field_name, field):
""" Render an image """
image = getattr(self.instance, field_name)
if image:
fltr, _ = Filter.objects.get_or_create(spec='max-400x400')
rendition = image.get_rendition(fltr)
return rendition.img_tag
return self.model_admin.get_empty_value_display()
|
Render an image
|
entailment
|
def get_document_field_display(self, field_name, field):
""" Render a link to a document """
document = getattr(self.instance, field_name)
if document:
return mark_safe(
'<a href="%s">%s <span class="meta">(%s, %s)</span></a>' % (
document.url,
document.title,
document.file_extension.upper(),
filesizeformat(document.file.size),
)
)
return self.model_admin.get_empty_value_display()
|
Render a link to a document
|
entailment
|
def get_dict_for_field(self, field_name):
"""
Return a dictionary containing `label` and `value` values to display
for a field.
"""
try:
field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
field = None
return {
'label': self.get_field_label(field_name, field),
'value': self.get_field_display_value(field_name, field),
}
|
Return a dictionary containing `label` and `value` values to display
for a field.
|
entailment
|
def get_fields_dict(self):
"""
Return a list of `label`/`value` dictionaries to represent the
fields named by the model_admin class's `get_inspect_view_fields` method
"""
fields = []
for field_name in self.model_admin.get_inspect_view_fields():
fields.append(self.get_dict_for_field(field_name))
return fields
|
Return a list of `label`/`value` dictionaries to represent the
fields named by the model_admin class's `get_inspect_view_fields` method
|
entailment
|
def items_for_result(view, result):
"""
Generates the actual list of data.
"""
model_admin = view.model_admin
for field_name in view.list_display:
empty_value_display = model_admin.get_empty_value_display()
row_classes = ['field-%s' % field_name]
try:
f, attr, value = lookup_field(field_name, result, model_admin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean or not value:
allow_tags = True
if django.VERSION >= (1, 9):
result_repr = display_for_value(value, empty_value_display, boolean)
else:
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
if django.VERSION >= (1, 9):
result_repr = display_for_field(value, f, empty_value_display)
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
if force_text(result_repr) == '':
result_repr = mark_safe(' ')
row_classes.extend(model_admin.get_extra_class_names_for_field_col(field_name, result))
row_attributes_dict = model_admin.get_extra_attrs_for_field_col(field_name, result)
        row_attributes_dict['class'] = ' '.join(row_classes)
row_attributes = ''.join(' %s="%s"' % (key, val) for key, val in row_attributes_dict.items())
row_attributes_safe = mark_safe(row_attributes)
yield format_html('<td{}>{}</td>', row_attributes_safe, result_repr)
|
Generates the actual list of data.
|
entailment
|
def result_list(context):
"""
Displays the headers and data list together
"""
view = context['view']
object_list = context['object_list']
headers = list(result_headers(view))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
context.update({
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(view, object_list))})
return context
|
Displays the headers and data list together
|
entailment
|
def run(configobj=None):
"""
TEAL interface for the `acssum` function.
"""
acssum(configobj['input'],
configobj['output'],
exec_path=configobj['exec_path'],
time_stamps=configobj['time_stamps'],
verbose=configobj['verbose'],
quiet=configobj['quiet']
)
|
TEAL interface for the `acssum` function.
|
entailment
|
def _detsat_one(filename, ext, sigma=2.0, low_thresh=0.1, h_thresh=0.5,
small_edge=60, line_len=200, line_gap=75,
percentile=(4.5, 93.0), buf=200, plot=False, verbose=False):
"""Called by :func:`detsat`."""
if verbose:
t_beg = time.time()
fname = '{0}[{1}]'.format(filename, ext)
# check extension
if ext not in (1, 4, 'SCI', ('SCI', 1), ('SCI', 2)):
warnings.warn('{0} is not a valid science extension for '
'ACS/WFC'.format(ext), AstropyUserWarning)
# get the data
image = fits.getdata(filename, ext)
# image = im.astype('float64')
# rescale the image
p1, p2 = np.percentile(image, percentile)
    # there should always be some counts in the image; a negative lower
    # percentile is clipped to zero. Makes things nicer for finding edges.
if p1 < 0:
p1 = 0.0
if verbose:
print('Rescale intensity percentiles: {0}, {1}'.format(p1, p2))
image = exposure.rescale_intensity(image, in_range=(p1, p2))
# get the edges
immax = np.max(image)
edge = canny(image, sigma=sigma,
low_threshold=immax * low_thresh,
high_threshold=immax * h_thresh)
# clean up the small objects, will make less noise
morph.remove_small_objects(edge, min_size=small_edge, connectivity=8,
in_place=True)
# create an array of angles from 0 to 180, exactly 0 will get bad columns
# but it is unlikely that a satellite will be exactly at 0 degrees, so
# don't bother checking.
# then, convert to radians.
angle = np.radians(np.arange(2, 178, 0.5, dtype=float))
# perform Hough Transform to detect straight lines.
# only do if plotting to visualize the image in hough space.
    # otherwise just perform a Probabilistic Hough Transform.
if plot and plt is not None:
h, theta, d = transform.hough_line(edge, theta=angle)
plt.ion()
# perform Probabilistic Hough Transformation to get line segments.
# NOTE: Results are slightly different from run to run!
result = transform.probabilistic_hough_line(
edge, threshold=210, line_length=line_len,
line_gap=line_gap, theta=angle)
result = np.asarray(result)
n_result = len(result)
# initially assume there is no satellite
satellite = False
# only continue if there was more than one point (at least a line)
# returned from the PHT
if n_result > 1:
if verbose:
print('Length of PHT result: {0}'.format(n_result))
# create lists for X and Y positions of lines and build points
x0 = result[:, 0, 0]
y0 = result[:, 0, 1]
x1 = result[:, 1, 0]
y1 = result[:, 1, 1]
        # set some boundaries
ymax, xmax = image.shape
topx = xmax - buf
topy = ymax - buf
if verbose:
print('min(x0)={0:4d}, min(x1)={1:4d}, min(y0)={2:4d}, '
'min(y1)={3:4d}'.format(min(x0), min(x1), min(y0), min(y1)))
print('max(x0)={0:4d}, max(x1)={1:4d}, max(y0)={2:4d}, '
'max(y1)={3:4d}'.format(max(x0), max(x1), max(y0), max(y1)))
print('buf={0}'.format(buf))
print('topx={0}, topy={1}'.format(topx, topy))
# set up trail angle "tracking" arrays.
# find the angle of each segment and filter things out.
# TODO: this may be wrong. Try using arctan2.
trail_angle = np.degrees(np.arctan((y1 - y0) / (x1 - x0)))
# round to the nearest 5 degrees, trail should not be that curved
round_angle = (5 * np.round(trail_angle * 0.2)).astype(int)
# take out 90 degree things
mask = round_angle % 90 != 0
if not np.any(mask):
if verbose:
print('No round_angle found')
return np.empty(0)
round_angle = round_angle[mask]
trail_angle = trail_angle[mask]
result = result[mask]
ang, num = stats.mode(round_angle)
# do the filtering
truth = round_angle == ang[0]
if verbose:
print('trail_angle: {0}'.format(trail_angle))
print('round_angle: {0}'.format(round_angle))
print('mode(round_angle): {0}'.format(ang[0]))
# filter out the outliers
trail_angle = trail_angle[truth]
result = result[truth]
n_result = len(result)
if verbose:
print('Filtered trail_angle: {0}'.format(trail_angle))
if n_result < 1:
return np.empty(0)
# if there is an unreasonable amount of points, it picked up garbage
elif n_result > 300:
warnings.warn(
                'Way too many segments ({0}) to be a real trail. '
'Rejecting detection on {1}.'.format(n_result, fname),
AstropyUserWarning)
return np.empty(0)
# remake the point lists with things taken out
x0 = result[:, 0, 0]
y0 = result[:, 0, 1]
x1 = result[:, 1, 0]
y1 = result[:, 1, 1]
min_x0 = min(x0)
min_y0 = min(y0)
min_x1 = min(x1)
min_y1 = min(y1)
max_x0 = max(x0)
max_y0 = max(y0)
max_x1 = max(x1)
max_y1 = max(y1)
mean_angle = np.mean(trail_angle)
# make decisions on where the trail went and determine if a trail
# traversed the image
# top to bottom
if (((min_y0 < buf) or (min_y1 < buf)) and
((max_y0 > topy) or (max_y1 > topy))):
satellite = True
if verbose:
print('Trail Direction: Top to Bottom')
# right to left
elif (((min_x0 < buf) or (min_x1 < buf)) and
((max_x0 > topx) or (max_x1 > topx))):
satellite = True
if verbose:
print('Trail Direction: Right to Left')
# bottom to left
elif (((min_x0 < buf) or (min_x1 < buf)) and
((min_y0 < buf) or (min_y1 < buf)) and
(-1 > mean_angle > -89)):
satellite = True
if verbose:
print('Trail Direction: Bottom to Left')
# top to left
elif (((min_x0 < buf) or (min_x1 < buf)) and
((max_y0 > topy) or (max_y1 > topy)) and
(89 > mean_angle > 1)):
satellite = True
if verbose:
print('Trail Direction: Top to Left')
# top to right
elif (((max_x0 > topx) or (max_x1 > topx)) and
((max_y0 > topy) or (max_y1 > topy)) and
(-1 > mean_angle > -89)):
satellite = True
if verbose:
print('Trail Direction: Top to Right')
# bottom to right
elif (((max_x0 > topx) or (max_x1 > topx)) and
((min_y0 < buf) or (min_y1 < buf)) and
(89 > mean_angle > 1)):
satellite = True
if verbose:
print('Trail Direction: Bottom to Right')
if satellite:
if verbose:
print('{0} trail segment(s) detected'.format(n_result))
print('Trail angle list (not returned): ')
print(trail_angle)
print('End point list:')
for i, ((px0, py0), (px1, py1)) in enumerate(result, 1):
print('{0:5d}. ({1:4d}, {2:4d}), ({3:4d}, {4:4d})'.format(
i, px0, py0, px1, py1))
if plot and plt is not None:
mean = np.median(image)
stddev = image.std()
lower = mean - stddev
upper = mean + stddev
fig1, ax1 = plt.subplots()
ax1.imshow(edge, cmap=plt.cm.gray)
ax1.set_title('Edge image for {0}'.format(fname))
for (px0, py0), (px1, py1) in result: # Draw trails
ax1.plot((px0, px1), (py0, py1), scalex=False, scaley=False)
fig2, ax2 = plt.subplots()
ax2.imshow(
np.log(1 + h),
extent=(np.rad2deg(theta[-1]), np.rad2deg(theta[0]),
d[-1], d[0]), aspect=0.02)
ax2.set_title('Hough Transform')
ax2.set_xlabel('Angles (degrees)')
ax2.set_ylabel('Distance from Origin (pixels)')
fig3, ax3 = plt.subplots()
ax3.imshow(image, vmin=lower, vmax=upper, cmap=plt.cm.gray)
ax3.set_title(fname)
for (px0, py0), (px1, py1) in result: # Draw trails
ax3.plot((px0, px1), (py0, py1), scalex=False, scaley=False)
plt.draw()
else: # length of result was too small
result = np.empty(0)
if verbose:
print('No trail detected; found {0} segments'.format(n_result))
if plot and plt is not None:
fig1, ax1 = plt.subplots()
ax1.imshow(edge, cmap=plt.cm.gray)
ax1.set_title(fname)
# Draw trails
for (px0, py0), (px1, py1) in result:
ax1.plot((px0, px1), (py0, py1), scalex=False, scaley=False)
if verbose:
t_end = time.time()
print('Run time: {0} s'.format(t_end - t_beg))
return result
|
Called by :func:`detsat`.
|
entailment
|
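One detail of `_detsat_one` worth isolating: segment angles are rounded to the nearest 5 degrees before taking the mode, since a real trail should be nearly straight, and segments at multiples of 90 degrees (bad rows/columns) are rejected. A quick check of that arithmetic:

import numpy as np

trail_angle = np.array([31.7, 33.2, 29.9, 88.4, -42.1])
# 5 * round(angle / 5), written exactly as in _detsat_one above
round_angle = (5 * np.round(trail_angle * 0.2)).astype(int)
print(round_angle)            # [ 30  35  30  90 -40]

mask = round_angle % 90 != 0  # drop segments at multiples of 90 deg
print(trail_angle[mask])      # [ 31.7  33.2  29.9 -42.1]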
def _get_valid_indices(shape, ix0, ix1, iy0, iy1):
"""Give array shape and desired indices, return indices that are
correctly bounded by the shape."""
ymax, xmax = shape
if ix0 < 0:
ix0 = 0
if ix1 > xmax:
ix1 = xmax
if iy0 < 0:
iy0 = 0
if iy1 > ymax:
iy1 = ymax
if iy1 <= iy0 or ix1 <= ix0:
raise IndexError(
'array[{0}:{1},{2}:{3}] is invalid'.format(iy0, iy1, ix0, ix1))
return list(map(int, [ix0, ix1, iy0, iy1]))
|
Given an array shape and desired indices, return indices that are
correctly bounded by the shape.
|
entailment
|
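A quick usage sketch of `_get_valid_indices`: out-of-bounds requests are clamped to the array shape before slicing, and a degenerate window raises `IndexError`.

import numpy as np

image = np.zeros((1024, 2048))
# Request a window hanging off the lower-left corner of the array
ix0, ix1, iy0, iy1 = _get_valid_indices(image.shape, -50, 150, -20, 80)
print(ix0, ix1, iy0, iy1)      # 0 150 0 80
sub = image[iy0:iy1, ix0:ix1]  # now safe to slice
print(sub.shape)               # (80, 150)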
def _rotate_point(point, angle, ishape, rshape, reverse=False):
"""Transform a point from original image coordinates to rotated image
coordinates and back. It assumes the rotation point is the center of an
image.
This works on a simple rotation transformation::
newx = (startx) * np.cos(angle) - (starty) * np.sin(angle)
newy = (startx) * np.sin(angle) + (starty) * np.cos(angle)
It takes into account the differences in image size.
Parameters
----------
point : tuple
Point to be rotated, in the format of ``(x, y)`` measured from
origin.
angle : float
        The angle in radians to rotate the point by, measured
        counter-clockwise from the X axis.
ishape : tuple
The shape of the original image, taken from ``image.shape``.
rshape : tuple
The shape of the rotated image, in the form of ``rotate.shape``.
reverse : bool, optional
Transform from rotated coordinates back to non-rotated image.
Returns
-------
rotated_point : tuple
Rotated point in the format of ``(x, y)`` as measured from origin.
"""
# unpack the image and rotated images shapes
if reverse:
angle = (angle * -1)
temp = ishape
ishape = rshape
rshape = temp
# transform into center of image coordinates
yhalf, xhalf = ishape
yrhalf, xrhalf = rshape
yhalf = yhalf / 2
xhalf = xhalf / 2
yrhalf = yrhalf / 2
xrhalf = xrhalf / 2
startx = point[0] - xhalf
starty = point[1] - yhalf
# do the rotation
newx = startx * np.cos(angle) - starty * np.sin(angle)
newy = startx * np.sin(angle) + starty * np.cos(angle)
# add back the padding from changing the size of the image
newx = newx + xrhalf
newy = newy + yrhalf
return (newx, newy)
|
Transform a point from original image coordinates to rotated image
coordinates and back. It assumes the rotation point is the center of an
image.
This works on a simple rotation transformation::
newx = (startx) * np.cos(angle) - (starty) * np.sin(angle)
newy = (startx) * np.sin(angle) + (starty) * np.cos(angle)
It takes into account the differences in image size.
Parameters
----------
point : tuple
Point to be rotated, in the format of ``(x, y)`` measured from
origin.
angle : float
The angle in radians to rotate the point by, measured
counter-clockwise from the X axis.
ishape : tuple
The shape of the original image, taken from ``image.shape``.
rshape : tuple
The shape of the rotated image, in the form of ``rotate.shape``.
reverse : bool, optional
Transform from rotated coordinates back to non-rotated image.
Returns
-------
rotated_point : tuple
Rotated point in the format of ``(x, y)`` as measured from origin.
|
entailment
|
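A small round-trip check of `_rotate_point`. Note the angle is passed in radians, as the callers in this module do; the shapes below are arbitrary, with the rotated image's axes swapped for a 90-degree turn.

import numpy as np

ishape = (100, 200)    # original image shape (ny, nx)
rshape = (200, 100)    # shape after a 90-degree resize-rotation
angle = np.pi / 2

p = (150.0, 30.0)
pr = _rotate_point(p, angle, ishape, rshape)
back = _rotate_point(pr, angle, ishape, rshape, reverse=True)
print(np.allclose(back, p))    # True: forward then reverse recovers p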
def make_mask(filename, ext, trail_coords, sublen=75, subwidth=200, order=3,
sigma=4, pad=10, plot=False, verbose=False):
"""Create DQ mask for an image for a given satellite trail.
This mask can be added to existing DQ data using :func:`update_dq`.
.. note::
Unlike :func:`detsat`, multiprocessing is not available for
this function.
Parameters
----------
filename : str
FITS image filename.
ext : int, str, or tuple
Extension for science data, as accepted by ``astropy.io.fits``.
trail_coords : ndarray
One of the trails returned by :func:`detsat`.
This must be in the format of ``[[x0, y0], [x1, y1]]``.
sublen : int, optional
Length of strip to use as the fitting window for the trail.
subwidth : int, optional
Width of box to fit trail on.
order : int, optional
The order of the spline interpolation for image rotation.
See :func:`skimage.transform.rotate`.
sigma : float, optional
        Sigma of the satellite trail for detection. If points in the
        subregion are more than this many sigma above the background,
        they are marked as part of a satellite trail. This may need
        to be lowered for resolved trails.
pad : int, optional
Amount of extra padding in pixels to give the satellite mask.
plot : bool, optional
Plot the result.
verbose : bool, optional
Print extra information to the terminal, mostly for debugging.
Returns
-------
mask : ndarray
Boolean array marking the satellite trail with `True`.
Raises
------
ImportError
Missing scipy or skimage>=0.11 packages.
IndexError
Invalid subarray indices.
ValueError
Image has no positive values, trail subarray too small, or
trail profile not found.
"""
if not HAS_OPDEP:
raise ImportError('Missing scipy or skimage>=0.11 packages')
if verbose:
t_beg = time.time()
fname = '{0}[{1}]'.format(filename, ext)
image = fits.getdata(filename, ext)
dx = image.max()
if dx <= 0:
raise ValueError('Image has no positive values')
# rescale the image
image = image / dx
# make sure everything is at least 0
image[image < 0] = 0
(x0, y0), (x1, y1) = trail_coords # p0, p1
# Find out how much to rotate the image
rad = np.arctan2(y1 - y0, x1 - x0)
newrad = (np.pi * 2) - rad
deg = np.degrees(rad)
if verbose:
print('Rotation: {0}'.format(deg))
rotate = transform.rotate(image, deg, resize=True, order=order)
if plot and plt is not None:
plt.ion()
mean = np.median(image)
stddev = image.std()
lower = mean - stddev
upper = mean + stddev
fig1, ax1 = plt.subplots()
ax1.imshow(image, vmin=lower, vmax=upper, cmap=plt.cm.gray)
ax1.set_title(fname)
fig2, ax2 = plt.subplots()
ax2.imshow(rotate, vmin=lower, vmax=upper, cmap=plt.cm.gray)
ax2.set_title('{0} rotated by {1} deg'.format(fname, deg))
plt.draw()
# Will do all of this in the loop, but want to make sure there is a
# good point first and that there is indeed a profile to fit.
# get starting point
sx, sy = _rotate_point((x0, y0), newrad, image.shape, rotate.shape)
# start with one subarray around p0
dx = int(subwidth / 2)
ix0, ix1, iy0, iy1 = _get_valid_indices(
rotate.shape, sx - dx, sx + dx, sy - sublen, sy + sublen)
subr = rotate[iy0:iy1, ix0:ix1]
if len(subr) <= sublen:
raise ValueError('Trail subarray size is {0} but expected {1} or '
'larger'.format(len(subr), sublen))
# Flatten the array so we are looking along rows
# Take median of each row, should filter out most outliers
    # This list will be appended to in the loop
medarr = np.median(subr, axis=1)
flat = [medarr]
# get the outliers
# mean = biweight_location(medarr)
mean = sigma_clipped_stats(medarr)[0]
stddev = biweight_midvariance(medarr)
# only flag things that are sigma from the mean
z = np.where(medarr > (mean + (sigma * stddev)))[0]
if plot and plt is not None:
fig1, ax1 = plt.subplots()
ax1.plot(medarr, 'b.')
ax1.plot(z, medarr[z], 'r.')
ax1.set_xlabel('Index')
ax1.set_ylabel('Value')
ax1.set_title('Median array in flat[0]')
plt.draw()
# Make sure there is something in the first pass before trying to move on
if len(z) < 1:
raise ValueError(
'First look at finding a profile failed. '
'Nothing found at {0} from background! '
'Adjust parameters and try again.'.format(sigma))
# get the bounds of the flagged points
lower = z.min()
upper = z.max()
diff = upper - lower
    # add in a padding value to make sure all of the wings are accounted for
lower = lower - pad
upper = upper + pad
# for plotting see how the profile was made (append to plot above)
if plot and plt is not None:
padind = np.arange(lower, upper)
ax1.plot(padind, medarr[padind], 'yx')
plt.draw()
# start to create a mask
mask = np.zeros(rotate.shape)
lowerx, upperx, lowery, uppery = _get_valid_indices(
mask.shape, np.floor(sx - subwidth), np.ceil(sx + subwidth),
np.floor(sy - sublen + lower), np.ceil(sy - sublen + upper))
mask[lowery:uppery, lowerx:upperx] = 1
done = False
first = True
nextx = upperx # np.ceil(sx + subwidth)
centery = np.ceil(lowery + diff) # np.ceil(sy - sublen + lower + diff)
counter = 0
while not done:
# move to the right of the centerpoint first. do the same
# as above but keep moving right until the edge is hit.
ix0, ix1, iy0, iy1 = _get_valid_indices(
rotate.shape, nextx - dx, nextx + dx,
centery - sublen, centery + sublen)
subr = rotate[iy0:iy1, ix0:ix1]
        # detect the image edge: an empty subarray means we ran off it
if 0 in subr.shape:
if verbose:
print('Hit edge, subr shape={0}, first={1}'.format(
subr.shape, first))
if first:
first = False
centery = sy
nextx = sx
else:
done = True
continue
medarr = np.median(subr, axis=1)
flat.append(medarr)
# mean = biweight_location(medarr)
mean = sigma_clipped_stats(medarr, sigma=sigma)[0]
# Might give RuntimeWarning
stddev = biweight_midvariance(medarr)
z = np.where(medarr > (mean + (sigma * stddev)))[0]
if len(z) < 1:
if first:
if verbose:
print('No good profile found for counter={0}. Start '
'moving left from starting point.'.format(counter))
centery = sy
nextx = sx
first = False
else:
if verbose:
print('z={0} is less than 1, subr shape={1}, '
'we are done'.format(z, subr.shape))
done = True
continue
# get the bounds of the flagged points
lower = z.min()
upper = z.max()
diff = upper - lower
        # add in a padding value to make sure all of the wings
# are accounted for
lower = np.floor(lower - pad)
upper = np.ceil(upper + pad)
lowerx, upperx, lowery, uppery = _get_valid_indices(
mask.shape,
np.floor(nextx - subwidth),
np.ceil(nextx + subwidth),
np.floor(centery - sublen + lower),
np.ceil(centery - sublen + upper))
mask[lowery:uppery, lowerx:upperx] = 1
# lower_p = (lowerx, lowery)
upper_p = (upperx, uppery)
# lower_t = _rotate_point(
# lower_p, newrad, image.shape, rotate.shape, reverse=True)
upper_t = _rotate_point(
upper_p, newrad, image.shape, rotate.shape, reverse=True)
# lowy = np.floor(lower_t[1])
highy = np.ceil(upper_t[1])
# lowx = np.floor(lower_t[0])
highx = np.ceil(upper_t[0])
# Reset the next subr to be at the center of the profile
if first:
nextx = nextx + dx
centery = lowery + diff # centery - sublen + lower + diff
if (nextx + subwidth) > rotate.shape[1]:
if verbose:
print('Hit rotate edge at counter={0}'.format(counter))
first = False
elif (highy > image.shape[0]) or (highx > image.shape[1]):
if verbose:
print('Hit image edge at counter={0}'.format(counter))
first = False
if not first:
centery = sy
nextx = sx
# Not first, this is the pass the other way.
else:
nextx = nextx - dx
centery = lowery + diff # centery - sublen + lower + diff
if (nextx - subwidth) < 0:
if verbose:
print('Hit rotate edge at counter={0}'.format(counter))
done = True
elif (highy > image.shape[0]) or (highx > image.shape[1]):
if verbose:
print('Hit image edge at counter={0}'.format(counter))
done = True
counter += 1
        # make sure it does not loop infinitely
if counter > 500:
if verbose:
print('Too many loops, exiting')
done = True
# End while
rot = transform.rotate(mask, -deg, resize=True, order=1)
ix0 = (rot.shape[1] - image.shape[1]) / 2
iy0 = (rot.shape[0] - image.shape[0]) / 2
lowerx, upperx, lowery, uppery = _get_valid_indices(
rot.shape, ix0, image.shape[1] + ix0, iy0, image.shape[0] + iy0)
mask = rot[lowery:uppery, lowerx:upperx]
if mask.shape != image.shape:
warnings.warn(
'Output mask shape is {0} but input image shape is '
'{1}'.format(mask.shape, image.shape), AstropyUserWarning)
# Change to boolean mask
    mask = mask.astype(bool)  # np.bool is removed in modern NumPy
if plot and plt is not None:
# debugging array
test = image.copy()
test[mask] = 0
mean = np.median(test)
stddev = test.std()
lower = mean - stddev
upper = mean + stddev
fig1, ax1 = plt.subplots()
ax1.imshow(test, vmin=lower, vmax=upper, cmap=plt.cm.gray)
ax1.set_title('Masked image')
fig2, ax2 = plt.subplots()
ax2.imshow(mask, cmap=plt.cm.gray)
ax2.set_title('DQ mask')
plt.draw()
if verbose:
t_end = time.time()
print('Run time: {0} s'.format(t_end - t_beg))
return mask
|
Create DQ mask for an image for a given satellite trail.
This mask can be added to existing DQ data using :func:`update_dq`.
.. note::
Unlike :func:`detsat`, multiprocessing is not available for
this function.
Parameters
----------
filename : str
FITS image filename.
ext : int, str, or tuple
Extension for science data, as accepted by ``astropy.io.fits``.
trail_coords : ndarray
One of the trails returned by :func:`detsat`.
This must be in the format of ``[[x0, y0], [x1, y1]]``.
sublen : int, optional
Length of strip to use as the fitting window for the trail.
subwidth : int, optional
Width of box to fit trail on.
order : int, optional
The order of the spline interpolation for image rotation.
See :func:`skimage.transform.rotate`.
sigma : float, optional
Sigma of the satellite trail for detection. If points in the
subregion are more than this many sigma above the background,
they are marked as part of a satellite trail. This may need
to be lowered for resolved trails.
pad : int, optional
Amount of extra padding in pixels to give the satellite mask.
plot : bool, optional
Plot the result.
verbose : bool, optional
Print extra information to the terminal, mostly for debugging.
Returns
-------
mask : ndarray
Boolean array marking the satellite trail with `True`.
Raises
------
ImportError
Missing scipy or skimage>=0.11 packages.
IndexError
Invalid subarray indices.
ValueError
Image has no positive values, trail subarray too small, or
trail profile not found.
|
entailment
|
def update_dq(filename, ext, mask, dqval=16384, verbose=True):
"""Update the given image and DQ extension with the given
satellite trails mask and flag.
Parameters
----------
filename : str
FITS image filename to update.
ext : int, str, or tuple
DQ extension, as accepted by ``astropy.io.fits``, to update.
mask : ndarray
Boolean mask, with `True` marking the satellite trail(s).
This can be the result(s) from :func:`make_mask`.
dqval : int, optional
DQ value to use for the trail. Default value of 16384 is
tailored for ACS/WFC.
verbose : bool, optional
Print extra information to the terminal.
"""
with fits.open(filename, mode='update') as pf:
dqarr = pf[ext].data
old_mask = (dqval & dqarr) != 0 # Existing flagged trails
new_mask = mask & ~old_mask # Only flag previously unflagged trails
npix_updated = np.count_nonzero(new_mask)
# Update DQ extension only if necessary
if npix_updated > 0:
pf[ext].data[new_mask] += dqval
pf['PRIMARY'].header.add_history('{0} satdet v{1}({2})'.format(
time.ctime(), __version__, __vdate__))
pf['PRIMARY'].header.add_history(
' Updated {0} px in EXT {1} with DQ={2}'.format(
npix_updated, ext, dqval))
if verbose:
fname = '{0}[{1}]'.format(filename, ext)
print('DQ flag value is {0}'.format(dqval))
print('Input... flagged NPIX={0}'.format(np.count_nonzero(mask)))
print('Existing flagged NPIX={0}'.format(np.count_nonzero(old_mask)))
print('Newly... flagged NPIX={0}'.format(npix_updated))
if npix_updated > 0:
print('{0} updated'.format(fname))
else:
print('No updates necessary for {0}'.format(fname))
|
Update the given image and DQ extension with the given
satellite trails mask and flag.
Parameters
----------
filename : str
FITS image filename to update.
ext : int, str, or tuple
DQ extension, as accepted by ``astropy.io.fits``, to update.
mask : ndarray
Boolean mask, with `True` marking the satellite trail(s).
This can be the result(s) from :func:`make_mask`.
dqval : int, optional
DQ value to use for the trail. Default value of 16384 is
tailored for ACS/WFC.
verbose : bool, optional
Print extra information to the terminal.
|
entailment
|
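The DQ update above is purely bitwise, so reruns are idempotent: a pixel already carrying the flag is skipped rather than incremented again. A toy demonstration of the same logic with `dqval=16384` (the ACS/WFC satellite-trail flag):

import numpy as np

dqval = 16384
dqarr = np.array([0, 16384, 4, 16388])       # existing DQ values
mask = np.array([True, True, True, False])   # new satellite mask

old_mask = (dqval & dqarr) != 0   # pixels already flagged
new_mask = mask & ~old_mask       # flag only previously unflagged pixels
dqarr[new_mask] += dqval

print(dqarr)                        # [16384 16384 16388 16388]
print(np.count_nonzero(new_mask))   # 2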
def _satdet_worker(work_queue, done_queue, sigma=2.0, low_thresh=0.1,
h_thresh=0.5, small_edge=60, line_len=200, line_gap=75,
percentile=(4.5, 93.0), buf=200):
"""Multiprocessing worker."""
for fil, chip in iter(work_queue.get, 'STOP'):
try:
result = _detsat_one(
fil, chip, sigma=sigma,
low_thresh=low_thresh, h_thresh=h_thresh,
small_edge=small_edge, line_len=line_len, line_gap=line_gap,
percentile=percentile, buf=buf, plot=False, verbose=False)
except Exception as e:
retcode = False
result = '{0}: {1}'.format(type(e), str(e))
else:
retcode = True
done_queue.put((retcode, fil, chip, result))
return True
|
Multiprocessing worker.
|
entailment
|
def detsat(searchpattern, chips=[1, 4], n_processes=4, sigma=2.0,
low_thresh=0.1, h_thresh=0.5, small_edge=60, line_len=200,
line_gap=75, percentile=(4.5, 93.0), buf=200, plot=False,
verbose=True):
"""Find satellite trails in the given images and extensions.
The trails are calculated using Probabilistic Hough Transform.
.. note::
The trail endpoints found here are crude approximations.
Use :func:`make_mask` to create the actual DQ mask for the trail(s)
of interest.
Parameters
----------
searchpattern : str
Search pattern for input FITS images, as accepted by
:py:func:`glob.glob`.
chips : list
List of extensions for science data, as accepted by
``astropy.io.fits``.
The default values of ``[1, 4]`` are tailored for ACS/WFC.
n_processes : int
Number of processes for multiprocessing, which is only useful
if you are processing a lot of images or extensions.
If 1 is given, no multiprocessing is done.
sigma : float, optional
The size of a Gaussian filter to use before edge detection.
The default is 2, which is good for almost all images.
low_thresh : float, optional
The lower threshold for hysteresis linking of edge pieces.
This should be between 0 and 1, and less than ``h_thresh``.
h_thresh : float, optional
The upper threshold for hysteresis linking of edge pieces.
This should be between 0 and 1, and greater than ``low_thresh``.
small_edge : int, optional
Size of perimeter of small objects to remove in edge image.
This significantly reduces noise before doing Hough Transform.
If it is set too high, you will remove the edge of the
satellite you are trying to find.
line_len : int, optional
Minimum line length for Probabilistic Hough Transform to fit.
line_gap : int, optional
The largest gap in points allowed for the Probabilistic
Hough Transform.
percentile : tuple of float, optional
The percent boundaries to scale the image to before
creating edge image.
buf : int, optional
How close to the edge of the image the satellite trail has to
be to be considered a trail.
plot : bool, optional
Make plots of edge image, Hough space transformation, and
rescaled image. This is only applicable if ``n_processes=1``.
verbose : bool, optional
Print extra information to the terminal, mostly for debugging.
        In multiprocessing mode, information from individual processes
        is not printed.
Returns
-------
results : dict
Dictionary mapping ``(filename, ext)`` to an array of endpoints of
line segments in the format of ``[[x0, y0], [x1, y1]]`` (if found) or
an empty array (if not). These are the segments that have been
identified as making up part of a satellite trail.
errors : dict
Dictionary mapping ``(filename, ext)`` to the error message explaining
why processing failed.
Raises
------
ImportError
Missing scipy or skimage>=0.11 packages.
"""
if not HAS_OPDEP:
raise ImportError('Missing scipy or skimage>=0.11 packages')
if verbose:
t_beg = time.time()
files = glob.glob(searchpattern)
n_files = len(files)
n_chips = len(chips)
n_tot = n_files * n_chips
n_cpu = multiprocessing.cpu_count()
results = {}
errors = {}
if verbose:
print('{0} file(s) found...'.format(n_files))
# Nothing to do
if n_files < 1 or n_chips < 1:
return results, errors
# Adjust number of processes
if n_tot < n_processes:
n_processes = n_tot
if n_processes > n_cpu:
n_processes = n_cpu
# No multiprocessing
if n_processes == 1:
for fil in files:
for chip in chips:
if verbose:
print('\nProcessing {0}[{1}]...'.format(fil, chip))
key = (fil, chip)
try:
result = _detsat_one(
fil, chip, sigma=sigma,
low_thresh=low_thresh, h_thresh=h_thresh,
small_edge=small_edge, line_len=line_len,
line_gap=line_gap, percentile=percentile, buf=buf,
plot=plot, verbose=verbose)
except Exception as e:
errmsg = '{0}: {1}'.format(type(e), str(e))
errors[key] = errmsg
if verbose:
print(errmsg)
else:
results[key] = result
if verbose:
print()
# Multiprocessing.
# The work queue is for things that need to be done and is shared by all
# processes. When a worker finishes, its output is put into done queue.
else:
if verbose:
print('Using {0} processes'.format(n_processes))
work_queue = Queue()
done_queue = Queue()
processes = []
for fil in files:
for chip in chips:
work_queue.put((fil, chip))
for w in range(n_processes):
p = Process(
target=_satdet_worker, args=(work_queue, done_queue), kwargs={
'sigma': sigma, 'low_thresh': low_thresh,
'h_thresh': h_thresh, 'small_edge': small_edge,
'line_len': line_len, 'line_gap': line_gap,
'percentile': percentile, 'buf': buf})
p.start()
processes.append(p)
        # One 'STOP' sentinel per worker so that every process exits its loop
        for _ in range(n_processes):
            work_queue.put('STOP')
for p in processes:
p.join()
done_queue.put('STOP')
        # collect per-(file, chip) results and errors into dictionaries
for status in iter(done_queue.get, 'STOP'):
key = (status[1], status[2])
if status[0]: # Success
results[key] = status[3]
else: # Failed
errors[key] = status[3]
if verbose:
if len(results) > 0:
print('Number of trail segment(s) found:')
for key in sorted(results):
print(' {0}[{1}]: {2}'.format(
key[0], key[1], len(results[key])))
if len(errors) > 0:
print('These have errors:')
for key in sorted(errors):
print(' {0}[{1}]'.format(key[0], key[1]))
if verbose:
t_end = time.time()
print('Total run time: {0} s'.format(t_end - t_beg))
return results, errors
|
Find satellite trails in the given images and extensions.
The trails are calculated using Probabilistic Hough Transform.
.. note::
The trail endpoints found here are crude approximations.
Use :func:`make_mask` to create the actual DQ mask for the trail(s)
of interest.
Parameters
----------
searchpattern : str
Search pattern for input FITS images, as accepted by
:py:func:`glob.glob`.
chips : list
List of extensions for science data, as accepted by
``astropy.io.fits``.
The default values of ``[1, 4]`` are tailored for ACS/WFC.
n_processes : int
Number of processes for multiprocessing, which is only useful
if you are processing a lot of images or extensions.
If 1 is given, no multiprocessing is done.
sigma : float, optional
The size of a Gaussian filter to use before edge detection.
The default is 2, which is good for almost all images.
low_thresh : float, optional
The lower threshold for hysteresis linking of edge pieces.
This should be between 0 and 1, and less than ``h_thresh``.
h_thresh : float, optional
The upper threshold for hysteresis linking of edge pieces.
This should be between 0 and 1, and greater than ``low_thresh``.
small_edge : int, optional
Size of perimeter of small objects to remove in edge image.
This significantly reduces noise before doing Hough Transform.
If it is set too high, you will remove the edge of the
satellite you are trying to find.
line_len : int, optional
Minimum line length for Probabilistic Hough Transform to fit.
line_gap : int, optional
The largest gap in points allowed for the Probabilistic
Hough Transform.
percentile : tuple of float, optional
The percent boundaries to scale the image to before
creating edge image.
buf : int, optional
How close to the edge of the image the satellite trail has to
be to be considered a trail.
plot : bool, optional
Make plots of edge image, Hough space transformation, and
rescaled image. This is only applicable if ``n_processes=1``.
verbose : bool, optional
Print extra information to the terminal, mostly for debugging.
In multiprocessing mode, information from individual processes is not printed.
Returns
-------
results : dict
Dictionary mapping ``(filename, ext)`` to an array of endpoints of
line segments in the format of ``[[x0, y0], [x1, y1]]`` (if found) or
an empty array (if not). These are the segments that have been
identified as making up part of a satellite trail.
errors : dict
Dictionary mapping ``(filename, ext)`` to the error message explaining
why processing failed.
Raises
------
ImportError
Missing scipy or skimage>=0.11 packages.
|
entailment
|
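Putting the public pieces together, a hedged end-to-end sketch: detect trails, build a mask from the first segment, then flag the DQ array. The file pattern is made up, and the `ext + 2` step assumes the standard ACS/WFC layout where the DQ array sits two extensions after its SCI array.

results, errors = detsat('*_flt.fits', chips=[1, 4], n_processes=4)

for (filename, ext), trails in results.items():
    if len(trails) == 0:
        continue                     # no trail detected on this chip
    # Use the first detected segment as the trail estimate
    mask = make_mask(filename, ext, trails[0])
    # Assumption: SCI at ext N has its DQ at ext N+2 (ACS/WFC layout)
    update_dq(filename, ext + 2, mask, dqval=16384)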
def on(self, event: str) -> Callable:
""" Decorator for subscribing a function to a specific event.
:param event: Name of the event to subscribe to.
:type event: str
:return: The outer function.
:rtype: Callable
"""
def outer(func):
self.add_event(func, event)
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return outer
|
Decorator for subscribing a function to a specific event.
:param event: Name of the event to subscribe to.
:type event: str
:return: The outer function.
:rtype: Callable
|
entailment
|
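A minimal usage sketch of the decorator, assuming the surrounding class is an event emitter instantiated as `events` (the class name `Events` is an assumption from context):

events = Events()            # assumed emitter class from this module

@events.on('data_ready')
def report(payload):
    print('got', payload)

events.emit('data_ready', {'rows': 42})   # prints: got {'rows': 42}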
def add_event(self, func: Callable, event: str) -> None:
""" Adds a function to a event.
:param func: The function to call when event is emitted
:type func: Callable
:param event: Name of the event.
:type event: str
"""
self._events[event].add(func)
|
Adds a function to an event.
:param func: The function to call when event is emitted
:type func: Callable
:param event: Name of the event.
:type event: str
|
entailment
|
def emit(self, event: str, *args, **kwargs) -> None:
""" Emit an event and run the subscribed functions.
:param event: Name of the event.
:type event: str
        .. note::
            Passing in threads=True as a kwarg allows emitted events to run
as separate threads. This can significantly speed up code execution
depending on the code being executed.
"""
threads = kwargs.pop('threads', None)
if threads:
            spawned = [
                Thread(target=f, args=args, kwargs=kwargs) for f in
                self._event_funcs(event)
            ]
            for thread in spawned:
                thread.start()
else:
for func in self._event_funcs(event):
func(*args, **kwargs)
|
Emit an event and run the subscribed functions.
:param event: Name of the event.
:type event: str
.. note::
Passing in threads=True as a kwarg allows emitted events to run
as separate threads. This can significantly speed up code execution
depending on the code being executed.
|
entailment
|
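And the `threads=True` path from the note above: each subscribed function is started on its own `threading.Thread`, which mainly helps for I/O-bound callbacks (same `Events` assumption as before):

import time

events = Events()            # assumed emitter class, as above

@events.on('fetch')
def slow_subscriber(url):
    time.sleep(0.1)          # stand-in for blocking I/O
    print('fetched', url)

# threads=True spawns one thread per subscribed function
events.emit('fetch', 'https://example.com', threads=True)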
def emit_only(self, event: str, func_names: Union[str, List[str]], *args,
**kwargs) -> None:
""" Specifically only emits certain subscribed events.
:param event: Name of the event.
:type event: str
:param func_names: Function(s) to emit.
:type func_names: Union[ str | List[str] ]
"""
if isinstance(func_names, str):
func_names = [func_names]
for func in self._event_funcs(event):
if func.__name__ in func_names:
func(*args, **kwargs)
|
Specifically only emits certain subscribed events.
:param event: Name of the event.
:type event: str
:param func_names: Function(s) to emit.
:type func_names: Union[ str | List[str] ]
|
entailment
|
def emit_after(self, event: str) -> Callable:
""" Decorator that emits events after the function is completed.
:param event: Name of the event.
:type event: str
:return: Callable
        .. note::
This plainly just calls functions without passing params into the
subscribed callables. This is great if you want to do some kind
of post processing without the callable requiring information
before doing so.
"""
def outer(func):
@wraps(func)
def wrapper(*args, **kwargs):
returned = func(*args, **kwargs)
self.emit(event)
return returned
return wrapper
return outer
|
Decorator that emits events after the function is completed.
:param event: Name of the event.
:type event: str
:return: Callable
.. note::
This plainly just calls functions without passing params into the
subscribed callables. This is great if you want to do some kind
of post processing without the callable requiring information
before doing so.
|
entailment
|
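Usage sketch for `emit_after`: the decorated function runs to completion first, then the named event fires with no arguments, exactly as the note describes (same `Events` assumption as above):

events = Events()            # assumed emitter class, as above

@events.on('saved')
def log_save():
    print('record saved')

@events.emit_after('saved')
def save_record(record):
    # ... persist `record` somewhere ...
    return True

save_record({'id': 1})   # returns True, then prints: record saved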
def remove_event(self, func_name: str, event: str) -> None:
""" Removes a subscribed function from a specific event.
:param func_name: The name of the function to be removed.
:type func_name: str
:param event: The name of the event.
:type event: str
        :raise EventDoesntExist: if `func_name` is not subscribed to `event`.
"""
event_funcs_copy = self._events[event].copy()
for func in self._event_funcs(event):
if func.__name__ == func_name:
event_funcs_copy.remove(func)
if self._events[event] == event_funcs_copy:
err_msg = "function doesn't exist inside event {} ".format(event)
raise EventDoesntExist(err_msg)
else:
self._events[event] = event_funcs_copy
|
Removes a subscribed function from a specific event.
:param func_name: The name of the function to be removed.
:type func_name: str
:param event: The name of the event.
:type event: str
:raise EventDoesntExist: if `func_name` is not subscribed to `event`.
|
entailment
|
def _event_funcs(self, event: str) -> Iterable[Callable]:
""" Returns an Iterable of the functions subscribed to a event.
:param event: Name of the event.
:type event: str
        :return: An iterable over the subscribed functions.
:rtype: Iterable
"""
for func in self._events[event]:
yield func
|
Returns an Iterable of the functions subscribed to an event.
:param event: Name of the event.
:type event: str
:return: An iterable over the subscribed functions.
:rtype: Iterable
|
entailment
|
def _event_func_names(self, event: str) -> List[str]:
""" Returns string name of each function subscribed to an event.
:param event: Name of the event.
:type event: str
:return: Names of functions subscribed to a specific event.
:rtype: list
"""
return [func.__name__ for func in self._events[event]]
|
Returns string name of each function subscribed to an event.
:param event: Name of the event.
:type event: str
:return: Names of functions subscribed to a specific event.
:rtype: list
|
entailment
|
def _subscribed_event_count(self) -> int:
""" Returns the total amount of subscribed events.
:return: Integer amount events.
:rtype: int
"""
event_counter = Counter() # type: Dict[Any, int]
for key, values in self._events.items():
event_counter[key] = len(values)
return sum(event_counter.values())
|
Returns the total number of functions subscribed across all events.
:return: Integer count of subscriptions.
:rtype: int
|
entailment
|
def clean(input, suffix, stat="pmode1", maxiter=15, sigrej=2.0,
lower=None, upper=None, binwidth=0.3,
mask1=None, mask2=None, dqbits=None,
rpt_clean=0, atol=0.01, clobber=False, verbose=True):
r"""Remove horizontal stripes from ACS WFC post-SM4 data.
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*flt.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product. This string will be appended
to the suffix in each input filename to create the
new output filename. For example, setting `suffix='csck'`
will create '\*_csck.fits' images.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
        * 'pmode1' - SExtractor-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's ``imagestats`` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameters sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
    binwidth : float (Default = 0.3)
Histogram's bin width, in sigma units, used to sample the
distribution of pixel brightness values in order to compute the
background statistics. This parameter is aplicable *only* to *stat*
parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
mask1 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
mask2 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2, 4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to have *all* non-zero pixels in the DQ
mask considered "bad" pixels, and the corresponding image
pixels excluded from de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-9. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used) *will be* combined with user masks specified
in the `mask1` and `mask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
verbose : bool
Print informational messages. Default = True.
"""
from stsci.tools import parseinput # Optional package dependency
flist = parseinput.parseinput(input)[0]
if isinstance(mask1, str):
mlist1 = parseinput.parseinput(mask1)[0]
elif isinstance(mask1, np.ndarray):
mlist1 = [mask1.copy()]
elif mask1 is None:
mlist1 = []
elif isinstance(mask1, list):
mlist1 = []
for m in mask1:
if isinstance(m, np.ndarray):
mlist1.append(m.copy())
elif isinstance(m, str):
mlist1 += parseinput.parseinput(m)[0]
else:
raise TypeError("'mask1' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'mask1' must be a str, a numpy.ndarray, or a "
"list of these types.")
if isinstance(mask2, str):
mlist2 = parseinput.parseinput(mask2)[0]
elif isinstance(mask2, np.ndarray):
mlist2 = [mask2.copy()]
elif mask2 is None:
mlist2 = []
elif isinstance(mask2, list):
mlist2 = []
for m in mask2:
if isinstance(m, np.ndarray):
mlist2.append(m.copy())
elif isinstance(m, str):
mlist2 += parseinput.parseinput(m)[0]
else:
raise TypeError("'mask2' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'mask2' must be a str, a numpy.ndarray, or a "
"list of these types.")
n_input = len(flist)
n_mask1 = len(mlist1)
n_mask2 = len(mlist2)
if n_input == 0:
raise ValueError("No input file(s) provided or "
"the file(s) do not exist")
if n_mask1 == 0:
mlist1 = [None] * n_input
elif n_mask1 != n_input:
raise ValueError('Insufficient masks for [SCI,1]')
if n_mask2 == 0:
mlist2 = [None] * n_input
elif n_mask2 != n_input:
raise ValueError('Insufficient masks for [SCI,2]')
for image, maskfile1, maskfile2 in zip(flist, mlist1, mlist2):
# Skip processing pre-SM4 images
if (fits.getval(image, 'EXPSTART') <= MJD_SM4):
LOG.warning('{0} is pre-SM4. Skipping...'.format(image))
continue
# Data must be in ELECTRONS
if (fits.getval(image, 'BUNIT', ext=1) != 'ELECTRONS'):
LOG.warning('{0} is not in ELECTRONS. Skipping...'.format(image))
continue
# Skip processing CTECORR-ed images
if (fits.getval(image, 'PCTECORR') == 'COMPLETE'):
LOG.warning('{0} already has PCTECORR applied. '
'Skipping...'.format(image))
continue
# generate output filename for each input based on specification
# of the output suffix
output = image.replace('.fits', '_' + suffix + '.fits')
LOG.info('Processing ' + image)
# verify that the masks are either both specified or both omitted
# for full-frame (ABCD) images:
if (fits.getval(image, 'CCDAMP') == 'ABCD' and
((mask1 is not None and mask2 is None) or
(mask1 is None and mask2 is not None))):
raise ValueError("Both 'mask1' and 'mask2' must be specified "
"or omitted together.")
maskdata = _read_mask(maskfile1, maskfile2)
perform_correction(image, output, stat=stat, maxiter=maxiter,
sigrej=sigrej, lower=lower, upper=upper,
binwidth=binwidth, mask=maskdata, dqbits=dqbits,
rpt_clean=rpt_clean, atol=atol,
clobber=clobber, verbose=verbose)
LOG.info(output + ' created')
|
r"""Remove horizontal stripes from ACS WFC post-SM4 data.
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*flt.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to append to each input filename to
indicate an output product. It is inserted, with a leading
underscore, just before the '.fits' extension to create the
new output filename. For example, setting `suffix='csck'`
will create '\*_csck.fits' images.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's ``imagestats`` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameter sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
binwidth : float (Default = 0.3)
Histogram's bin width, in sigma units, used to sample the
distribution of pixel brightness values in order to compute the
background statistics. This parameter is applicable *only* to *stat*
parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
mask1 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
mask2 : str, numpy.ndarray, None, or list of these types
Mask images for ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2, 4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to have *all* non-zero pixels in the DQ
mask considered "bad" pixels, and the corresponding image
pixels excluded from de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-9. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used) *will be* combined with user masks specified
in the `mask1` and `mask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
verbose : bool
Print informational messages. Default = True.
|
entailment
|
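A hedged usage sketch for the de-striping entry point above. The module path acstools.acs_destripe is an assumption based on the function's provenance; adjust the import to wherever clean actually lives:

from acstools import acs_destripe  # assumed module path

# De-stripe all post-SM4 FLT frames, writing *_strp.fits products and
# excluding pixels with DQ flags 64 or 512 from the row statistics.
acs_destripe.clean('*flt.fits', 'strp', dqbits='~64,512', clobber=True)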
def perform_correction(image, output, stat="pmode1", maxiter=15, sigrej=2.0,
lower=None, upper=None, binwidth=0.3,
mask=None, dqbits=None,
rpt_clean=0, atol=0.01, clobber=False, verbose=True):
"""
Clean each input image.
Parameters
----------
image : str
Input image name.
output : str
Output image name.
mask : `numpy.ndarray`
Mask array.
maxiter, sigrej, clobber
See :func:`clean`.
dqbits : int, str, or None
Data quality bits to be considered as "good" (or "bad").
See :func:`clean` for more details.
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
verbose : bool
Print informational messages. Default = True.
"""
# construct the frame to be cleaned, including the
# associated data structures needed for cleaning
frame = StripeArray(image)
# combine user mask with image's DQ array:
mask = _mergeUserMaskAndDQ(frame.dq, mask, dqbits)
# Do the stripe cleaning
Success, NUpdRows, NMaxIter, Bkgrnd, STDDEVCorr, MaxCorr, Nrpt = clean_streak(
frame, stat=stat, maxiter=maxiter, sigrej=sigrej,
lower=lower, upper=upper, binwidth=binwidth, mask=mask,
rpt_clean=rpt_clean, atol=atol, verbose=verbose
)
if Success:
if verbose:
LOG.info('perform_correction - ===== Overall statistics for '
'de-stripe corrections: =====')
if (STDDEVCorr > 1.5*0.9):
LOG.warning('perform_correction - STDDEV of applied de-stripe '
'corrections ({:.3g}) exceeds\nknown bias striping '
'STDDEV of 0.9e (see ISR ACS 2011-05) more than '
'1.5 times.'.format(STDDEVCorr))
elif verbose:
LOG.info('perform_correction - STDDEV of applied de-stripe '
'corrections {:.3g}.'.format(STDDEVCorr))
if verbose:
LOG.info('perform_correction - Estimated background: '
'{:.5g}.'.format(Bkgrnd))
LOG.info('perform_correction - Maximum applied correction: '
'{:.3g}.'.format(MaxCorr))
LOG.info('perform_correction - Effective number of clipping '
'iterations: {}.'.format(NMaxIter))
LOG.info('perform_correction - Effective number of additional '
'(repeated) cleanings: {}.'.format(Nrpt))
LOG.info('perform_correction - Total number of corrected rows: '
'{}.'.format(NUpdRows))
frame.write_corrected(output, clobber=clobber)
frame.close()
|
Clean each input image.
Parameters
----------
image : str
Input image name.
output : str
Output image name.
mask : `numpy.ndarray`
Mask array.
maxiter, sigrej, clobber
See :func:`clean`.
dqbits : int, str, or None
Data quality bits to be considered as "good" (or "bad").
See :func:`clean` for more details.
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
verbose : bool
Print informational messages. Default = True.
|
entailment
|
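The 'pmode1' and 'pmode2' statistics used above are simple linear combinations of the clipped mean and median; a self-contained sketch on synthetic, skewed row data shows why they resist faint-source contamination:

import numpy as np

rng = np.random.default_rng(42)
row = rng.normal(100.0, 5.0, 4096)          # true background ~100
row[:200] += rng.exponential(50.0, 200)     # faint-source tail skews the mean

mean, median = row.mean(), np.median(row)
pmode1 = 2.5 * median - 1.5 * mean          # SExtractor-like mode estimate
pmode2 = 3.0 * median - 2.0 * mean          # Pearson's rule
print(mean, pmode1, pmode2)                 # the mode estimates sit closer to 100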
def clean_streak(image, stat="pmode1", maxiter=15, sigrej=2.0,
lower=None, upper=None, binwidth=0.3, mask=None,
rpt_clean=0, atol=0.01, verbose=True):
"""
Apply destriping algorithm to input array.
Parameters
----------
image : `StripeArray` object
Arrays are modified in-place.
stat : str
Statistics for background computations
(see :py:func:`clean` for more details)
mask : `numpy.ndarray`
Mask array. Pixels with zero values are masked out.
maxiter, sigrej : see `clean`
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
verbose : bool
Print informational messages. Default = True.
Returns
-------
Success : bool
Indicates successful execution.
NUpdRows : int
Number of updated rows in the image.
NMaxIter : int
Maximum number of clipping iterations performed on image rows.
Bkgrnd, STDDEVCorr, MaxCorr : float
Background, standard deviation of corrections and maximum correction
applied to the non-flat-field-corrected (i.e., RAW) image rows.
Nrpt : int
Number of *additional* (performed *after* initial run) cleanings.
"""
# Optional package dependency
try:
from stsci.imagestats import ImageStats
except ImportError:
ImageStats = None
if mask is not None and image.science.shape != mask.shape:
raise ValueError('Mask shape does not match science data shape')
Nrpt = 0
warn_maxiter = False
NUpdRows = 0
NMaxIter = 0
STDDEVCorr = 0.0
MaxCorr = 0.0
wmean = 0.0
stat = stat.lower().strip()
if stat not in ['pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt']:
raise ValueError("Unsupported value for 'stat'.")
# array to hold the stripe amplitudes
corr = np.empty(image.science.shape[0], dtype=np.float64)
# array to hold cumulative stripe amplitudes and latest row npix:
cumcorr = np.zeros(image.science.shape[0], dtype=np.float64)
cnpix = np.zeros(image.science.shape[0], dtype=int)
# other arrays
corr_scale = np.empty(image.science.shape[0], dtype=np.float64)
npix = np.empty(image.science.shape[0], dtype=int)
sigcorr2 = np.zeros(image.science.shape[0], dtype=np.float64)
updrows = np.zeros(image.science.shape[0], dtype=int)
# for speed-up and to reduce rounding errors in ERR computations,
# keep a copy of the squared error array:
imerr2 = image.err**2
# arrays for detecting oscillatory behaviour:
nonconvi0 = np.arange(image.science.shape[0])
corr0 = np.zeros(image.science.shape[0], dtype=np.float64)
if stat == 'pmode1':
# SExtractor-esque central value statistic; slightly sturdier against
# skewness of pixel histogram due to faint source flux
def getcorr(): return (2.5 * SMedian - 1.5 * SMean)
elif stat == 'pmode2':
# "Original Pearson"-ian estimate for mode:
def getcorr(): return (3.0 * SMedian - 2.0 * SMean)
elif stat == 'mean':
def getcorr(): return (SMean)
elif stat == 'median':
def getcorr(): return (SMedian)
elif stat == 'mode':
if ImageStats is None:
raise ImportError('stsci.imagestats is missing')
def getcorr():
imstat = ImageStats(image.science[i][BMask], 'mode',
lower=lower, upper=upper, nclip=0)
assert(imstat.npix == NPix)
return (imstat.mode)
elif stat == 'midpt':
if ImageStats is None:
raise ImportError('stsci.imagestats is missing')
def getcorr():
imstat = ImageStats(image.science[i][BMask], 'midpt',
lower=lower, upper=upper, nclip=0)
assert(imstat.npix == NPix)
return (imstat.midpt)
nmax_rpt = 1 if rpt_clean is None else max(1, rpt_clean+1)
for rpt in range(nmax_rpt):
Nrpt += 1
if verbose:
if Nrpt <= 1:
if nmax_rpt > 1:
LOG.info("clean_streak - Performing initial image bias "
"de-stripe:")
else:
LOG.info("clean_streak - Performing image bias de-stripe:")
else:
LOG.info("clean_streak - Performing repeated image bias "
"de-stripe #{}:".format(Nrpt - 1))
# reset accumulators and arrays:
corr[:] = 0.0
corr_scale[:] = 0.0
npix[:] = 0
tcorr = 0.0
tnpix = 0
tnpix2 = 0
NMaxIter = 0
# loop over rows to fit the stripe amplitude
mask_arr = None
for i in range(image.science.shape[0]):
if mask is not None:
mask_arr = mask[i]
# row-by-row iterative sigma-clipped mean;
# sigma, iters are adjustable
SMean, SSig, SMedian, NPix, NIter, BMask = djs_iterstat(
image.science[i], MaxIter=maxiter, SigRej=sigrej,
Min=lower, Max=upper, Mask=mask_arr, lineno=i+1
)
if NPix > 0:
corr[i] = getcorr()
npix[i] = NPix
corr_scale[i] = 1.0 / np.average(image.invflat[i][BMask])
sigcorr2[i] = corr_scale[i]**2 * \
np.sum((image.err[i][BMask])**2)/NPix**2
cnpix[i] = NPix
tnpix += NPix
tnpix2 += NPix*NPix
tcorr += corr[i] * NPix
if NIter > NMaxIter:
NMaxIter = NIter
if tnpix <= 0:
LOG.warning('clean_streak - No good data points; cannot de-stripe.')
return False, 0, 0, 0.0, 0.0, 0.0, 0  # match the 7-element success return
if NMaxIter >= maxiter:
warn_maxiter = True
# require that bias stripe corrections have zero mean:
# 1. compute weighted background of the flat-fielded image:
wmean = tcorr / tnpix
Bkgrnd = wmean
# 2. estimate corrections:
corr[npix > 0] -= wmean
# convert corrections to the "raw" space:
corr *= corr_scale
# weighted mean and max value for current corrections
# to the *RAW* image:
trim_npix = npix[npix > 0]
trim_corr = corr[npix > 0]
cwmean = np.sum(trim_npix * trim_corr) / tnpix
current_max_corr = np.amax(np.abs(trim_corr - cwmean))
wvar = np.sum(trim_npix * (trim_corr - cwmean) ** 2) / tnpix
uwvar = wvar / (1.0 - float(tnpix2) / float(tnpix) ** 2)
STDDEVCorr = np.sqrt(uwvar)
# keep track of total corrections:
cumcorr += corr
# apply corrections row-by-row
for i in range(image.science.shape[0]):
if npix[i] < 1:
continue
updrows[i] = 1
ffdark = (image.dark[i] + image.flash[i]) * image.invflat[i]
t1 = np.maximum(image.science[i] + ffdark, 0.0)
# stripe is constant along the row, before flatfielding;
# afterwards it has the shape of the inverse flatfield
truecorr = corr[i] * image.invflat[i]
#truecorr_sig2 = sigcorr2[i] * image.invflat[i]**2 # DEBUG
# correct the SCI extension
image.science[i] -= truecorr
t2 = np.maximum(image.science[i] + ffdark, 0.0)
T = (t1 - t2) * image.invflat[i]
# correct the ERR extension
# NOTE: np.abs() in the err array recomputation is used for safety
# only and, in principle, assuming no errors have been made
# in the derivation of the formula, np.abs() should not be
# necessary.
imerr2[i] -= T
image.err[i] = np.sqrt(np.abs(imerr2[i]))
# NOTE: for debugging purposes, one may want to uncomment
# next line:
#assert( np.all(imerr2 >= 0.0))
if atol is not None:
if current_max_corr < atol:
break
# detect oscillatory non-convergence:
nonconvi = np.nonzero(np.abs(corr) > atol)[0]
nonconvi_int = np.intersect1d(nonconvi, nonconvi0)
if (nonconvi.shape[0] == nonconvi0.shape[0] and
nonconvi.shape[0] == nonconvi_int.shape[0] and
np.all(corr0[nonconvi]*corr[nonconvi] < 0.0) and Nrpt > 1):
LOG.warning("clean_streak - Repeat bias stripe cleaning\n"
"process appears to be oscillatory for {:d} image "
"rows.\nTry to adjust 'sigrej', 'maxiter', and/or "
"'dqbits' parameters.\n"
"In addition, consider using masks or adjust "
"existing masks.".format(nonconvi.shape[0]))
break
nonconvi0 = nonconvi.copy()
corr0 = corr.copy()
if verbose:
if Nrpt <= 1:
LOG.info("clean_streak - Image bias de-stripe: Done.")
else:
LOG.info("clean_streak - Repeated (#{}) image bias de-stripe: "
"Done.".format(Nrpt - 1))
if verbose and Nrpt > 1:
LOG.info('clean_streak - ===== Repeated de-stripe "residual" '
'estimates: =====')
LOG.info('clean_streak - STDDEV of the last applied de-stripe '
'corrections {:.3g}'.format(STDDEVCorr))
LOG.info('clean_streak - Maximum of the last applied correction: '
'{:.3g}.'.format(current_max_corr))
# add (in quadratures) an error term associated with the accuracy of
# bias stripe correction:
truecorr_sig2 = ((sigcorr2 * (image.invflat ** 2).T).T).astype(image.err.dtype) # noqa
# update the ERR extension
image.err[:, :] = np.sqrt(np.abs(imerr2 + truecorr_sig2))
if warn_maxiter:
LOG.warning(
'clean_streak - Maximum number of clipping iterations '
'specified by the user ({}) has been reached.'.format(maxiter))
# weighted mean, sample variance, and max value for
# total (cumulative) corrections to the *RAW* image:
trim_cnpix = cnpix[cnpix > 0]
trim_cumcorr = cumcorr[cnpix > 0]
tcnpix = np.sum(trim_cnpix)
tcnpix2 = np.sum(trim_cnpix ** 2)
cwmean = np.sum(trim_cnpix * trim_cumcorr) / tcnpix
trim_cumcorr -= cwmean
wvar = np.sum(trim_cnpix * trim_cumcorr ** 2) / tcnpix
uwvar = wvar / (1.0 - float(tcnpix2) / float(tcnpix) ** 2)
STDDEVCorr = np.sqrt(uwvar)
MaxCorr = np.amax(np.abs(trim_cumcorr))
NUpdRows = np.sum(updrows)
return True, NUpdRows, NMaxIter, Bkgrnd, STDDEVCorr, MaxCorr, Nrpt-1
|
Apply destriping algorithm to input array.
Parameters
----------
image : `StripeArray` object
Arrays are modified in-place.
stat : str
Statistics for background computations
(see :py:func:`clean` for more details)
mask : `numpy.ndarray`
Mask array. Pixels with zero values are masked out.
maxiter, sigrej : see `clean`
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
verbose : bool
Print informational messages. Default = True.
Returns
-------
Success : bool
Indicates successful execution.
NUpdRows : int
Number of updated rows in the image.
NMaxIter : int
Maximum number of clipping iterations performed on image rows.
Bkgrnd, STDDEVCorr, MaxCorr : float
Background, standard deviation of corrections and maximum correction
applied to the non-flat-field-corrected (i.e., RAW) image rows.
Nrpt : int
Number of *additional* (performed *after* initial run) cleanings.
|
entailment
|
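The overall correction statistics in clean_streak use a pixel-count-weighted mean and a bias-corrected weighted variance; a standalone numpy sketch of that computation (the array values are illustrative, not from a real image):

import numpy as np

npix = np.array([500.0, 480.0, 510.0])   # good pixels per row
corr = np.array([0.3, -0.1, 0.2])        # per-row stripe corrections

tn = npix.sum()
wmean = np.sum(npix * corr) / tn
wvar = np.sum(npix * (corr - wmean) ** 2) / tn
uwvar = wvar / (1.0 - np.sum(npix ** 2) / tn ** 2)  # unbias the weighted variance
stddev_corr = np.sqrt(uwvar)
max_corr = np.abs(corr - wmean).max()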
def djs_iterstat(InputArr, MaxIter=10, SigRej=3.0,
Max=None, Min=None, Mask=None, lineno=None):
"""
Iterative sigma-clipping.
Parameters
----------
InputArr : `numpy.ndarray`
Input image array.
MaxIter, SigRej : see `clean`
Max, Min : float
Max and min values for clipping.
Mask : `numpy.ndarray`
Mask array to indicate pixels to reject, in addition to clipping.
Pixels where mask is zero will be rejected.
If not given, all pixels will be used.
lineno : int or None
Line number to be used in log and/or warning messages.
Returns
-------
FMean, FSig, FMedian, NPix : float
Mean, sigma, and median of final result.
NIter : int
Number of performed clipping iterations
BMask : `numpy.ndarray`
Logical image mask from the final iteration.
"""
NGood = InputArr.size
ArrShape = InputArr.shape
if NGood == 0:
imrow = _write_row_number(lineno=lineno, offset=0, pad=1)
LOG.warning('djs_iterstat - No data points given' + imrow)
return 0, 0, 0, 0, 0, None
if NGood == 1:
imrow = _write_row_number(lineno=lineno, offset=0, pad=1)
LOG.warning('djs_iterstat - Only one data point; '
'cannot compute stats{0}'.format(imrow))
return 0, 0, 0, 0, 0, None
if np.unique(InputArr).size == 1:
imrow = _write_row_number(lineno=lineno, offset=0, pad=1)
LOG.warning('djs_iterstat - Only one value in data; '
'cannot compute stats{0}'.format(imrow))
return 0, 0, 0, 0, 0, None
# Determine Max and Min
if Max is None:
Max = InputArr.max()
if Min is None:
Min = InputArr.min()
# Use all pixels if no mask is provided
if Mask is None:
Mask = np.ones(ArrShape, dtype=np.byte)
else:
Mask = Mask.copy()
# Reject those above Max and those below Min
Mask[InputArr > Max] = 0
Mask[InputArr < Min] = 0
# Count the good pixels *after* masking and Min/Max rejection, so that
# the first-pass mean and sigma are not diluted by rejected pixels
NGood = np.sum(Mask)
if NGood < 2:
imrow = _write_row_number(lineno=lineno, offset=0, pad=1)
LOG.warning('djs_iterstat - No good data points; '
'cannot compute stats{0}'.format(imrow))
return 0, 0, 0, 0, 0, None
FMean = np.sum(1.0 * InputArr * Mask) / NGood
FSig = np.sqrt(np.sum((1.0 * InputArr - FMean) ** 2 * Mask) / (NGood - 1))
NLast = -1
Iter = 0
SaveMask = Mask.copy()
if Iter >= MaxIter: # to support MaxIter=0
NLast = NGood
while (Iter < MaxIter) and (NLast != NGood) and (NGood >= 2):
LoVal = FMean - SigRej * FSig
HiVal = FMean + SigRej * FSig
Mask[InputArr < LoVal] = 0
Mask[InputArr > HiVal] = 0
NLast = NGood
npix = np.sum(Mask)
if npix >= 2:
FMean = np.sum(1.0 * InputArr * Mask) / npix
FSig = np.sqrt(np.sum(
(1.0 * InputArr - FMean) ** 2 * Mask) / (npix - 1))
SaveMask = Mask.copy() # last mask used for computation of mean
NGood = npix
Iter += 1
else:
break
logical_mask = SaveMask.astype(bool)
if NLast > 1:
FMedian = np.median(InputArr[logical_mask])
NLast = NGood
else:
FMedian = FMean
return FMean, FSig, FMedian, NLast, Iter, logical_mask
|
Iterative sigma-clipping.
Parameters
----------
InputArr : `numpy.ndarray`
Input image array.
MaxIter, SigRej : see `clean`
Max, Min : float
Max and min values for clipping.
Mask : `numpy.ndarray`
Mask array to indicate pixels to reject, in addition to clipping.
Pixels where mask is zero will be rejected.
If not given, all pixels will be used.
lineno : int or None
Line number to be used in log and/or warning messages.
Returns
-------
FMean, FSig, FMedian, NPix : float
Mean, sigma, and median of final result.
NIter : int
Number of performed clipping iterations
BMask : `numpy.ndarray`
Logical image mask from the final iteration.
|
entailment
|
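To see what the row-by-row clipping does, here is a simplified, self-contained re-implementation of the djs_iterstat loop on synthetic data with outliers (a sketch of the same idea, not the original function):

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(0.0, 1.0, 10000)
data[:50] += 100.0                            # cosmic-ray-like outliers

mask = np.ones(data.size, dtype=bool)
nlast = -1
for _ in range(15):                           # MaxIter
    mean = data[mask].mean()
    sig = data[mask].std(ddof=1)
    mask &= np.abs(data - mean) < 2.0 * sig   # SigRej = 2.0
    if mask.sum() == nlast:                   # converged: nothing new rejected
        break
    nlast = mask.sum()
print(data[mask].mean(), np.median(data[mask]))  # both near 0; outliers gone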
def run(configobj=None):
"""TEAL interface for the `clean` function."""
clean(configobj['input'],
suffix=configobj['suffix'],
stat=configobj['stat'],
maxiter=configobj['maxiter'],
sigrej=configobj['sigrej'],
lower=configobj['lower'],
upper=configobj['upper'],
binwidth=configobj['binwidth'],
mask1=configobj['mask1'],
mask2=configobj['mask2'],
dqbits=configobj['dqbits'],
rpt_clean=configobj['rpt_clean'],
atol=configobj['atol'],
clobber=configobj['clobber'],
verbose=configobj['verbose'])
|
TEAL interface for the `clean` function.
|
entailment
|
def main():
"""Command line driver."""
import argparse
parser = argparse.ArgumentParser(
prog=__taskname__,
description='Remove horizontal stripes from ACS WFC post-SM4 data.')
parser.add_argument(
'arg0', metavar='input', type=str, help='Input file')
parser.add_argument(
'arg1', metavar='suffix', type=str, help='Output suffix')
parser.add_argument(
'maxiter', nargs='?', type=int, default=15, help='Max #iterations')
parser.add_argument(
'sigrej', nargs='?', type=float, default=2.0, help='Rejection sigma')
parser.add_argument(
'--stat', type=str, default='pmode1', help='Background statistics')
parser.add_argument(
'--lower', nargs='?', type=float, default=None,
help='Lower limit for "good" pixels.')
parser.add_argument(
'--upper', nargs='?', type=float, default=None,
help='Upper limit for "good" pixels.')
parser.add_argument(
'--binwidth', type=float, default=0.3,
help='Bin width for distribution sampling.')
parser.add_argument(
'--mask1', nargs=1, type=str, default=None,
help='Mask image for [SCI,1]')
parser.add_argument(
'--mask2', nargs=1, type=str, default=None,
help='Mask image for [SCI,2]')
parser.add_argument(
'--dqbits', nargs='?', type=str, default=None,
help='DQ bits to be considered "good".')
parser.add_argument(
'--rpt_clean', type=int, default=0,
help='Number of *repeated* bias de-stripes to perform.')
parser.add_argument(
'--atol', nargs='?', type=float, default=0.01,
help='Absolute tolerance to stop *repeated* bias de-stripes.')
parser.add_argument(
'-c', '--clobber', action="store_true", help='Clobber output')
parser.add_argument(
'-q', '--quiet', action='store_true',
help='Do not print informational messages')
parser.add_argument(
'--version', action="version",
version='{0} v{1} ({2})'.format(__taskname__, __version__, __vdate__))
args = parser.parse_args()
if args.mask1:
mask1 = args.mask1[0]
else:
mask1 = args.mask1
if args.mask2:
mask2 = args.mask2[0]
else:
mask2 = args.mask2
clean(args.arg0, args.arg1, stat=args.stat, maxiter=args.maxiter,
sigrej=args.sigrej, lower=args.lower, upper=args.upper,
binwidth=args.binwidth, mask1=mask1, mask2=mask2, dqbits=args.dqbits,
rpt_clean=args.rpt_clean, atol=args.atol,
clobber=args.clobber, verbose=not args.quiet)
|
Command line driver.
|
entailment
|
def configure_arrays(self):
"""Get the SCI and ERR data."""
self.science = self.hdulist['sci', 1].data
self.err = self.hdulist['err', 1].data
self.dq = self.hdulist['dq', 1].data
if (self.ampstring == 'ABCD'):
self.science = np.concatenate(
(self.science, self.hdulist['sci', 2].data[::-1, :]), axis=1)
self.err = np.concatenate(
(self.err, self.hdulist['err', 2].data[::-1, :]), axis=1)
self.dq = np.concatenate(
(self.dq, self.hdulist['dq', 2].data[::-1, :]), axis=1)
self.ingest_dark()
self.ingest_flash()
self.ingest_flatfield()
|
Get the SCI and ERR data.
|
entailment
|
def ingest_flatfield(self):
"""Process flatfield."""
self.invflat = extract_flatfield(
self.hdulist[0].header, self.hdulist[1])
# If BIAS or DARK, set flatfield to unity
if self.invflat is None:
self.invflat = np.ones_like(self.science)
return
# Apply the flatfield if necessary
if self.flatcorr != 'COMPLETE':
self.science = self.science * self.invflat
self.err = self.err * self.invflat
|
Process flatfield.
|
entailment
|
def ingest_flash(self):
"""Process post-flash."""
self.flash = extract_flash(self.hdulist[0].header, self.hdulist[1])
# Set post-flash to zeros
if self.flash is None:
self.flash = np.zeros_like(self.science)
return
# Apply the flash subtraction if necessary.
# Not applied to ERR, to be consistent with ingest_dark()
if self.flshcorr != 'COMPLETE':
self.science = self.science - self.flash
|
Process post-flash.
|
entailment
|
def ingest_dark(self):
"""Process dark."""
self.dark = extract_dark(self.hdulist[0].header, self.hdulist[1])
# If BIAS or DARK, set dark to zeros
if self.dark is None:
self.dark = np.zeros_like(self.science)
return
# Apply the dark subtraction if necessary.
# Effect of DARK on ERR is insignificant for de-striping.
if self.darkcorr != 'COMPLETE':
self.science = self.science - self.dark
|
Process dark.
|
entailment
|
def write_corrected(self, output, clobber=False):
"""Write out the destriped data."""
# un-apply the flatfield if necessary
if self.flatcorr != 'COMPLETE':
self.science = self.science / self.invflat
self.err = self.err / self.invflat
# un-apply the post-flash if necessary
if self.flshcorr != 'COMPLETE':
self.science = self.science + self.flash
# un-apply the dark if necessary
if self.darkcorr != 'COMPLETE':
self.science = self.science + self.dark
# reverse the amp merge
if (self.ampstring == 'ABCD'):
tmp_1, tmp_2 = np.split(self.science, 2, axis=1)
self.hdulist['sci', 1].data = tmp_1.copy()
self.hdulist['sci', 2].data = tmp_2[::-1, :].copy()
tmp_1, tmp_2 = np.split(self.err, 2, axis=1)
self.hdulist['err', 1].data = tmp_1.copy()
self.hdulist['err', 2].data = tmp_2[::-1, :].copy()
else:
self.hdulist['sci', 1].data = self.science.copy()
self.hdulist['err', 1].data = self.err.copy()
# Write the output
self.hdulist.writeto(output, overwrite=clobber)
|
Write out the destriped data.
|
entailment
|
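The ABCD amp handling above merges the two SCI extensions into a single wide array (flipping SCI,2) and later splits them back; a small numpy round-trip sketch of that bookkeeping:

import numpy as np

sci1 = np.arange(12.0).reshape(3, 4)        # stand-in for hdulist['sci', 1].data
sci2 = np.arange(12.0).reshape(3, 4) + 50   # stand-in for hdulist['sci', 2].data

merged = np.concatenate((sci1, sci2[::-1, :]), axis=1)  # flip SCI,2 rows, join
left, right = np.split(merged, 2, axis=1)
assert np.array_equal(left, sci1)
assert np.array_equal(right[::-1, :], sci2)             # un-flip restores SCI,2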
def calacs(input_file, exec_path=None, time_stamps=False, temp_files=False,
verbose=False, debug=False, quiet=False, single_core=False,
exe_args=None):
"""
Run the calacs.e executable as from the shell.
By default this will run the calacs given by 'calacs.e'.
Parameters
----------
input_file : str
Name of input file.
exec_path : str, optional
The complete path to a calacs executable.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
temp_files : bool, optional
Set to True to have CALACS save temporary files.
verbose : bool, optional
Set to True for verbose output.
debug : bool, optional
Set to True to turn on debugging output.
quiet : bool, optional
Set to True for quiet output.
single_core : bool, optional
CTE correction in CALACS will by default try to use all available
CPUs on your computer. Set this to True to force the use of just
one CPU.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
"""
if exec_path:
if not os.path.exists(exec_path):
raise OSError('Executable not found: ' + exec_path)
call_list = [exec_path]
else:
call_list = ['calacs.e']
if time_stamps:
call_list.append('-t')
if temp_files:
call_list.append('-s')
if verbose:
call_list.append('-v')
if debug:
call_list.append('-d')
if quiet:
call_list.append('-q')
if single_core:
call_list.append('-1')
if not os.path.exists(input_file):
raise IOError('Input file not found: ' + input_file)
call_list.append(input_file)
if exe_args:
call_list.extend(exe_args)
subprocess.check_call(call_list)
|
Run the calacs.e executable as from the shell.
By default this will run the calacs given by 'calacs.e'.
Parameters
----------
input_file : str
Name of input file.
exec_path : str, optional
The complete path to a calacs executable.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
temp_files : bool, optional
Set to True to have CALACS save temporary files.
verbose : bool, optional
Set to True for verbose output.
debug : bool, optional
Set to True to turn on debugging output.
quiet : bool, optional
Set to True for quiet output.
single_core : bool, optional
CTE correction in CALACS will by default try to use all available
CPUs on your computer. Set this to True to force the use of just
one CPU.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
|
entailment
|
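A hedged invocation sketch for the CALACS wrapper above; it assumes calacs.e is on PATH, that the module lives at acstools.calacs, and that a raw ACS exposure with this illustrative name exists:

from acstools import calacs  # assumed module path

calacs.calacs('j12345670_raw.fits', time_stamps=True, single_core=True)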
def run(configobj=None):
"""
TEAL interface for the `calacs` function.
"""
calacs(configobj['input_file'],
exec_path=configobj['exec_path'],
time_stamps=configobj['time_stamps'],
temp_files=configobj['temp_files'],
verbose=configobj['verbose'],
debug=configobj['debug'],
quiet=configobj['quiet'],
single_core=configobj['single_core']
)
|
TEAL interface for the `calacs` function.
|
entailment
|
def has_any_permissions(self, user):
"""
Return a boolean to indicate whether the supplied user has any
permissions at all on the associated model
"""
for perm in self.get_all_model_permissions():
if self.has_specific_permission(user, perm.codename):
return True
return False
|
Return a boolean to indicate whether the supplied user has any
permissions at all on the associated model
|
entailment
|
def get_valid_parent_pages(self, user):
"""
Identifies possible parent pages for the current user by first looking
at allowed_parent_page_models() on self.model to limit options to the
correct type of page, then checking permissions on those individual
pages to make sure we have permission to add a subpage to it.
"""
# Start with empty qs
parents_qs = Page.objects.none()
# Add pages of the correct type
for pt in self.model.allowed_parent_page_models():
pt_items = Page.objects.type(pt)
parents_qs = parents_qs | pt_items
# Exclude pages that we can't add subpages to
for page in parents_qs.all():
if not page.permissions_for_user(user).can_add_subpage():
parents_qs = parents_qs.exclude(pk=page.pk)
return parents_qs
|
Identifies possible parent pages for the current user by first looking
at allowed_parent_page_models() on self.model to limit options to the
correct type of page, then checking permissions on those individual
pages to make sure we have permission to add a subpage to it.
|
entailment
|
def acsrej(input, output, exec_path='', time_stamps=False, verbose=False,
shadcorr=False, crrejtab='', crmask=False, scalense=None,
initgues='', skysub='', crsigmas='', crradius=None, crthresh=None,
badinpdq=None, newbias=False, readnoise_only=False, exe_args=None):
r"""
Run the acsrej.e executable as from the shell.
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*flt.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
output : str
Output filename.
exec_path : str, optional
The complete path to ACSREJ executable.
If not given, run ACSREJ given by 'acsrej.e'.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
verbose : bool, optional
Set to True for verbose output.
shadcorr : bool, optional
Perform shutter shading correction.
If this is False but SHADCORR is set to PERFORM in
the header of the first image, the correction will
be applied anyway.
Only use this with CCD image, not SBC MAMA.
crrejtab : str, optional
CRREJTAB to use. If not given, will use CRREJTAB
given in the primary header of the first input image.
crmask : bool, optional
Flag CR-rejected pixels in input files.
If False, will use CRMASK value in CRREJTAB.
scalense : float, optional
Multiplicative scale factor (in percent) applied to noise.
Acceptable values are 0 to 100, inclusive.
If None, will use SCALENSE from CRREJTAB.
initgues : {'med', 'min'}, optional
Scheme for computing initial-guess image.
If not given, will use INITGUES from CRREJTAB.
skysub : {'none', 'mode'}, optional
Scheme for computing sky levels to be subtracted.
If not given, will use SKYSUB from CRREJTAB.
crsigmas : str, optional
Cosmic ray rejection thresholds given in the format of 'sig1,sig2,...'.
Number of sigmas given will be the number of rejection
iterations done. At least 1 and at most 20 sigmas accepted.
If not given, will use CRSIGMAS from CRREJTAB.
crradius : float, optional
Radius (in pixels) to propagate the cosmic ray.
If None, will use CRRADIUS from CRREJTAB.
crthresh : float, optional
Cosmic ray rejection propagation threshold.
If None, will use CRTHRESH from CRREJTAB.
badinpdq : int, optional
Data quality flag used for cosmic ray rejection.
If None, will use BADINPDQ from CRREJTAB.
newbias : bool, optional
This option has been deprecated. Use ``readnoise_only``.
readnoise_only : bool, optional
ERR is just read noise, not Poisson noise.
This is used for BIAS images.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
"""
from stsci.tools import parseinput # Optional package dependency
if exec_path:
if not os.path.exists(exec_path):
raise OSError('Executable not found: ' + exec_path)
call_list = [exec_path]
else:
call_list = ['acsrej.e']
# Parse input to get list of filenames to process.
# acsrej.e only takes 'file1,file2,...'
infiles, dummy_out = parseinput.parseinput(input)
call_list.append(','.join(infiles))
call_list.append(output)
if time_stamps:
call_list.append('-t')
if verbose:
call_list.append('-v')
if shadcorr:
call_list.append('-shadcorr')
if crrejtab:
call_list += ['-table', crrejtab]
if crmask:
call_list.append('-crmask')
if scalense is not None:
if scalense < 0 or scalense > 100:
raise ValueError('SCALENSE must be 0 to 100')
call_list += ['-scale', str(scalense)]
if initgues:
if initgues not in ('med', 'min'):
raise ValueError('INITGUES must be "med" or "min"')
call_list += ['-init', initgues]
if skysub:
if skysub not in ('none', 'mode'):
raise ValueError('SKYSUB must be "none" or "mode"')
call_list += ['-sky', skysub]
if crsigmas:
call_list += ['-sigmas', crsigmas]
if crradius is not None:
call_list += ['-radius', str(crradius)]
if crthresh is not None:
call_list += ['-thresh', str(crthresh)]
if badinpdq is not None:
call_list += ['-pdq', str(badinpdq)]
# Backward-compatibility for readnoise_only.
# TODO: Remove this option entirely in a future release.
if newbias:
warnings.warn('newbias is deprecated, use readnoise_only',
ACSREJDeprecationWarning)
readnoise_only = newbias
if readnoise_only:
call_list.append('-readnoise_only')
if exe_args:
call_list.extend(exe_args)
subprocess.check_call(call_list)
|
r"""
Run the acsrej.e executable as from the shell.
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*flt.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
output : str
Output filename.
exec_path : str, optional
The complete path to ACSREJ executable.
If not given, run ACSREJ given by 'acsrej.e'.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
verbose : bool, optional
Set to True for verbose output.
shadcorr : bool, optional
Perform shutter shading correction.
If this is False but SHADCORR is set to PERFORM in
the header of the first image, the correction will
be applied anyway.
Only use this with CCD image, not SBC MAMA.
crrejtab : str, optional
CRREJTAB to use. If not given, will use CRREJTAB
given in the primary header of the first input image.
crmask : bool, optional
Flag CR-rejected pixels in input files.
If False, will use CRMASK value in CRREJTAB.
scalense : float, optional
Multiplicative scale factor (in percent) applied to noise.
Acceptable values are 0 to 100, inclusive.
If None, will use SCALENSE from CRREJTAB.
initgues : {'med', 'min'}, optional
Scheme for computing initial-guess image.
If not given, will use INITGUES from CRREJTAB.
skysub : {'none', 'mode'}, optional
Scheme for computing sky levels to be subtracted.
If not given, will use SKYSUB from CRREJTAB.
crsigmas : str, optional
Cosmic ray rejection thresholds given in the format of 'sig1,sig2,...'.
Number of sigmas given will be the number of rejection
iterations done. At least 1 and at most 20 sigmas accepted.
If not given, will use CRSIGMAS from CRREJTAB.
crradius : float, optional
Radius (in pixels) to propagate the cosmic ray.
If None, will use CRRADIUS from CRREJTAB.
crthresh : float, optional
Cosmic ray rejection propagation threshold.
If None, will use CRTHRESH from CRREJTAB.
badinpdq : int, optional
Data quality flag used for cosmic ray rejection.
If None, will use BADINPDQ from CRREJTAB.
newbias : bool, optional
This option has been deprecated. Use ``readnoise_only``.
readnoise_only : bool, optional
ERR is just read noise, not Poisson noise.
This is used for BIAS images.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
Note: Implementation uses subprocess.call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
|
entailment
|
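A hedged usage sketch for the ACSREJ wrapper above; the module path and filenames are illustrative assumptions:

from acstools import acsrej  # assumed module path

# Combine CR-split exposures with three rejection iterations.
acsrej.acsrej('*flt.fits', 'j12345670_crj.fits',
              crsigmas='6.5,5.5,4.5', verbose=True)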
def run(configobj=None):
"""
TEAL interface for the `acsrej` function.
"""
acsrej(configobj['input'],
configobj['output'],
exec_path=configobj['exec_path'],
time_stamps=configobj['time_stamps'],
verbose=configobj['verbose'],
shadcorr=configobj['shadcorr'],
crrejtab=configobj['crrejtab'],
crmask=configobj['crmask'],
scalense=configobj['scalense'],
initgues=configobj['initgues'],
skysub=configobj['skysub'],
crsigmas=configobj['crsigmas'],
crradius=configobj['crradius'],
crthresh=configobj['crthresh'],
badinpdq=configobj['badinpdq'],
readnoise_only=configobj['readnoise_only'])
|
TEAL interface for the `acsrej` function.
|
entailment
|
def monkeypatch_os_fork_functions():
"""
Replace os.fork* with wrappers that use ForkSafeLock to acquire
all locks before forking and release them afterwards.
"""
builtin_function = type(''.join)
if hasattr(os, 'fork') and isinstance(os.fork, builtin_function):
global _orig_os_fork
_orig_os_fork = os.fork
os.fork = os_fork_wrapper
if hasattr(os, 'forkpty') and isinstance(os.forkpty, builtin_function):
global _orig_os_forkpty
_orig_os_forkpty = os.forkpty
os.forkpty = os_forkpty_wrapper
|
Replace os.fork* with wrappers that use ForkSafeLock to acquire
all locks before forking and release them afterwards.
|
entailment
|
def atfork(prepare=None, parent=None, child=None):
"""A Python work-a-like of pthread_atfork.
Any time a fork() is called from Python, all 'prepare' callables will
be called in the order they were registered using this function.
After the fork (successful or not), all 'parent' callables will be called in
the parent process. If the fork succeeded, all 'child' callables will be
called in the child process.
Registered callables must not raise exceptions. If any do, the
exceptions will be printed to sys.stderr after the fork call, once
it is safe to do so.
"""
assert not prepare or callable(prepare)
assert not parent or callable(parent)
assert not child or callable(child)
_fork_lock.acquire()
try:
if prepare:
_prepare_call_list.append(prepare)
if parent:
_parent_call_list.append(parent)
if child:
_child_call_list.append(child)
finally:
_fork_lock.release()
|
A Python work-a-like of pthread_atfork.
Any time a fork() is called from Python, all 'prepare' callables will
be called in the order they were registered using this function.
After the fork (successful or not), all 'parent' callables will be called in
the parent process. If the fork succeeded, all 'child' callables will be
called in the child process.
Registered callables must not raise exceptions. If any do, the
exceptions will be printed to sys.stderr after the fork call, once
it is safe to do so.
|
entailment
|
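A typical registration sketch for the atfork hook above, protecting a module-level lock across fork; the lock name is illustrative, and atfork is assumed to be importable from this module:

import threading

_log_lock = threading.Lock()

# Acquire before fork; release in both parent and child afterwards, so
# neither process inherits a lock held by a thread that no longer exists.
atfork(prepare=_log_lock.acquire,
       parent=_log_lock.release,
       child=_log_lock.release)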
def _call_atfork_list(call_list):
"""
Given a list of callables in call_list, call them all in order and save
and return a list of sys.exc_info() tuples for each exception raised.
"""
exception_list = []
for func in call_list:
try:
func()
except:
exception_list.append(sys.exc_info())
return exception_list
|
Given a list of callables in call_list, call them all in order and save
and return a list of sys.exc_info() tuples for each exception raised.
|
entailment
|
def parent_after_fork_release():
"""
Call all parent after fork callables, release the lock and print
all prepare and parent callback exceptions.
"""
prepare_exceptions = list(_prepare_call_exceptions)
del _prepare_call_exceptions[:]
exceptions = _call_atfork_list(_parent_call_list)
_fork_lock.release()
_print_exception_list(prepare_exceptions, 'before fork')
_print_exception_list(exceptions, 'after fork from parent')
|
Call all parent after fork callables, release the lock and print
all prepare and parent callback exceptions.
|
entailment
|
def _print_exception_list(exceptions, message, output_file=None):
"""
Given a list of sys.exc_info tuples, print them all using the traceback
module, preceded by a message and separated by a blank line.
"""
output_file = output_file or sys.stderr
message = 'Exception %s:\n' % message
for exc_type, exc_value, exc_traceback in exceptions:
output_file.write(message)
traceback.print_exception(exc_type, exc_value, exc_traceback,
file=output_file)
output_file.write('\n')
|
Given a list of sys.exc_info tuples, print them all using the traceback
module, preceded by a message and separated by a blank line.
|
entailment
|
def os_forkpty_wrapper():
"""Wraps os.forkpty() to run atfork handlers."""
pid = None
prepare_to_fork_acquire()
try:
pid, fd = _orig_os_forkpty()
finally:
if pid == 0:
child_after_fork_release()
else:
parent_after_fork_release()
return pid, fd
|
Wraps os.forkpty() to run atfork handlers.
|
entailment
|
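monkeypatch_os_fork_functions above also installs an os_fork_wrapper that is not shown in this excerpt; here is a plausible reconstruction mirroring os_forkpty_wrapper without the pty file descriptor (a sketch, not the original):

def os_fork_wrapper():
    """Wraps os.fork() to run atfork handlers."""
    pid = None
    prepare_to_fork_acquire()
    try:
        pid = _orig_os_fork()
    finally:
        if pid == 0:
            child_after_fork_release()
        else:
            parent_after_fork_release()
    return pid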
def maybe(value):
"""Wraps an object with a Maybe instance.
>>> maybe("I'm a value")
Something("I'm a value")
>>> maybe(None);
Nothing
Testing for value:
>>> maybe("I'm a value").is_some()
True
>>> maybe("I'm a value").is_none()
False
>>> maybe(None).is_some()
False
>>> maybe(None).is_none()
True
Simplifying IF statements:
>>> maybe("I'm a value").get()
"I'm a value"
>>> maybe("I'm a value").or_else(lambda: "No value")
"I'm a value"
>>> maybe(None).get()
Traceback (most recent call last):
...
NothingValueError: No such element
>>> maybe(None).or_else(lambda: "value")
'value'
>>> maybe(None).or_else("value")
'value'
Wrap around values from object's attributes:
class Person(object):
def __init__(self, name):
self.name = name
eran = maybe(Person('eran'))
>>> eran.name
Something('eran')
>>> eran.phone_number
Nothing
>>> eran.phone_number.or_else('no phone number')
'no phone number'
>>> maybe(4) + 8
Something(12)
>>> maybe(4) - 2
Something(2)
>>> maybe(4) * 2
Something(8)
And methods:
>>> maybe('VALUE').lower().get()
'value'
>>> maybe(None).invalid().method().or_else('unknown')
'unknown'
Enables easy use of nested dictionaries without having to worry
whether a value is missing.
For example, let's assume we want to load some value from the
following dictionary:
nested_dict = maybe({
'store': {
'name': 'MyStore',
'departments': {
'sales': { 'head_count': '10' }
}
}
})
>>> nested_dict['store']['name'].get()
'MyStore'
>>> nested_dict['store']['address']
Nothing
>>> nested_dict['store']['address']['street'].or_else('No Address Specified')
'No Address Specified'
>>> nested_dict['store']['address']['street'].or_none() is None
True
>>> nested_dict['store']['address']['street'].or_empty_list()
[]
>>> nested_dict['store']['departments']['sales']['head_count'].or_else('0')
'10'
>>> nested_dict['store']['departments']['marketing']['head_count'].or_else('0')
'0'
"""
if isinstance(value, Maybe):
return value
if value is not None:
return Something(value)
return Nothing()
|
Wraps an object with a Maybe instance.
>>> maybe("I'm a value")
Something("I'm a value")
>>> maybe(None);
Nothing
Testing for value:
>>> maybe("I'm a value").is_some()
True
>>> maybe("I'm a value").is_none()
False
>>> maybe(None).is_some()
False
>>> maybe(None).is_none()
True
Simplifying IF statements:
>>> maybe("I'm a value").get()
"I'm a value"
>>> maybe("I'm a value").or_else(lambda: "No value")
"I'm a value"
>>> maybe(None).get()
Traceback (most recent call last):
...
NothingValueError: No such element
>>> maybe(None).or_else(lambda: "value")
'value'
>>> maybe(None).or_else("value")
'value'
Wrap around values from object's attributes:
class Person(object):
def __init__(self, name):
self.name = name
eran = maybe(Person('eran'))
>>> eran.name
Something('eran')
>>> eran.phone_number
Nothing
>>> eran.phone_number.or_else('no phone number')
'no phone number'
>>> maybe(4) + 8
Something(12)
>>> maybe(4) - 2
Something(2)
>>> maybe(4) * 2
Something(8)
And methods:
>>> maybe('VALUE').lower().get()
'value'
>>> maybe(None).invalid().method().or_else('unknown')
'unknown'
Enables easy use of nested dictionaries without having to worry
whether a value is missing.
For example, let's assume we want to load some value from the
following dictionary:
nested_dict = maybe({
'store': {
'name': 'MyStore',
'departments': {
'sales': { 'head_count': '10' }
}
}
})
>>> nested_dict['store']['name'].get()
'MyStore'
>>> nested_dict['store']['address']
Nothing
>>> nested_dict['store']['address']['street'].or_else('No Address Specified')
'No Address Specified'
>>> nested_dict['store']['address']['street'].or_none() is None
True
>>> nested_dict['store']['address']['street'].or_empty_list()
[]
>>> nested_dict['store']['departments']['sales']['head_count'].or_else('0')
'10'
>>> nested_dict['store']['departments']['marketing']['head_count'].or_else('0')
'0'
|
entailment
|
def setup(self):
'''Called after instantiation'''
TelnetHandlerBase.setup(self)
# Spawn a greenlet to handle socket input
self.greenlet_ic = eventlet.spawn(self.inputcooker)
# Note that inputcooker exits on EOF
# Sleep for 0.5 second to allow options negotiation
eventlet.sleep(0.5)
|
Called after instantiation
|
entailment
|
def inputcooker_store_queue(self, char):
"""Put the cooked data in the input queue (no locking needed)"""
if isinstance(char, (tuple, list, str)):
for v in char:
self.cookedq.put(v)
else:
self.cookedq.put(char)
|
Put the cooked data in the input queue (no locking needed)
|
entailment
|
def setup(self):
'''Setup the connection.'''
log.debug('New request from address %s, port %d', *self.client_address)
try:
self.transport.load_server_moduli()
except:
log.exception( '(Failed to load moduli -- gex will be unsupported.)' )
raise
try:
self.transport.add_server_key(self.host_key)
except:
if self.host_key is None:
log.critical('Host key not set! SSHHandler MUST define the host_key parameter.')
raise NotImplementedError('Host key not set! SSHHandler instance must define the host_key parameter. Try host_key = paramiko_ssh.getRsaKeyFile("server_rsa.key").')
try:
# Tell transport to use this object as a server
log.debug( 'Starting SSH server-side negotiation' )
self.transport.start_server(server=self)
except SSHException as e:
log.warn('SSH negotiation failed. %s', e)
raise
# Accept any requested channels
while True:
channel = self.transport.accept(20)
if channel is None:
# check to see if any thread is running
any_running = False
for c, thread in self.channels.items():
if thread.is_alive():
any_running = True
break
if not any_running:
break
else:
log.info( 'Accepted channel %s', channel )
|
Setup the connection.
|
entailment
|
def streamserver_handle(cls, socket, address):
'''Translate this class for use in a StreamServer'''
request = cls.dummy_request()
request._sock = socket
server = None
cls(request, address, server)
|
Translate this class for use in a StreamServer
|
entailment
|
def check_channel_shell_request(self, channel):
'''Request to start a shell on the given channel'''
try:
self.channels[channel].start()
except KeyError:
log.error('Requested to start a channel (%r) that was not previously set up.', channel)
return False
else:
return True
|
Request to start a shell on the given channel
|
entailment
|
def check_channel_pty_request(self, channel, term, width, height, pixelwidth,
pixelheight, modes):
'''Request to allocate a PTY terminal.'''
#self.sshterm = term
#print "term: %r, modes: %r" % (term, modes)
log.debug('PTY requested. Setting up %r.', self.telnet_handler)
pty_thread = Thread( target=self.start_pty_request, args=(channel, term, modes) )
self.channels[channel] = pty_thread
return True
|
Request to allocate a PTY terminal.
|
entailment
|
def download(url, file_name):
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-length'])
'''
if py3:
file_size = int(u.getheader("Content-Length")[0])
else:
file_size = int(u.info().getheaders("Content-Length")[0])
'''
file_exists = False
if os.path.isfile(file_name):
local_file_size = os.path.getsize(file_name)
if local_file_size == file_size:
sha1_file = file_name + '.sha1'
if os.path.isfile(sha1_file):
print('sha1 found')
with open(sha1_file) as f:
expected_sha1 = f.read()
BLOCKSIZE = 65536
sha1 = hashlib.sha1()
with open(file_name, 'rb') as f:
buff = f.read(BLOCKSIZE)
while len(buff) > 0:
sha1.update(buff)
buff = f.read(BLOCKSIZE)
if expected_sha1.strip() == sha1.hexdigest():
file_exists = True
else:
print("File corrupt. Downloading again.")
os.remove(file_name)
else:
file_exists = True
else:
print("File corrupt. Downloading again.")
os.remove(file_name)
if not file_exists:
factor = int(math.floor(math.log(file_size) / math.log(1024)))
display_file_size = str(file_size / 1024 ** factor) + \
['B', 'KB', 'MB', 'GB', 'TB', 'PB'][factor]
print("Source: " + url)
print("Destination " + file_name)
print("Size: " + display_file_size)
file_size_dl = 0
block_sz = 8192
f = open(file_name, 'wb')
pbar = ProgressBar(file_size)
for chunk in r.iter_content(chunk_size=block_sz):
if not chunk:
continue
chunk_size = len(chunk)
file_size_dl += chunk_size
f.write(chunk)
pbar.update(chunk_size)
# status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
# status = status + chr(8)*(len(status)+1)
# print(status)
f.close()
else:
print("File already exists - " + file_name)
return True
|
if py3:
file_size = int(u.getheader("Content-Length")[0])
else:
file_size = int(u.info().getheaders("Content-Length")[0])
|
entailment
|
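The inline checksum logic in download above is easy to get wrong (binary mode, hexdigest comparison); here is a self-contained helper performing the same verification, offered as a sketch rather than part of the original module:

import hashlib

def sha1_of_file(path, blocksize=65536):
    """Return the hex SHA-1 digest of a file, read in binary chunks."""
    sha1 = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(blocksize), b''):
            sha1.update(chunk)
    return sha1.hexdigest()

# Usage: compare against the stripped contents of the '.sha1' sidecar file,
# e.g. sha1_of_file(file_name) == expected_sha1.strip()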