| sentence1 (stringlengths 52 to 3.87M) | sentence2 (stringlengths 1 to 47.2k) | label (stringclasses 1 value) |
|---|---|---|
def _skyUserFromHeaderKwd(imageSet,paramDict):
"""
subtract the sky from all the chips in the imagefile that imageSet represents
imageSet is a single imageObject reference
paramDict should be the subset from an actual config object
"""
_skyValue=0.0 #this will be the sky value computed for the exposure
skyKW="MDRIZSKY" #header keyword that contains the sky that's been subtracted
#just making sure, tricky users and all, these are things that will be used
#by the sky function so we want them defined at least
try:
assert imageSet._numchips > 0, "invalid value for number of chips"
assert imageSet._filename != '', "image object filename is empty!, doh!"
assert imageSet._rootname != '', "image rootname is empty!, doh!"
assert imageSet.scienceExt !='', "image object science extension is empty!"
except AssertionError:
raise
numchips=imageSet._numchips
sciExt=imageSet.scienceExt
# User Subtraction Case, User has done own sky subtraction,
# so use the image header value for subtractedsky value
skyuser=paramDict["skyuser"]
if skyuser != '':
print("User has computed their own sky values...")
if skyuser != skyKW:
print(" ...updating MDRIZSKY with supplied value.")
for chip in range(1,numchips+1,1):
chipext = '%s,%d'%(sciExt,chip)
if not imageSet[chipext].group_member:
# skip extensions/chips that will not be processed
continue
try:
_skyValue = imageSet[chipext].header[skyuser]
except KeyError:
print("**************************************************************")
print("*")
print("* Cannot find keyword ",skyuser," in ",imageSet._filename)
print("*")
print("**************************************************************\n\n\n")
raise
_updateKW(imageSet[sciExt+','+str(chip)],
imageSet._filename,(sciExt,chip),skyKW,_skyValue)
# Update internal record with subtracted sky value
imageSet[chipext].subtractedSky = _skyValue
imageSet[chipext].computedSky = None
print("Setting ",skyKW,"=",_skyValue)
|
subtract the sky from all the chips in the imagefile that imageSet represents
imageSet is a single imageObject reference
paramDict should be the subset from an actual config object
|
entailment
|
def _skySub(imageSet,paramDict,saveFile=False):
"""
subtract the sky from all the chips in the imagefile that imageSet represents
imageSet is a single imageObject reference
paramDict should be the subset from an actual config object
if saveFile=True, then images that have been sky subtracted are saved to a predetermined output name
else, overwrite the input images with the sky-subtracted results
the output from sky subtraction is a copy of the original input file
where all the science data extensions have been sky subtracted
"""
_skyValue=0.0 #this will be the sky value computed for the exposure
skyKW="MDRIZSKY" #header keyword that contains the sky that's been subtracted
#just making sure, tricky users and all, these are things that will be used
#by the sky function so we want them defined at least
try:
assert imageSet._numchips > 0, "invalid value for number of chips"
assert imageSet._filename != '', "image object filename is empty!, doh!"
assert imageSet._rootname != '', "image rootname is empty!, doh!"
assert imageSet.scienceExt !='', "image object science extension is empty!"
except AssertionError:
raise
numchips=imageSet._numchips
sciExt=imageSet.scienceExt
# User Subtraction Case, User has done own sky subtraction,
# so use the image header value for subtractedsky value
skyuser=paramDict["skyuser"]
if skyuser != '':
print("User has computed their own sky values...")
if skyuser != skyKW:
print(" ...updating MDRIZSKY with supplied value.")
for chip in range(1,numchips+1,1):
try:
chipext = '%s,%d'%(sciExt,chip)
_skyValue = imageSet[chipext].header[skyuser]
except KeyError:
print("**************************************************************")
print("*")
print("* Cannot find keyword ",skyuser," in ",imageSet._filename)
print("*")
print("**************************************************************\n\n\n")
raise
_updateKW(imageSet[sciExt+','+str(chip)],imageSet._filename,(sciExt,chip),skyKW,_skyValue)
# Update internal record with subtracted sky value
imageSet[chipext].subtractedSky = _skyValue
imageSet[chipext].computedSky = None
print("Setting ",skyKW,"=",_skyValue)
else:
# Compute our own sky values and record the values for use later.
# The minimum sky value from all the science chips in the exposure
# is used as the reference sky for each chip
log.info("Computing minimum sky ...")
minSky=[] #store the sky for each chip
minpscale = []
for chip in range(1,numchips+1,1):
myext=sciExt+","+str(chip)
#add the data back into the chip, leave it there til the end of this function
imageSet[myext].data=imageSet.getData(myext)
image=imageSet[myext]
_skyValue= _computeSky(image, paramDict, memmap=False)
#scale the sky value by the area on sky
# account for the case where no IDCSCALE has been set, due to a
# lack of IDCTAB or to 'coeffs=False'.
pscale=imageSet[myext].wcs.idcscale
if pscale is None:
log.warning("No Distortion coefficients available...using "
"default plate scale.")
pscale = imageSet[myext].wcs.pscale
_scaledSky=_skyValue / (pscale**2)
#_skyValue=_scaledSky
minSky.append(_scaledSky)
minpscale.append(pscale)
_skyValue = min(minSky)
_reportedSky = _skyValue*(minpscale[minSky.index(_skyValue)]**2)
log.info("Minimum sky value for all chips %s" % _reportedSky)
#now subtract that value from all the chips in the exposure
#and update the chips header keyword with the sub
for chip in range(1,numchips+1,1):
image=imageSet[sciExt,chip]
myext = sciExt+","+str(chip)
# account for the case where no IDCSCALE has been set, due to a
# lack of IDCTAB or to 'coeffs=False'.
idcscale = image.wcs.idcscale
if idcscale is None: idcscale = image.wcs.pscale
_scaledSky=_skyValue * (idcscale**2)
image.subtractedSky = _scaledSky
image.computedSky = _scaledSky
log.info("Using sky from chip %d: %f\n" % (chip,_scaledSky))
###_subtractSky(image,(_scaledSky))
# Update the header so that the keyword in the image is
#the sky value which should be subtracted from the image
_updateKW(image,imageSet._filename,(sciExt,chip),skyKW,_scaledSky)
|
subtract the sky from all the chips in the imagefile that imageSet represents
imageSet is a single imageObject reference
paramDict should be the subset from an actual config object
if saveFile=True, then images that have been sky subtracted are saved to a predetermined output name
else, overwrite the input images with the sky-subtracted results
the output from sky subtraction is a copy of the original input file
where all the science data extensions have been sky subtracted
|
entailment
|
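The per-chip scaling in `_skySub` is easier to follow in isolation: each chip's computed sky is normalized by its plate scale squared (sky per unit sky area), the minimum across all chips becomes the reference for the exposure, and that reference is rescaled back into each chip's own pixel scale before subtraction. A minimal sketch of just that arithmetic, with made-up sky values and plate scales:

```python
# Minimal sketch of the min-sky scaling used in _skySub (illustrative numbers only)
sky_per_chip = [12.0, 10.5]      # hypothetical computed sky (counts/pixel) per chip
pscale_per_chip = [0.05, 0.04]   # hypothetical plate scales (arcsec/pixel) per chip

# Normalize each sky value by the pixel area on the sky (counts per arcsec**2)
scaled = [s / (p ** 2) for s, p in zip(sky_per_chip, pscale_per_chip)]

# The smallest area-normalized sky becomes the reference sky for the exposure
ref_sky = min(scaled)

# Rescale the reference back into each chip's own pixel scale before subtraction
per_chip_sky = [ref_sky * (p ** 2) for p in pscale_per_chip]
print(per_chip_sky)
```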
def _computeSky(image, skypars, memmap=False):
"""
Compute the sky value for the data array passed to the function
image is a fits object which contains the data and the header
for one image extension
skypars is passed in as paramDict
"""
#this object contains the returned values from the image stats routine
_tmp = imagestats.ImageStats(image.data,
fields = skypars['skystat'],
lower = skypars['skylower'],
upper = skypars['skyupper'],
nclip = skypars['skyclip'],
lsig = skypars['skylsigma'],
usig = skypars['skyusigma'],
binwidth = skypars['skywidth']
)
_skyValue = _extractSkyValue(_tmp,skypars['skystat'].lower())
log.info(" Computed sky value/pixel for %s: %s "%
(image.rootname, _skyValue))
del _tmp
return _skyValue
|
Compute the sky value for the data array passed to the function
image is a fits object which contains the data and the header
for one image extension
skypars is passed in as paramDict
|
entailment
|
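`_computeSky` only reads a handful of keys from `skypars`, so a plain dict with those keys is enough to exercise it. A minimal sketch, assuming `image` is a chip object exposing `.data` and `.rootname` as used above; the values shown are illustrative, not recommended defaults:

```python
# Hypothetical subset of sky parameters consumed by _computeSky / imagestats.ImageStats
skypars = {
    'skystat': 'mode',    # statistic to report (whatever imagestats supports, e.g. 'mean' or 'mode')
    'skylower': None,     # lower/upper pixel-value limits for the statistics
    'skyupper': None,
    'skyclip': 5,         # number of clipping iterations
    'skylsigma': 4.0,     # lower/upper sigma-clipping limits
    'skyusigma': 4.0,
    'skywidth': 0.1,      # histogram bin width
}
sky = _computeSky(image, skypars, memmap=False)
```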
def _subtractSky(image,skyValue,memmap=False):
"""
subtract the given sky value from each the data array
that has been passed. image is a fits object that
contains the data and header for one image extension
"""
try:
np.subtract(image.data,skyValue,image.data)
except IOError:
print("Unable to perform sky subtraction on data array")
raise IOError
|
subtract the given sky value from each the data array
that has been passed. image is a fits object that
contains the data and header for one image extension
|
entailment
|
def _updateKW(image, filename, exten, skyKW, Value):
"""update the header with the kw,value"""
# Update the value in memory
image.header[skyKW] = Value
# Now update the value on disk
if isinstance(exten,tuple):
strexten = '[%s,%s]'%(exten[0],str(exten[1]))
else:
strexten = '[%s]'%(exten)
log.info('Updating keyword %s in %s' % (skyKW, filename + strexten))
fobj = fileutil.openImage(filename, mode='update', memmap=False)
fobj[exten].header[skyKW] = (Value, 'Sky value computed by AstroDrizzle')
fobj.close()
|
update the header with the kw,value
|
entailment
|
def _addDefaultSkyKW(imageObjList):
"""Add MDRIZSKY keyword to "commanded" SCI headers of all input images,
if that keyword does not already exist.
"""
skyKW = "MDRIZSKY"
Value = 0.0
for imageSet in imageObjList:
fname = imageSet._filename
numchips=imageSet._numchips
sciExt=imageSet.scienceExt
fobj = fileutil.openImage(fname, mode='update', memmap=False)
for chip in range(1,numchips+1,1):
ext = (sciExt,chip)
if not imageSet[ext].group_member:
# skip over extensions not used in processing
continue
if skyKW not in fobj[ext].header:
fobj[ext].header[skyKW] = (Value, 'Sky value computed by AstroDrizzle')
log.info("MDRIZSKY keyword not found in the %s[%s,%d] header."%(
fname,sciExt,chip))
log.info(" Adding MDRIZSKY to header with default value of 0.")
fobj.close()
|
Add MDRIZSKY keyword to "commanded" SCI headers of all input images,
if that keyword does not already exist.
|
entailment
|
def help(file=None):
"""
Print out syntax help for running astrodrizzle
Parameters
----------
file : str (Default = None)
If given, write out help to the filename specified by this parameter
Any previously existing file with this name will be deleted before
writing out the help.
"""
helpstr = getHelpAsString(docstring=True, show_ver = True)
if file is None:
print(helpstr)
else:
if os.path.exists(file): os.remove(file)
f = open(file, mode = 'w')
f.write(helpstr)
f.close()
|
Print out syntax help for running astrodrizzle
Parameters
----------
file : str (Default = None)
If given, write out help to the filename specified by this parameter
Any previously existing file with this name will be deleted before
writing out the help.
|
entailment
|
def rd2xy(input,ra=None,dec=None,coordfile=None,colnames=None,
precision=6,output=None,verbose=True):
""" Primary interface to perform coordinate transformations from
sky (RA, Dec) to pixel coordinates using STWCS and full distortion models
read from the input image header.
"""
single_coord = False
if coordfile is not None:
if colnames in blank_list:
colnames = ['c1','c2']
elif isinstance(colnames,type('a')):
colnames = colnames.split(',')
# convert input file coordinates to lists of decimal degrees values
xlist,ylist = tweakutils.readcols(coordfile,cols=colnames)
else:
if isinstance(ra,np.ndarray):
ralist = ra.tolist()
declist = dec.tolist()
elif not isinstance(ra, list):
ralist = [ra]
declist = [dec]
else:
ralist = ra
declist = dec
xlist = [0]*len(ralist)
ylist = [0]*len(ralist)
if len(xlist) == 1:
single_coord = True
for i,(r,d) in enumerate(zip(ralist,declist)):
# convert input value into decimal degrees value
xval,yval = tweakutils.parse_skypos(r,d)
xlist[i] = xval
ylist[i] = yval
# start by reading in WCS+distortion info for input image
inwcs = wcsutil.HSTWCS(input)
if inwcs.wcs.is_unity():
print("####\nNo valid WCS found in {}.\n Results may be invalid.\n####\n".format(input))
# Now, convert sky coordinates into pixel coordinates
try:
outx,outy = inwcs.all_world2pix(xlist,ylist,1)
except RuntimeError:
outx,outy = inwcs.wcs_world2pix(xlist,ylist,1)
# add formatting based on precision here...
xstr = []
ystr = []
fmt = "%."+repr(precision)+"f"
for x,y in zip(outx,outy):
xstr.append(fmt%x)
ystr.append(fmt%y)
if verbose or (not verbose and util.is_blank(output)):
print ('# Coordinate transformations for ',input)
print('# X Y RA Dec\n')
for x,y,r,d in zip(xstr,ystr,xlist,ylist):
print("%s %s %s %s"%(x,y,r,d))
# Create output file, if specified
if output:
f = open(output,mode='w')
f.write("# Coordinates converted from %s\n"%input)
for x,y in zip(xstr,ystr):
f.write('%s %s\n'%(x,y))
f.close()
print('Wrote out results to: ',output)
if single_coord:
outx = outx[0]
outy = outy[0]
return outx, outy
|
Primary interface to perform coordinate transformations from
sky (RA, Dec) to pixel coordinates using STWCS and full distortion models
read from the input image header.
|
entailment
|
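`rd2xy` converts sky positions (RA, Dec) into pixel positions via `all_world2pix`. A minimal usage sketch for a single coordinate; the filename is hypothetical and must point to a distortion-calibrated image:

```python
# Hypothetical single-coordinate transformation from (RA, Dec) to (x, y)
x, y = rd2xy('j8bt06nyq_flt.fits[sci,1]', ra=5.63520, dec=-72.06265)
print(x, y)
```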
def create_astrometric_catalog(inputs, **pars):
"""Create an astrometric catalog that covers the inputs' field-of-view.
Parameters
----------
input : str, list
Filenames of images to be aligned to astrometric catalog
catalog : str, optional
Name of catalog to extract astrometric positions for sources in the
input images' field-of-view. Default: GAIADR2. Options available are
documented on the catalog web page.
output : str, optional
Filename to give to the astrometric catalog read in from the master
catalog web service. If None, no file will be written out.
gaia_only : bool, optional
Specify whether or not to only use sources from GAIA in output catalog
Default: False
existing_wcs : `~stwcs.wcsutil.HSTWCS`
existing WCS object specified by the user
Notes
-----
This function will point to astrometric catalog web service defined
through the use of the ASTROMETRIC_CATALOG_URL environment variable.
Returns
-------
ref_table : `~astropy.table.Table`
Astropy Table object of the catalog
"""
# interpret input parameters
catalog = pars.get("catalog", 'GAIADR2')
output = pars.get("output", 'ref_cat.ecsv')
gaia_only = pars.get("gaia_only", False)
table_format = pars.get("table_format", 'ascii.ecsv')
existing_wcs = pars.get("existing_wcs", None)
inputs, _ = parseinput.parseinput(inputs)
# start by creating a composite field-of-view for all inputs
# This default output WCS will have the same plate-scale and orientation
# as the first chip in the list, which for WFPC2 data means the PC.
# Fortunately, for alignment, this doesn't matter since no resampling of
# data will be performed
if existing_wcs:
outwcs = existing_wcs
else:
outwcs = build_reference_wcs(inputs)
radius = compute_radius(outwcs)
ra, dec = outwcs.wcs.crval
# perform query for this field-of-view
ref_dict = get_catalog(ra, dec, sr=radius, catalog=catalog)
colnames = ('ra', 'dec', 'mag', 'objID', 'GaiaID')
col_types = ('f8', 'f8', 'f4', 'U25', 'U25')
ref_table = Table(names=colnames, dtype=col_types)
# Add catalog name as meta data
ref_table.meta['catalog'] = catalog
ref_table.meta['gaia_only'] = gaia_only
# rename coordinate columns to be consistent with tweakwcs
ref_table.rename_column('ra', 'RA')
ref_table.rename_column('dec', 'DEC')
# extract just the columns we want...
num_sources = 0
for source in ref_dict:
if 'GAIAsourceID' in source:
g = source['GAIAsourceID']
if gaia_only and g.strip() == '':
continue
else:
g = "-1" # indicator for no source ID extracted
r = float(source['ra'])
d = float(source['dec'])
m = -999.9 # float(source['mag'])
o = source['objID']
num_sources += 1
ref_table.add_row((r, d, m, o, g))
# Write out table to a file, if specified
if output:
ref_table.write(output, format=table_format)
log.info("Created catalog '{}' with {} sources".format(output, num_sources))
return ref_table
|
Create an astrometric catalog that covers the inputs' field-of-view.
Parameters
----------
input : str, list
Filenames of images to be aligned to astrometric catalog
catalog : str, optional
Name of catalog to extract astrometric positions for sources in the
input images' field-of-view. Default: GAIADR2. Options available are
documented on the catalog web page.
output : str, optional
Filename to give to the astrometric catalog read in from the master
catalog web service. If None, no file will be written out.
gaia_only : bool, optional
Specify whether or not to only use sources from GAIA in output catalog
Default: False
existing_wcs : `~stwcs.wcsutil.HSTWCS`
existing WCS object specified by the user
Notes
-----
This function will point to astrometric catalog web service defined
through the use of the ASTROMETRIC_CATALOG_URL environment variable.
Returns
-------
ref_table : `~astropy.table.Table`
Astropy Table object of the catalog
|
entailment
|
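A minimal usage sketch for `create_astrometric_catalog`; the filenames are hypothetical, and the query is served through the service defined by the ASTROMETRIC_CATALOG_URL environment variable as noted in the docstring:

```python
# Hypothetical call: build a GAIA DR2 reference catalog covering two exposures
ref_table = create_astrometric_catalog(['j8bt06nyq_flc.fits', 'j8bt06nzq_flc.fits'],
                                        catalog='GAIADR2',
                                        output='gaia_ref_cat.ecsv')
print(len(ref_table), ref_table.colnames)
```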
def build_reference_wcs(inputs, sciname='sci'):
"""Create the reference WCS based on all the inputs for a field"""
# start by creating a composite field-of-view for all inputs
wcslist = []
for img in inputs:
nsci = countExtn(img)
for num in range(nsci):
extname = (sciname, num + 1)
if sciname == 'sci':
extwcs = wcsutil.HSTWCS(img, ext=extname)
else:
# Working with HDRLET as input and do the best we can...
extwcs = read_hlet_wcs(img, ext=extname)
wcslist.append(extwcs)
# This default output WCS will have the same plate-scale and orientation
# as the first chip in the list, which for WFPC2 data means the PC.
# Fortunately, for alignment, this doesn't matter since no resampling of
# data will be performed
outwcs = utils.output_wcs(wcslist)
return outwcs
|
Create the reference WCS based on all the inputs for a field
|
entailment
|
def get_catalog(ra, dec, sr=0.1, fmt='CSV', catalog='GSC241'):
""" Extract catalog from VO web service.
Parameters
----------
ra : float
Right Ascension (RA) of center of field-of-view (in decimal degrees)
dec : float
Declination (Dec) of center of field-of-view (in decimal degrees)
sr : float, optional
Search radius (in decimal degrees) from field-of-view center to use
for sources from catalog. Default: 0.1 degrees
fmt : str, optional
Format of output catalog to be returned. Options are determined by
web-service, and currently include (Default: CSV):
VOTABLE (the service's own default) | HTML | KML | CSV | TSV | JSON | TEXT
catalog : str, optional
Name of catalog to query, as defined by web-service. Default: 'GSC241'
Returns
-------
csv : CSV object
CSV object of returned sources with all columns as provided by catalog
"""
serviceType = 'vo/CatalogSearch.aspx'
spec_str = 'RA={}&DEC={}&SR={}&FORMAT={}&CAT={}&MINDET=5'
headers = {'Content-Type': 'text/csv'}
spec = spec_str.format(ra, dec, sr, fmt, catalog)
serviceUrl = '{}/{}?{}'.format(SERVICELOCATION, serviceType, spec)
rawcat = requests.get(serviceUrl, headers=headers)
r_contents = rawcat.content.decode() # convert from bytes to a String
rstr = r_contents.split('\r\n')
# remove initial line describing the number of sources returned
# CRITICAL to proper interpretation of CSV data
del rstr[0]
r_csv = csv.DictReader(rstr)
return r_csv
|
Extract catalog from VO web service.
Parameters
----------
ra : float
Right Ascension (RA) of center of field-of-view (in decimal degrees)
dec : float
Declination (Dec) of center of field-of-view (in decimal degrees)
sr : float, optional
Search radius (in decimal degrees) from field-of-view center to use
for sources from catalog. Default: 0.1 degrees
fmt : str, optional
Format of output catalog to be returned. Options are determined by
web-service, and currently include (Default: CSV):
VOTABLE (the service's own default) | HTML | KML | CSV | TSV | JSON | TEXT
catalog : str, optional
Name of catalog to query, as defined by web-service. Default: 'GSC241'
Returns
-------
csv : CSV object
CSV object of returned sources with all columns as provided by catalog
|
entailment
|
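`get_catalog` returns a `csv.DictReader`, so results are consumed row by row. A minimal sketch for a field centered on an arbitrary position (the coordinates are illustrative):

```python
# Hypothetical cone search around RA=150.1, Dec=2.2 with the default 0.1 degree radius
sources = get_catalog(150.1, 2.2, sr=0.1, catalog='GSC241')
for row in sources:
    # each row is a dict keyed by the catalog's column names, e.g. 'ra', 'dec', 'objID'
    print(row['ra'], row['dec'])
    break
```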
def compute_radius(wcs):
"""Compute the radius from the center to the furthest edge of the WCS."""
ra, dec = wcs.wcs.crval
img_center = SkyCoord(ra=ra * u.degree, dec=dec * u.degree)
wcs_foot = wcs.calc_footprint()
img_corners = SkyCoord(ra=wcs_foot[:, 0] * u.degree,
dec=wcs_foot[:, 1] * u.degree)
radius = img_center.separation(img_corners).max().value
return radius
|
Compute the radius from the center to the furthest edge of the WCS.
|
entailment
|
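The radius returned by `compute_radius` is the largest on-sky separation between the WCS reference point (CRVAL) and the footprint corners, so it can feed directly into `get_catalog` as the cone-search radius, mirroring what `create_astrometric_catalog` does above. A short sketch, assuming `wcs` is an HSTWCS describing the field:

```python
# Use the field-of-view radius (decimal degrees) as the cone-search radius
radius = compute_radius(wcs)
ra, dec = wcs.wcs.crval
sources = get_catalog(ra, dec, sr=radius)
```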
def find_gsc_offset(image, input_catalog='GSC1', output_catalog='GAIA'):
"""Find the GSC to GAIA offset based on guide star coordinates
Parameters
----------
image : str
Filename of image to be processed.
Returns
-------
delta_ra, delta_dec : tuple of floats
Offset in decimal degrees of image based on correction to guide star
coordinates relative to GAIA.
"""
serviceType = "GSCConvert/GSCconvert.aspx"
spec_str = "TRANSFORM={}-{}&IPPPSSOOT={}"
if 'rootname' in pf.getheader(image):
ippssoot = pf.getval(image, 'rootname').upper()
else:
ippssoot = fu.buildNewRootname(image).upper()
spec = spec_str.format(input_catalog, output_catalog, ippssoot)
serviceUrl = "{}/{}?{}".format(SERVICELOCATION, serviceType, spec)
rawcat = requests.get(serviceUrl)
if not rawcat.ok:
log.info("Problem accessing service with:\n{{}".format(serviceUrl))
raise ValueError
delta_ra = delta_dec = None
tree = BytesIO(rawcat.content)
for _, element in etree.iterparse(tree):
if element.tag == 'deltaRA':
delta_ra = float(element.text)
elif element.tag == 'deltaDEC':
delta_dec = float(element.text)
return delta_ra, delta_dec
|
Find the GSC to GAIA offset based on guide star coordinates
Parameters
----------
image : str
Filename of image to be processed.
Returns
-------
delta_ra, delta_dec : tuple of floats
Offset in decimal degrees of image based on correction to guide star
coordinates relative to GAIA.
|
entailment
|
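A minimal usage sketch for `find_gsc_offset`; the filename is hypothetical, and the returned offsets are decimal degrees (either may be `None` if the service response lacks the expected tags):

```python
# Hypothetical GSC1 -> GAIA guide-star correction for one exposure
delta_ra, delta_dec = find_gsc_offset('j8bt06nyq_flt.fits',
                                      input_catalog='GSC1', output_catalog='GAIA')
print(delta_ra, delta_dec)
```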
def extract_sources(img, **pars):
"""Use photutils to find sources in image based on segmentation.
Parameters
----------
dqmask : ndarray
Bitmask which identifies whether a pixel should be used (1) in source
identification or not(0). If provided, this mask will be applied to the
input array prior to source identification.
fwhm : float
Full-width half-maximum (fwhm) of the PSF in pixels.
threshold : float or None
Value from the image which serves as the limit for determining sources.
If None, compute a default value of (background+5*rms(background)).
If threshold < 0.0, use absolute value as scaling factor for default value.
source_box : int
Size of box (in pixels) which defines the minimum size of a valid source.
classify : bool
Specify whether or not to apply classification based on invariant moments
of each source to determine whether or not a source is likely to be a
cosmic-ray, and not include those sources in the final catalog.
centering_mode : str
"segmentaton" or "starfind"
Algorithm to use when computing the positions of the detected sources.
Centering will only take place after `threshold` has been determined, and
sources are identified using segmentation. Centering using `segmentation`
will rely on `photutils.segmentation.source_properties` to generate the
properties for the source catalog. Centering using `starfind` will use
`photutils.IRAFStarFinder` to characterize each source in the catalog.
nlargest : int, None
Number of largest (brightest) sources in each chip/array to measure
when using 'starfind' mode.
output : str
If specified, write out the catalog of sources to the file with this name.
plot : bool
Specify whether or not to create a plot of the sources on a view of the image.
vmax : float
If plotting the sources, scale the image to this maximum value.
"""
fwhm = pars.get('fwhm', 3.0)
threshold = pars.get('threshold', None)
source_box = pars.get('source_box', 7)
classify = pars.get('classify', True)
output = pars.get('output', None)
plot = pars.get('plot', False)
vmax = pars.get('vmax', None)
centering_mode = pars.get('centering_mode', 'starfind')
deblend = pars.get('deblend', False)
dqmask = pars.get('dqmask', None)
nlargest = pars.get('nlargest', None)
# apply any provided dqmask for segmentation only
if dqmask is not None:
imgarr = img.copy()
imgarr[dqmask] = 0
else:
imgarr = img
bkg_estimator = MedianBackground()
bkg = None
exclude_percentiles = [10, 25, 50, 75]
for percentile in exclude_percentiles:
try:
bkg = Background2D(imgarr, (50, 50), filter_size=(3, 3),
bkg_estimator=bkg_estimator,
exclude_percentile=percentile)
# If it succeeds, stop and use that value
bkg_rms = (5. * bkg.background_rms)
bkg_rms_mean = bkg.background.mean() + 5. * bkg_rms.std()
default_threshold = bkg.background + bkg_rms
if threshold is None or threshold < 0.0:
if threshold is not None and threshold < 0.0:
threshold = -1 * threshold * default_threshold
log.info("{} based on {}".format(threshold.max(), default_threshold.max()))
bkg_rms_mean = threshold.max()
else:
threshold = default_threshold
else:
bkg_rms_mean = 3. * threshold
if bkg_rms_mean < 0:
bkg_rms_mean = 0.
break
except Exception:
bkg = None
# If Background2D does not work at all, define default scalar values for
# the background to be used in source identification
if bkg is None:
bkg_rms_mean = max(0.01, imgarr.min())
bkg_rms = bkg_rms_mean * 5
sigma = fwhm * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sigma, x_size=source_box, y_size=source_box)
kernel.normalize()
segm = detect_sources(imgarr, threshold, npixels=source_box,
filter_kernel=kernel)
if deblend:
segm = deblend_sources(imgarr, segm, npixels=5,
filter_kernel=kernel, nlevels=16,
contrast=0.01)
# If classify is turned on, it should modify the segmentation map
if classify:
cat = source_properties(imgarr, segm)
if len(cat) > 0:
# Remove likely cosmic-rays based on central_moments classification
bad_srcs = np.where(classify_sources(cat) == 0)[0] + 1
segm.remove_labels(bad_srcs) # CAUTION: May be time-consuming!!!
# convert segm to mask for daofind
if centering_mode == 'starfind':
src_table = None
# daofind = IRAFStarFinder(fwhm=fwhm, threshold=5.*bkg.background_rms_median)
log.info("Setting up DAOStarFinder with: \n fwhm={} threshold={}".format(fwhm, bkg_rms_mean))
daofind = DAOStarFinder(fwhm=fwhm, threshold=bkg_rms_mean)
# Identify nbrightest/largest sources
if nlargest is not None:
if nlargest > len(segm.labels):
nlargest = len(segm.labels)
large_labels = np.flip(np.argsort(segm.areas) + 1)[: nlargest]
log.info("Looking for sources in {} segments".format(len(segm.labels)))
for label in segm.labels:
if nlargest is not None and label not in large_labels:
continue # Move on to the next segment
# Get slice definition for the segment with this label
seg_slice = segm.segments[label - 1].slices
seg_yoffset = seg_slice[0].start
seg_xoffset = seg_slice[1].start
# Define raw data from this slice
detection_img = img[seg_slice]
# zero out any pixels which do not have this segments label
detection_img[np.where(segm.data[seg_slice] == 0)] = 0
# Detect sources in this specific segment
seg_table = daofind(detection_img)
# Pick out brightest source only
if src_table is None and len(seg_table) > 0:
# Initialize final master source list catalog
src_table = Table(names=seg_table.colnames,
dtype=[dt[1] for dt in seg_table.dtype.descr])
if len(seg_table) > 0:
max_row = np.where(seg_table['peak'] == seg_table['peak'].max())[0][0]
# Add row for detected source to master catalog
# apply offset to slice to convert positions into full-frame coordinates
seg_table['xcentroid'] += seg_xoffset
seg_table['ycentroid'] += seg_yoffset
src_table.add_row(seg_table[max_row])
else:
cat = source_properties(img, segm)
src_table = cat.to_table()
# Make column names consistent with IRAFStarFinder column names
src_table.rename_column('source_sum', 'flux')
src_table.rename_column('source_sum_err', 'flux_err')
if src_table is not None:
log.info("Total Number of detected sources: {}".format(len(src_table)))
else:
log.info("No detected sources!")
return None, None
# Move 'id' column from first to last position
# Makes it consistent for remainder of code
cnames = src_table.colnames
cnames.append(cnames[0])
del cnames[0]
tbl = src_table[cnames]
if output:
tbl['xcentroid'].info.format = '.10f' # optional format
tbl['ycentroid'].info.format = '.10f'
tbl['flux'].info.format = '.10f'
if not output.endswith('.cat'):
output += '.cat'
tbl.write(output, format='ascii.commented_header')
log.info("Wrote source catalog: {}".format(output))
if plot and plt is not None:
norm = None
if vmax is None:
norm = ImageNormalize(stretch=SqrtStretch())
fig, ax = plt.subplots(2, 2, figsize=(8, 8))
ax[0][0].imshow(imgarr, origin='lower', cmap='Greys_r', norm=norm, vmax=vmax)
ax[0][1].imshow(segm, origin='lower', cmap=segm.cmap(random_state=12345))
ax[0][1].set_title('Segmentation Map')
ax[1][0].imshow(bkg.background, origin='lower')
if not isinstance(threshold, float):
ax[1][1].imshow(threshold, origin='lower')
return tbl, segm
|
Use photutils to find sources in image based on segmentation.
Parameters
----------
dqmask : ndarray
Bitmask which identifies whether a pixel should be used (1) in source
identification or not(0). If provided, this mask will be applied to the
input array prior to source identification.
fwhm : float
Full-width half-maximum (fwhm) of the PSF in pixels.
threshold : float or None
Value from the image which serves as the limit for determining sources.
If None, compute a default value of (background+5*rms(background)).
If threshold < 0.0, use absolute value as scaling factor for default value.
source_box : int
Size of box (in pixels) which defines the minimum size of a valid source.
classify : bool
Specify whether or not to apply classification based on invariant moments
of each source to determine whether or not a source is likely to be a
cosmic-ray, and not include those sources in the final catalog.
centering_mode : str
"segmentaton" or "starfind"
Algorithm to use when computing the positions of the detected sources.
Centering will only take place after `threshold` has been determined, and
sources are identified using segmentation. Centering using `segmentation`
will rely on `photutils.segmentation.source_properties` to generate the
properties for the source catalog. Centering using `starfind` will use
`photutils.IRAFStarFinder` to characterize each source in the catalog.
nlargest : int, None
Number of largest (brightest) sources in each chip/array to measure
when using 'starfind' mode.
output : str
If specified, write out the catalog of sources to the file with this name.
plot : bool
Specify whether or not to create a plot of the sources on a view of the image.
vmax : float
If plotting the sources, scale the image to this maximum value.
|
entailment
|
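A minimal usage sketch for `extract_sources` on a single SCI array; the filename is hypothetical and the keyword values are illustrative rather than recommended defaults:

```python
from astropy.io import fits

# Hypothetical exposure; any 2-D science array will do
with fits.open('j8bt06nyq_flt.fits') as hdulist:
    imgarr = hdulist['SCI', 1].data
    seg_tab, segmap = extract_sources(imgarr, fwhm=3.0, nlargest=50,
                                      centering_mode='starfind',
                                      classify=False, output=None)
if seg_tab is not None:
    print("Detected {} sources".format(len(seg_tab)))
```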
def classify_sources(catalog, sources=None):
""" Convert moments_central attribute for source catalog into star/cr flag.
This algorithm interprets the central_moments from the source_properties
generated for the sources as more-likely a star or a cosmic-ray. It is not
intended or expected to be precise, merely a means of making a first cut at
removing likely cosmic-rays or other artifacts.
Parameters
----------
catalog : `~photutils.SourceCatalog`
The photutils catalog for the image/chip.
sources : tuple
Range of objects from catalog to process as a tuple of (min, max).
If None (default) all sources are processed.
Returns
-------
srctype : ndarray
An ndarray where a value of 1 indicates a likely valid, non-cosmic-ray
source, and a value of 0 indicates a likely cosmic-ray.
"""
moments = catalog.moments_central
if sources is None:
sources = (0, len(moments))
num_sources = sources[1] - sources[0]
srctype = np.zeros((num_sources,), np.int32)
for src in range(sources[0], sources[1]):
# Protect against spurious detections
src_x = catalog[src].xcentroid
src_y = catalog[src].ycentroid
if np.isnan(src_x) or np.isnan(src_y):
continue
x, y = np.where(moments[src] == moments[src].max())
if (x[0] > 1) and (y[0] > 1):
srctype[src] = 1
return srctype
|
Convert moments_central attribute for source catalog into star/cr flag.
This algorithm interprets the central_moments from the source_properties
generated for the sources as more-likely a star or a cosmic-ray. It is not
intended or expected to be precise, merely a means of making a first cut at
removing likely cosmic-rays or other artifacts.
Parameters
----------
catalog : `~photutils.SourceCatalog`
The photutils catalog for the image/chip.
sources : tuple
Range of objects from catalog to process as a tuple of (min, max).
If None (default) all sources are processed.
Returns
-------
srctype : ndarray
An ndarray where a value of 1 indicates a likely valid, non-cosmic-ray
source, and a value of 0 indicates a likely cosmic-ray.
|
entailment
|
def generate_source_catalog(image, **kwargs):
""" Build source catalogs for each chip using photutils.
The catalog returned by this function includes sources found in all chips
of the input image with the positions translated to the coordinate frame
defined by the reference WCS `refwcs`. The sources will be
- identified using photutils segmentation-based source finding code
- measured while ignoring any input pixel flagged as 'bad' in the DQ
array, should a DQ array be found in the input HDUList.
- classified as probable cosmic-rays (if enabled) using central_moments
properties of each source, with these sources being removed from the
catalog.
Parameters
----------
image : `~astropy.io.fits.HDUList`
Input image as an astropy.io.fits HDUList.
dqname : str
EXTNAME for the DQ array, if present, in the input image HDUList.
output : bool
Specify whether or not to write out a separate catalog file for all the
sources found in each chip. Default: None (False)
threshold : float, optional
This parameter controls the threshold used for identifying sources in
the image relative to the background RMS.
If None, compute a default value of (background+3*rms(background)).
If threshold < 0.0, use absolute value as scaling factor for default value.
fwhm : float, optional
FWHM (in pixels) of the expected sources from the image, comparable to the
'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to
this value will be identified as sources in the catalog.
Returns
-------
source_cats : dict
Dict of astropy Tables identified by chip number with
each table containing sources from image extension ``('sci', chip)``.
"""
if not isinstance(image, pf.HDUList):
raise ValueError("Input {} not fits.HDUList object".format(image))
dqname = kwargs.get('dqname', 'DQ')
output = kwargs.get('output', None)
# Build source catalog for entire image
source_cats = {}
numSci = countExtn(image, extname='SCI')
for chip in range(numSci):
chip += 1
# find sources in image
if output:
rootname = image[0].header['rootname']
outroot = '{}_sci{}_src'.format(rootname, chip)
kwargs['output'] = outroot
imgarr = image['sci', chip].data
# apply any DQ array, if available
dqmask = None
if image.index_of(dqname):
dqarr = image[dqname, chip].data
# "grow out" regions in DQ mask flagged as saturated by several
# pixels in every direction to prevent the
# source match algorithm from trying to match multiple sources
# from one image to a single source in the
# other or vice-versa.
# Create temp DQ mask containing all pixels flagged with any value EXCEPT 256
non_sat_mask = bitfield_to_boolean_mask(dqarr, ignore_flags=256)
# Create temp DQ mask containing saturated pixels ONLY
sat_mask = bitfield_to_boolean_mask(dqarr, ignore_flags=~256)
# Grow out saturated pixels by a few pixels in every direction
grown_sat_mask = ndimage.binary_dilation(sat_mask, iterations=5)
# combine the two temporary DQ masks into a single composite DQ mask.
dqmask = np.bitwise_or(non_sat_mask, grown_sat_mask)
# dqmask = bitfield_to_boolean_mask(dqarr, good_mask_value=False)
# TODO: <---Remove this old no-sat bit grow line once this
# thing works
seg_tab, segmap = extract_sources(imgarr, dqmask=dqmask, **kwargs)
seg_tab_phot = seg_tab
source_cats[chip] = seg_tab_phot
return source_cats
|
Build source catalogs for each chip using photutils.
The catalog returned by this function includes sources found in all chips
of the input image with the positions translated to the coordinate frame
defined by the reference WCS `refwcs`. The sources will be
- identified using photutils segmentation-based source finding code
- measured while ignoring any input pixel flagged as 'bad' in the DQ
array, should a DQ array be found in the input HDUList.
- classified as probable cosmic-rays (if enabled) using central_moments
properties of each source, with these sources being removed from the
catalog.
Parameters
----------
image : `~astropy.io.fits.HDUList`
Input image as an astropy.io.fits HDUList.
dqname : str
EXTNAME for the DQ array, if present, in the input image HDUList.
output : bool
Specify whether or not to write out a separate catalog file for all the
sources found in each chip. Default: None (False)
threshold : float, optional
This parameter controls the threshold used for identifying sources in
the image relative to the background RMS.
If None, compute a default value of (background+3*rms(background)).
If threshold < 0.0, use absolute value as scaling factor for default value.
fwhm : float, optional
FWHM (in pixels) of the expected sources from the image, comparable to the
'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to
this value will be identified as sources in the catalog.
Returns
-------
source_cats : dict
Dict of astropy Tables identified by chip number with
each table containing sources from image extension ``('sci', chip)``.
|
entailment
|
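`generate_source_catalog` requires an already-opened `HDUList`, so a sketch typically opens the file first (the filename is hypothetical; keyword values are illustrative):

```python
from astropy.io import fits

# Hypothetical multi-chip exposure; catalogs come back keyed by SCI chip number
with fits.open('j8bt06nyq_flc.fits') as hdulist:
    source_cats = generate_source_catalog(hdulist, dqname='DQ', fwhm=3.0)
for chip, cat in source_cats.items():
    print(chip, 0 if cat is None else len(cat))
```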
def generate_sky_catalog(image, refwcs, **kwargs):
"""Build source catalog from input image using photutils.
This script borrows heavily from build_source_catalog.
The catalog returned by this function includes sources found in all chips
of the input image with the positions translated to the coordinate frame
defined by the reference WCS `refwcs`. The sources will be
- identified using photutils segmentation-based source finding code
- measured while ignoring any input pixel flagged as 'bad' in the DQ
array, should a DQ array be found in the input HDUList.
- classified as probable cosmic-rays (if enabled) using central_moments
properties of each source, with these sources being removed from the
catalog.
Parameters
----------
image : `~astropy.io.fits.HDUList`
Input image.
refwcs : `~stwcs.wcsutils.HSTWCS`
Definition of the reference frame WCS.
dqname : str
EXTNAME for the DQ array, if present, in the input image.
output : bool
Specify whether or not to write out a separate catalog file for all the
sources found in each chip. Default: None (False)
threshold : float, optional
This parameter controls the S/N threshold used for identifying sources in
the image relative to the background RMS in much the same way that
the 'threshold' parameter in 'tweakreg' works.
fwhm : float, optional
FWHM (in pixels) of the expected sources from the image, comparable to the
'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to
this value will be identified as sources in the catalog.
Returns
--------
master_cat : `~astropy.table.Table`
Source catalog for all 'valid' sources identified from all chips of the
input image with positions translated to the reference WCS coordinate
frame.
"""
# Extract source catalogs for each chip
source_cats = generate_source_catalog(image, **kwargs)
# Build source catalog for entire image
master_cat = None
numSci = countExtn(image, extname='SCI')
# if no refwcs specified, build one now...
if refwcs is None:
refwcs = build_reference_wcs([image])
for chip in range(numSci):
chip += 1
# work with sources identified from this specific chip
seg_tab_phot = source_cats[chip]
if seg_tab_phot is None:
continue
# Convert pixel coordinates from this chip to sky coordinates
chip_wcs = wcsutil.HSTWCS(image, ext=('sci', chip))
seg_ra, seg_dec = chip_wcs.all_pix2world(seg_tab_phot['xcentroid'], seg_tab_phot['ycentroid'], 1)
# Convert sky positions to pixel positions in the reference WCS frame
seg_xy_out = refwcs.all_world2pix(seg_ra, seg_dec, 1)
seg_tab_phot['xcentroid'] = seg_xy_out[0]
seg_tab_phot['ycentroid'] = seg_xy_out[1]
if master_cat is None:
master_cat = seg_tab_phot
else:
master_cat = vstack([master_cat, seg_tab_phot])
return master_cat
|
Build source catalog from input image using photutils.
This script borrows heavily from build_source_catalog.
The catalog returned by this function includes sources found in all chips
of the input image with the positions translated to the coordinate frame
defined by the reference WCS `refwcs`. The sources will be
- identified using photutils segmentation-based source finding code
- measured while ignoring any input pixel flagged as 'bad' in the DQ
array, should a DQ array be found in the input HDUList.
- classified as probable cosmic-rays (if enabled) using central_moments
properties of each source, with these sources being removed from the
catalog.
Parameters
----------
image : `~astropy.io.fits.HDUList`
Input image.
refwcs : `~stwcs.wcsutils.HSTWCS`
Definition of the reference frame WCS.
dqname : str
EXTNAME for the DQ array, if present, in the input image.
output : bool
Specify whether or not to write out a separate catalog file for all the
sources found in each chip. Default: None (False)
threshold : float, optional
This parameter controls the S/N threshold used for identifying sources in
the image relative to the background RMS in much the same way that
the 'threshold' parameter in 'tweakreg' works.
fwhm : float, optional
FWHM (in pixels) of the expected sources from the image, comparable to the
'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to
this value will be identified as sources in the catalog.
Returns
--------
master_cat : `~astropy.table.Table`
Source catalog for all 'valid' sources identified from all chips of the
input image with positions translated to the reference WCS coordinate
frame.
|
entailment
|
def compute_photometry(catalog, photmode):
""" Compute magnitudes for sources from catalog based on observations photmode.
Parameters
----------
catalog : `~astropy.table.Table`
Astropy Table with 'source_sum' column for the measured flux for each source.
photmode : str
Specification of the observation filter configuration used for the exposure
as reported by the 'PHOTMODE' keyword from the PRIMARY header.
Returns
-------
phot_cat : `~astropy.table.Table`
Astropy Table object of input source catalog with added column for
VEGAMAG photometry (in magnitudes).
"""
# Determine VEGAMAG zero-point using pysynphot for this photmode
photmode = photmode.replace(' ', ', ')
vega = S.FileSpectrum(VEGASPEC)
bp = S.ObsBandpass(photmode)
vegauvis = S.Observation(vega, bp)
vegazpt = 2.5 * np.log10(vegauvis.countrate())
# Use zero-point to convert flux values from catalog into magnitudes
# source_phot = vegazpt - 2.5*np.log10(catalog['source_sum'])
source_phot = vegazpt - 2.5 * np.log10(catalog['flux'])
source_phot.name = 'vegamag'
# Now add this new column to the catalog table
catalog.add_column(source_phot)
return catalog
|
Compute magnitudes for sources from catalog based on observations photmode.
Parameters
----------
catalog : `~astropy.table.Table`
Astropy Table with 'source_sum' column for the measured flux for each source.
photmode : str
Specification of the observation filter configuration used for the exposure
as reported by the 'PHOTMODE' keyword from the PRIMARY header.
Returns
-------
phot_cat : `~astropy.table.Table`
Astropy Table object of input source catalog with added column for
VEGAMAG photometry (in magnitudes).
|
entailment
|
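A minimal sketch for `compute_photometry`, assuming `seg_tab` is a source table with a 'flux' column as produced by `extract_sources`; the filename is hypothetical and PHOTMODE is read from the exposure's PRIMARY header:

```python
from astropy.io import fits

# Hypothetical: convert measured fluxes into VEGAMAG using the exposure's PHOTMODE
photmode = fits.getval('j8bt06nyq_flt.fits', 'PHOTMODE')
phot_cat = compute_photometry(seg_tab, photmode)   # seg_tab assumed from extract_sources
print(phot_cat['vegamag'][:5])
```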
def filter_catalog(catalog, **kwargs):
""" Create a new catalog selected from input based on photometry.
Parameters
----------
bright_limit : float
Fraction of catalog based on brightness that should be retained.
Value of 1.00 means full catalog.
max_bright : int
Maximum number of sources to keep regardless of `bright_limit`.
min_bright : int
Minimum number of sources to keep regardless of `bright_limit`.
colname : str
Name of column to use for selection/sorting.
Returns
-------
new_catalog : `~astropy.table.Table`
New table which only has the sources that meet the selection criteria.
"""
# interpret input pars
bright_limit = kwargs.get('bright_limit', 1.00)
max_bright = kwargs.get('max_bright', None)
min_bright = kwargs.get('min_bright', 20)
colname = kwargs.get('colname', 'vegamag')
# sort by magnitude
phot_column = catalog[colname]
num_sources = len(phot_column)
sort_indx = np.argsort(phot_column)
if max_bright is None:
max_bright = num_sources
# apply limits, ensuring no more than full catalog gets selected
limit_num = max(int(num_sources * bright_limit), min_bright)
limit_num = min(max_bright, limit_num, num_sources)
# Extract sources identified by selection
new_catalog = catalog[sort_indx[:limit_num]]
return new_catalog
|
Create a new catalog selected from input based on photometry.
Parameters
----------
bright_limit : float
Fraction of catalog based on brightness that should be retained.
Value of 1.00 means full catalog.
max_bright : int
Maximum number of sources to keep regardless of `bright_limit`.
min_bright : int
Minimum number of sources to keep regardless of `bright_limit`.
colname : str
Name of column to use for selection/sorting.
Returns
-------
new_catalog : `~astropy.table.Table`
New table which only has the sources that meet the selection criteria.
|
entailment
|
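A short sketch for `filter_catalog` that keeps the brightest quarter of the catalog (never fewer than 20 sources, never more than 500), sorted on the 'vegamag' column added by `compute_photometry`; `phot_cat` is assumed from the previous sketch:

```python
# Keep the brightest 25% of sources, bounded between 20 and 500 entries
bright_cat = filter_catalog(phot_cat, bright_limit=0.25,
                            min_bright=20, max_bright=500, colname='vegamag')
print(len(bright_cat))
```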
def build_self_reference(filename, clean_wcs=False):
""" This function creates a reference, undistorted WCS that can be used to
apply a correction to the WCS of the input file.
Parameters
----------
filename : str
Filename of image which will be corrected, and which will form the basis
of the undistorted WCS.
clean_wcs : bool
Specify whether or not to return the WCS object without any distortion
information, or any history of the original input image. This converts
the output from `utils.output_wcs()` into a pristine `~stwcs.wcsutils.HSTWCS` object.
Returns
-------
customwcs : `stwcs.wcsutils.HSTWCS`
HSTWCS object which contains the undistorted WCS representing the entire
field-of-view for the input image.
Examples
--------
This function can be used with the following syntax to apply a shift/rot/scale
change to the same image:
>>> import buildref
>>> from drizzlepac import updatehdr
>>> filename = "jce501erq_flc.fits"
>>> wcslin = buildref.build_self_reference(filename)
>>> updatehdr.updatewcs_with_shift(filename, wcslin, xsh=49.5694,
... ysh=19.2203, rot = 359.998, scale = 0.9999964)
"""
if 'sipwcs' in filename:
sciname = 'sipwcs'
else:
sciname = 'sci'
wcslin = build_reference_wcs([filename], sciname=sciname)
if clean_wcs:
wcsbase = wcslin.wcs
customwcs = build_hstwcs(wcsbase.crval[0], wcsbase.crval[1], wcsbase.crpix[0],
wcsbase.crpix[1], wcslin._naxis1, wcslin._naxis2,
wcslin.pscale, wcslin.orientat)
else:
customwcs = wcslin
return customwcs
|
This function creates a reference, undistorted WCS that can be used to
apply a correction to the WCS of the input file.
Parameters
----------
filename : str
Filename of image which will be corrected, and which will form the basis
of the undistorted WCS.
clean_wcs : bool
Specify whether or not to return the WCS object without any distortion
information, or any history of the original input image. This converts
the output from `utils.output_wcs()` into a pristine `~stwcs.wcsutils.HSTWCS` object.
Returns
-------
customwcs : `stwcs.wcsutils.HSTWCS`
HSTWCS object which contains the undistorted WCS representing the entire
field-of-view for the input image.
Examples
--------
This function can be used with the following syntax to apply a shift/rot/scale
change to the same image:
>>> import buildref
>>> from drizzlepac import updatehdr
>>> filename = "jce501erq_flc.fits"
>>> wcslin = buildref.build_self_reference(filename)
>>> updatehdr.updatewcs_with_shift(filename, wcslin, xsh=49.5694,
... ysh=19.2203, rot = 359.998, scale = 0.9999964)
|
entailment
|
def read_hlet_wcs(filename, ext):
"""Insure `stwcs.wcsutil.HSTWCS` includes all attributes of a full image WCS.
For headerlets, the WCS does not contain information about the size of the
image, as the image array is not present in the headerlet.
"""
hstwcs = wcsutil.HSTWCS(filename, ext=ext)
if hstwcs.naxis1 is None:
hstwcs.naxis1 = int(hstwcs.wcs.crpix[0] * 2.) # Assume crpix is center of chip
hstwcs.naxis2 = int(hstwcs.wcs.crpix[1] * 2.)
return hstwcs
|
Ensure `stwcs.wcsutil.HSTWCS` includes all attributes of a full image WCS.
For headerlets, the WCS does not contain information about the size of the
image, as the image array is not present in the headerlet.
|
entailment
|
def build_hstwcs(crval1, crval2, crpix1, crpix2, naxis1, naxis2, pscale, orientat):
""" Create an `stwcs.wcsutil.HSTWCS` object for a default instrument without
distortion based on user provided parameter values.
"""
wcsout = wcsutil.HSTWCS()
wcsout.wcs.crval = np.array([crval1, crval2])
wcsout.wcs.crpix = np.array([crpix1, crpix2])
wcsout.naxis1 = naxis1
wcsout.naxis2 = naxis2
wcsout.wcs.cd = buildRotMatrix(orientat) * [-1, 1] * pscale / 3600.0
# Synchronize updates with astropy.wcs objects
wcsout.wcs.set()
wcsout.setPscale()
wcsout.setOrient()
wcsout.wcs.ctype = ['RA---TAN', 'DEC--TAN']
return wcsout
|
Create an `stwcs.wcsutil.HSTWCS` object for a default instrument without
distortion based on user provided parameter values.
|
entailment
|
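A minimal sketch for `build_hstwcs`, creating a small undistorted tangent-plane WCS from scratch; all numbers are arbitrary illustration values:

```python
# Hypothetical 1024x1024 TAN WCS centered at (RA, Dec) = (150.1, 2.2),
# 0.05 arcsec/pixel, north up (ORIENTAT = 0)
wcs = build_hstwcs(150.1, 2.2, 512.0, 512.0, 1024, 1024, 0.05, 0.0)
print(wcs.wcs.cd)
```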
def within_footprint(img, wcs, x, y):
"""Determine whether input x, y fall in the science area of the image.
Parameters
----------
img : ndarray
ndarray of image where non-science areas are marked with value of NaN.
wcs : `stwcs.wcsutil.HSTWCS`
HSTWCS or WCS object with naxis terms defined.
x, y : ndarray
arrays of x, y positions for sources to be checked.
Returns
-------
x, y : ndarray
New arrays which have been trimmed of all sources that fall outside
the science areas of the image
"""
# start with limits of WCS shape
if hasattr(wcs, 'naxis1'):
naxis1 = wcs.naxis1
naxis2 = wcs.naxis2
elif hasattr(wcs, 'pixel_shape'):
naxis1, naxis2 = wcs.pixel_shape
else:
naxis1 = wcs._naxis1
naxis2 = wcs._naxis2
maskx = np.bitwise_or(x < 0, x > naxis1)
masky = np.bitwise_or(y < 0, y > naxis2)
mask = ~np.bitwise_or(maskx, masky)
x = x[mask]
y = y[mask]
# Now, confirm that these points fall within actual science area of WCS
img_mask = create_image_footprint(img, wcs, border=1.0)
inmask = np.where(img_mask[y.astype(np.int32), x.astype(np.int32)])[0]
x = x[inmask]
y = y[inmask]
return x, y
|
Determine whether input x, y fall in the science area of the image.
Parameters
----------
img : ndarray
ndarray of image where non-science areas are marked with value of NaN.
wcs : `stwcs.wcsutil.HSTWCS`
HSTWCS or WCS object with naxis terms defined.
x, y : ndarray
arrays of x, y positions for sources to be checked.
Returns
-------
x, y : ndarray
New arrays which have been trimmed of all sources that fall outside
the science areas of the image
|
entailment
|
def create_image_footprint(image, refwcs, border=0.):
""" Create the footprint of the image in the reference WCS frame.
Parameters
----------
image : `astropy.io.fits.HDUList` or str
Image to extract sources for matching to
the external astrometric catalog.
refwcs : `stwcs.wcsutil.HSTWCS`
Reference WCS for coordinate frame of image.
border : float
Buffer (in arcseconds) around edge of image to exclude astrometric
sources.
"""
# Interpret input image to generate initial source catalog and WCS
if isinstance(image, str):
image = pf.open(image)
numSci = countExtn(image, extname='SCI')
ref_x = refwcs._naxis1
ref_y = refwcs._naxis2
# convert border value into pixels
border_pixels = int(border / refwcs.pscale)
mask_arr = np.zeros((ref_y, ref_x), dtype=int)
for chip in range(numSci):
chip += 1
# Build arrays of pixel positions for all edges of chip
chip_y, chip_x = image['sci', chip].data.shape
chipwcs = wcsutil.HSTWCS(image, ext=('sci', chip))
xpix = np.arange(chip_x) + 1
ypix = np.arange(chip_y) + 1
edge_x = np.hstack([[1] * chip_y, xpix, [chip_x] * chip_y, xpix])
edge_y = np.hstack([ypix, [1] * chip_x, ypix, [chip_y] * chip_x])
edge_ra, edge_dec = chipwcs.all_pix2world(edge_x, edge_y, 1)
edge_x_out, edge_y_out = refwcs.all_world2pix(edge_ra, edge_dec, 0)
edge_x_out = np.clip(edge_x_out.astype(np.int32), 0, ref_x - 1)
edge_y_out = np.clip(edge_y_out.astype(np.int32), 0, ref_y - 1)
mask_arr[edge_y_out, edge_x_out] = 1
# Fill in outline of each chip
mask_arr = ndimage.binary_fill_holes(ndimage.binary_dilation(mask_arr, iterations=2))
if border > 0.:
mask_arr = ndimage.binary_erosion(mask_arr, iterations=border_pixels)
return mask_arr
|
Create the footprint of the image in the reference WCS frame.
Parameters
----------
image : `astropy.io.fits.HDUList` or str
Image to extract sources for matching to
the external astrometric catalog.
refwcs : `stwcs.wcsutil.HSTWCS`
Reference WCS for coordinate frame of image.
border : float
Buffer (in arcseconds) around edge of image to exclude astrometric
sources.
|
entailment
|
def find_hist2d_offset(filename, reference, refwcs=None, refnames=['ra', 'dec'],
match_tolerance=5., chip_catalog=True, search_radius=15.0,
min_match=10, classify=True):
"""Iteratively look for the best cross-match between the catalog and ref.
Parameters
----------
filename : `~astropy.io.fits.HDUList` or str
Single image to extract sources for matching to
the external astrometric catalog.
reference : str or `~astropy.table.Table`
Reference catalog, either as a filename or ``astropy.Table``
containing astrometrically accurate sky coordinates for astrometric
standard sources.
refwcs : `~stwcs.wcsutil.HSTWCS`
This WCS will define the coordinate frame which will
be used to determine the offset. If None is specified, use the
WCS from the input image `filename` to build this WCS using
`build_self_reference()`.
refnames : list
List of table column names for sky coordinates of astrometric
standard sources from reference catalog.
match_tolerance : float
Tolerance (in pixels) for recognizing that a source position matches
an astrometric catalog position. Larger values allow for lower
accuracy source positions to be compared to astrometric catalog
chip_catalog : bool
Specify whether or not to write out individual source catalog for
each chip in the image.
search_radius : float
Maximum separation (in arcseconds) from source positions to look
for valid cross-matches with reference source positions.
min_match : int
Minimum number of cross-matches for an acceptable determination of
the offset.
classify : bool
Specify whether or not to use central_moments classification to
ignore likely cosmic-rays/bad-pixels when generating the source
catalog.
Returns
-------
best_offset : tuple
Offset in input image pixels between image source positions and
astrometric catalog positions that results in largest number of
matches of astrometric sources with image sources
seg_xy, ref_xy : astropy.Table
Source catalog and reference catalog, respectively, used for
determining the offset. Each catalog includes sources for the entire
field-of-view, not just a single chip.
"""
# Interpret input image to generate initial source catalog and WCS
if isinstance(filename, str):
image = pf.open(filename)
rootname = filename.split("_")[0]
else:
image = filename
rootname = image[0].header['rootname']
# check to see whether reference catalog can be found
if not os.path.exists(reference):
log.info("Could not find input reference catalog: {}".format(reference))
raise FileNotFoundError
# Extract reference WCS from image
if refwcs is None:
refwcs = build_self_reference(image, clean_wcs=True)
log.info("Computing offset for field-of-view defined by:")
log.info(refwcs)
# read in reference catalog
if isinstance(reference, str):
refcat = ascii.read(reference)
else:
refcat = reference
log.info("\nRead in reference catalog with {} sources.".format(len(refcat)))
ref_ra = refcat[refnames[0]]
ref_dec = refcat[refnames[1]]
# Build source catalog for entire image
img_cat = generate_source_catalog(image, refwcs, output=chip_catalog, classify=classify)
img_cat.write(filename.replace(".fits", "_xy.cat"), format='ascii.no_header',
overwrite=True)
# Retrieve source XY positions in reference frame
seg_xy = np.column_stack((img_cat['xcentroid'], img_cat['ycentroid']))
seg_xy = seg_xy[~np.isnan(seg_xy[:, 0])]
# Translate reference catalog positions into input image coordinate frame
xref, yref = refwcs.all_world2pix(ref_ra, ref_dec, 1)
# look for only sources within the viewable area of the exposure to
# determine the offset
xref, yref = within_footprint(image, refwcs, xref, yref)
ref_xy = np.column_stack((xref, yref))
log.info("\nWorking with {} astrometric sources for this field".format(len(ref_xy)))
# write out astrometric reference catalog that was actually used
ref_ra_img, ref_dec_img = refwcs.all_pix2world(xref, yref, 1)
ref_tab = Table([ref_ra_img, ref_dec_img, xref, yref], names=['ra', 'dec', 'x', 'y'])
ref_tab.write(reference.replace('.cat', '_{}.cat'.format(rootname)),
format='ascii.fast_commented_header', overwrite=True)
searchrad = search_radius / refwcs.pscale
# Use 2d-Histogram builder from drizzlepac.tweakreg -- for demo only...
xp, yp, nmatches, zpqual = build_xy_zeropoint(seg_xy, ref_xy,
searchrad=searchrad,
histplot=False, figure_id=1,
plotname=None, interactive=False)
hist2d_offset = (xp, yp)
log.info('best offset {} based on {} cross-matches'.format(hist2d_offset, nmatches))
return hist2d_offset, seg_xy, ref_xy
|
Iteratively look for the best cross-match between the catalog and ref.
Parameters
----------
filename : `~astropy.io.fits.HDUList` or str
Single image to extract sources for matching to
the external astrometric catalog.
reference : str or `~astropy.table.Table`
Reference catalog, either as a filename or ``astropy.Table``
containing astrometrically accurate sky coordinates for astrometric
standard sources.
refwcs : `~stwcs.wcsutil.HSTWCS`
This WCS will define the coordinate frame which will
be used to determine the offset. If None is specified, use the
WCS from the input image `filename` to build this WCS using
`build_self_reference()`.
refnames : list
List of table column names for sky coordinates of astrometric
standard sources from reference catalog.
match_tolerance : float
Tolerance (in pixels) for recognizing that a source position matches
an astrometric catalog position. Larger values allow for lower
accuracy source positions to be compared to astrometric catalog positions.
chip_catalog : bool
Specify whether or not to write out individual source catalog for
each chip in the image.
search_radius : float
Maximum separation (in arcseconds) from source positions to look
for valid cross-matches with reference source positions.
min_match : int
Minimum number of cross-matches for an acceptable determination of
the offset.
classify : bool
Specify whether or not to use central_moments classification to
ignore likely cosmic-rays/bad-pixels when generating the source
catalog.
Returns
-------
best_offset : tuple
Offset in input image pixels between image source positions and
astrometric catalog positions that results in largest number of
matches of astrometric sources with image sources
seg_xy, ref_xy : astropy.Table
Source catalog and reference catalog, respectively, used for
determining the offset. Each catalog includes sources for the entire
field-of-view, not just a single chip.
|
entailment
|
def build_wcscat(image, group_id, source_catalog):
""" Return a list of `~tweakwcs.tpwcs.FITSWCS` objects for all chips in an image.
Parameters
----------
image : str or `~astropy.io.fits.HDUList`
Either filename or HDUList of a single HST observation.
group_id : int
Integer ID for group this image should be associated with; primarily
used when separate chips are in separate files to treat them all as one
exposure.
source_catalog : dict
If provided, these catalogs will be attached as `catalog`
entries in each chip's ``FITSWCS`` object. It should be provided as a
dict of astropy Tables identified by chip number with
each table containing sources from image extension ``('sci', chip)`` as
generated by `generate_source_catalog()`.
Returns
-------
wcs_catalogs : list of `~tweakwcs.tpwcs.FITSWCS`
List of `~tweakwcs.tpwcs.FITSWCS` objects defined for all chips in input image.
"""
open_file = False
if isinstance(image, str):
hdulist = pf.open(image)
open_file = True
elif isinstance(image, pf.HDUList):
hdulist = image
else:
log.info("Wrong type of input, {}, for build_wcscat...".format(type(image)))
raise ValueError
wcs_catalogs = []
numsci = countExtn(hdulist)
for chip in range(1, numsci + 1):
w = wcsutil.HSTWCS(hdulist, ('SCI', chip))
imcat = source_catalog[chip]
# rename xcentroid/ycentroid columns, if necessary, to be consistent with tweakwcs
if 'xcentroid' in imcat.colnames:
imcat.rename_column('xcentroid', 'x')
imcat.rename_column('ycentroid', 'y')
wcscat = FITSWCS(
w,
meta={
'chip': chip,
'group_id': group_id,
'filename': image,
'catalog': imcat,
'name': image
}
)
wcs_catalogs.append(wcscat)
if open_file:
hdulist.close()
return wcs_catalogs
|
Return a list of `~tweakwcs.tpwcs.FITSWCS` objects for all chips in an image.
Parameters
----------
image : str or `~astropy.io.fits.HDUList`
Either filename or HDUList of a single HST observation.
group_id : int
Integer ID for group this image should be associated with; primarily
used when separate chips are in separate files to treat them all as one
exposure.
source_catalog : dict
If provided, these catalogs will be attached as `catalog`
entries in each chip's ``FITSWCS`` object. It should be provided as a
dict of astropy Tables identified by chip number with
each table containing sources from image extension ``('sci', chip)`` as
generated by `generate_source_catalog()`.
Returns
-------
wcs_catalogs : list of `~tweakwcs.tpwcs.FITSWCS`
List of `~tweakwcs.tpwcs.FITSWCS` objects defined for all chips in input image.
|
entailment
|
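A minimal usage sketch for build_wcscat; the filename and catalog values below are hypothetical and assume a single-chip exposure, with the per-chip dict mirroring what generate_source_catalog() would provide.
from astropy.table import Table
chip_cats = {1: Table({'xcentroid': [100.0, 250.5], 'ycentroid': [200.0, 310.2]})}  # illustrative positions
wcs_list = build_wcscat('j8cw03f6q_flc.fits', group_id=1, source_catalog=chip_cats)  # hypothetical file
print(wcs_list[0].meta['chip'], len(wcs_list[0].meta['catalog']))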
def update_from_shiftfile(shiftfile,wcsname=None,force=False):
"""
Update headers of all images specified in shiftfile with shifts
from shiftfile.
Parameters
----------
shiftfile : str
Filename of shiftfile.
wcsname : str
Label to give to new WCS solution being created by this fit. If
a value of None is given, it will automatically use 'TWEAK' as the
label. [Default =None]
force : bool
Update header even though WCS already exists with this solution or
wcsname? [Default=False]
"""
f = open(fileutil.osfn(shiftfile))
shift_lines = [x.strip() for x in f.readlines()]
f.close()
# interpret header of shift file
for line in shift_lines:
if 'refimage' in line or 'reference' in line:
refimage = line.split(':')[-1]
refimage = refimage[:refimage.find('[wcs]')].lstrip()
break
# Determine the max length in the first column (filenames)
fnames = []
for row in shift_lines:
if row[0] == '#': continue
fnames.append(len(row.split(' ')[0]))
fname_fmt = 'S{0}'.format(max(fnames))
# Now read in numerical values from shiftfile
type_list = {'names':('fnames','xsh','ysh','rot','scale','xrms','yrms'),
'formats':(fname_fmt,'f4','f4','f4','f4','f4','f4')}
try:
sdict = np.loadtxt(shiftfile,dtype=type_list,unpack=False)
except IndexError:
tlist = {'names':('fnames','xsh','ysh','rot','scale'),
'formats':(fname_fmt,'f4','f4','f4','f4')}
s = np.loadtxt(shiftfile,dtype=tlist,unpack=False)
sdict = np.zeros([s['fnames'].shape[0],],dtype=type_list)
for sname in s.dtype.names:
sdict[sname] = s[sname]
for img in sdict:
updatewcs_with_shift(img['fnames'], refimage, wcsname=wcsname,
rot=img['rot'], scale=img['scale'],
xsh=img['xsh'], ysh=img['ysh'],
xrms=img['xrms'], yrms=img['yrms'],
force=force)
|
Update headers of all images specified in shiftfile with shifts
from shiftfile.
Parameters
----------
shiftfile : str
Filename of shiftfile.
wcsname : str
Label to give to new WCS solution being created by this fit. If
a value of None is given, it will automatically use 'TWEAK' as the
label. [Default =None]
force : bool
Update header even though WCS already exists with this solution or
wcsname? [Default=False]
|
entailment
|
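For reference, the shiftfile parsed above is plain ASCII with a commented header naming the reference image and one row of fit values per exposure; the layout and filenames below are purely illustrative.
# contents of a hypothetical 'shifts.txt':
#   # units: pixels
#   # frame: output
#   # refimage: reference_drz.fits[wcs]
#   j8cw03f6q_flc.fits   0.12  -0.34  0.0015  1.0000  0.05  0.06
#   j8cw03f7q_flc.fits  -0.08   0.21  0.0009  0.9999  0.04  0.05
update_from_shiftfile('shifts.txt', wcsname='SHIFT_FIT', force=False)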
def updatewcs_with_shift(image,reference,wcsname=None, reusename=False,
fitgeom='rscale',
rot=0.0,scale=1.0,xsh=0.0,ysh=0.0,fit=None,
xrms=None, yrms = None,
verbose=False,force=False,sciext='SCI'):
"""
Update the SCI headers in 'image' based on the fit provided as determined
in the WCS specified by 'reference'. The fit should be a 2-D matrix as
generated for use with 'make_vector_plot()'.
Notes
-----
The algorithm used to apply the provided fit solution to the image
involves applying the following steps to the WCS of each of the
input image's chips:
1. compute RA/Dec with full distortion correction for
reference point as (Rc_i,Dc_i)
2. find the Xc,Yc for each Rc_i,Dc_i and get the difference from the
CRPIX position for the reference WCS as (dXc_i,dYc_i)
3. apply fit (rot&scale) to (dXc_i,dYc_i) then apply shift, then add
CRPIX back to get new (Xcs_i,Ycs_i) position
4. compute (Rcs_i,Dcs_i) as the sky coordinates for (Xcs_i,Ycs_i)
5. compute delta of (Rcs_i-Rc_i, Dcs_i-Dc_i) as (dRcs_i,dDcs_i)
6. apply the fit to the chip's undistorted CD matrix, then apply linear
distortion terms back in to create a new CD matrix
7. add (dRcs_i,dDcs_i) to CRVAL of the reference chip's WCS
8. update header with new WCS values
Parameters
----------
image : str or PyFITS.HDUList object
Filename, or PyFITS object, of image with WCS to be updated.
All extensions with EXTNAME matches the value of the 'sciext'
parameter value (by default, all 'SCI' extensions) will be updated.
reference : str
Filename of image/headerlet (FITS file) which contains the WCS
used to define the tangent plane in which all the fit parameters
(shift, rot, scale) were measured.
wcsname : str
Label to give to new WCS solution being created by this fit. If
a value of None is given, it will automatically use 'TWEAK' as the
label. If a WCS has a name with this specific value, the code will
automatically append a version ID using the format '_n', such as
'TWEAK_1', 'TWEAK_2', or 'TWEAK_update_1'.
[Default =None]
reusename : bool
User can specify whether or not to over-write WCS with same name.
[Default: False]
rot : float
Amount of rotation measured in fit to be applied.
[Default=0.0]
scale : float
Amount of scale change measured in fit to be applied.
[Default=1.0]
xsh : float
Offset in X pixels from defined tangent plane to be applied to image.
[Default=0.0]
ysh : float
Offset in Y pixels from defined tangent plane to be applied to image.
[Default=0.0]
fit : arr
Linear coefficients for fit
[Default = None]
xrms : float
RMS of fit in RA (in decimal degrees) that will be recorded as
CRDER1 in WCS and header
[Default = None]
yrms : float
RMS of fit in Dec (in decimal degrees) that will be recorded as
CRDER2 in WCS and header
[Default = None]
verbose : bool
Print extra messages during processing? [Default=False]
force : bool
Update header even though WCS already exists with this solution or
wcsname? [Default=False]
sciext : string
Value of FITS EXTNAME keyword for extensions with WCS headers to
be updated with the fit values. [Default='SCI']
"""
# if input reference is a ref_wcs file from tweakshifts, use it
if isinstance(reference, wcsutil.HSTWCS) or isinstance(reference, pywcs.WCS):
wref = reference
else:
refimg = fits.open(reference, memmap=False)
wref = None
for extn in refimg:
if 'extname' in extn.header and extn.header['extname'] == 'WCS':
wref = pywcs.WCS(refimg['wcs'].header)
break
refimg.close()
# else, we have presumably been provided a full undistorted image
# as a reference, so use it with HSTWCS instead
if wref is None:
wref = wcsutil.HSTWCS(reference)
if isinstance(image, fits.HDUList):
open_image = False
filename = image.filename()
if image.fileinfo(0)['filemode'] == 'update':
image_update = True
else:
image_update = False
else:
open_image = True
filename = image
image_update = None
# Now that we are sure we have a good reference WCS to use,
# continue with the update
logstr = "....Updating header for {:s}...".format(filename)
if verbose:
print("\n{:s}\n".format(logstr))
else:
log.info(logstr)
# reset header WCS keywords to original (OPUS generated) values
extlist = get_ext_list(image, extname='SCI')
if extlist:
if image_update:
# Create initial WCSCORR extension
wcscorr.init_wcscorr(image,force=force)
else:
extlist = [0]
# insure that input PRIMARY WCS has been archived before overwriting
# with new solution
if open_image:
fimg = fits.open(image, mode='update', memmap=False)
image_update = True
else:
fimg = image
if image_update:
wcsutil.altwcs.archiveWCS(fimg,extlist,reusekey=True)
# Process MEF images...
for ext in extlist:
logstr = "Processing {:s}[{:s}]".format(fimg.filename(),
ext2str(ext))
if verbose:
print("\n{:s}\n".format(logstr))
else:
log.info(logstr)
chip_wcs = wcsutil.HSTWCS(fimg,ext=ext)
update_refchip_with_shift(chip_wcs, wref, fitgeom=fitgeom,
rot=rot, scale=scale, xsh=xsh, ysh=ysh,
fit=fit, xrms=xrms, yrms=yrms)
#if util.is_blank(wcsname):
#wcsname = 'TWEAK'
# Update FITS file with newly updated WCS for this chip
extnum = fimg.index(fimg[ext])
update_wcs(fimg, extnum, chip_wcs, wcsname=wcsname,
reusename=reusename, verbose=verbose)
if open_image:
fimg.close()
|
Update the SCI headers in 'image' based on the fit provided as determined
in the WCS specified by 'reference'. The fit should be a 2-D matrix as
generated for use with 'make_vector_plot()'.
Notes
-----
The algorithm used to apply the provided fit solution to the image
involves applying the following steps to the WCS of each of the
input image's chips:
1. compute RA/Dec with full distortion correction for
reference point as (Rc_i,Dc_i)
2. find the Xc,Yc for each Rc_i,Dc_i and get the difference from the
CRPIX position for the reference WCS as (dXc_i,dYc_i)
3. apply fit (rot&scale) to (dXc_i,dYc_i) then apply shift, then add
CRPIX back to get new (Xcs_i,Ycs_i) position
4. compute (Rcs_i,Dcs_i) as the sky coordinates for (Xcs_i,Ycs_i)
5. compute delta of (Rcs_i-Rc_i, Dcs_i-Dc_i) as (dRcs_i,dDcs_i)
6. apply the fit to the chip's undistorted CD matrix, then apply linear
distortion terms back in to create a new CD matrix
7. add (dRcs_i,dDcs_i) to CRVAL of the reference chip's WCS
8. update header with new WCS values
Parameters
----------
image : str or PyFITS.HDUList object
Filename, or PyFITS object, of image with WCS to be updated.
All extensions with EXTNAME matches the value of the 'sciext'
parameter value (by default, all 'SCI' extensions) will be updated.
reference : str
Filename of image/headerlet (FITS file) which contains the WCS
used to define the tangent plane in which all the fit parameters
(shift, rot, scale) were measured.
wcsname : str
Label to give to new WCS solution being created by this fit. If
a value of None is given, it will automatically use 'TWEAK' as the
label. If a WCS has a name with this specific value, the code will
automatically append a version ID using the format '_n', such as
'TWEAK_1', 'TWEAK_2', or 'TWEAK_update_1'.
[Default =None]
reusename : bool
User can specify whether or not to over-write WCS with same name.
[Default: False]
rot : float
Amount of rotation measured in fit to be applied.
[Default=0.0]
scale : float
Amount of scale change measured in fit to be applied.
[Default=1.0]
xsh : float
Offset in X pixels from defined tangent plane to be applied to image.
[Default=0.0]
ysh : float
Offset in Y pixels from defined tangent plane to be applied to image.
[Default=0.0]
fit : arr
Linear coefficients for fit
[Default = None]
xrms : float
RMS of fit in RA (in decimal degrees) that will be recorded as
CRDER1 in WCS and header
[Default = None]
yrms : float
RMS of fit in Dec (in decimal degrees) that will be recorded as
CRDER2 in WCS and header
[Default = None]
verbose : bool
Print extra messages during processing? [Default=False]
force : bool
Update header even though WCS already exists with this solution or
wcsname? [Default=False]
sciext : string
Value of FITS EXTNAME keyword for extensions with WCS headers to
be updated with the fit values. [Default='SCI']
|
entailment
|
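A hypothetical call applying a small measured shift/rotation/scale solution relative to an undistorted reference product; all filenames and fit values are illustrative only.
updatewcs_with_shift('j8cw03f6q_flc.fits', 'reference_drz.fits',
wcsname='TWEAK_SHIFT', rot=0.0015, scale=1.0000,
xsh=0.12, ysh=-0.34, verbose=True)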
def linearize(wcsim, wcsima, wcsref, imcrpix, f, shift, hx=1.0, hy=1.0):
""" linearization using 5-point formula for first order derivative
"""
x0 = imcrpix[0]
y0 = imcrpix[1]
p = np.asarray([[x0, y0],
[x0 - hx, y0],
[x0 - hx * 0.5, y0],
[x0 + hx * 0.5, y0],
[x0 + hx, y0],
[x0, y0 - hy],
[x0, y0 - hy * 0.5],
[x0, y0 + hy * 0.5],
[x0, y0 + hy]],
dtype=np.float64)
# convert image coordinates to reference image coordinates:
p = wcsref.wcs_world2pix(wcsim.wcs_pix2world(p, 1), 1).astype(ndfloat128)
# apply linear fit transformation:
p = np.dot(f, (p - shift).T).T
# convert back to image coordinate system:
p = wcsima.wcs_world2pix(
wcsref.wcs_pix2world(p.astype(np.float64), 1), 1).astype(ndfloat128)
# derivative with regard to x:
u1 = ((p[1] - p[4]) + 8 * (p[3] - p[2])) / (6*hx)
# derivative with regard to y:
u2 = ((p[5] - p[8]) + 8 * (p[7] - p[6])) / (6*hy)
return (np.asarray([u1, u2]).T, p[0])
|
linearization using 5-point formula for first order derivative
|
entailment
|
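The stencil used above is the five-point central-difference formula sampled at half-steps, f'(x0) ~ [f(x0-h) - f(x0+h) + 8*(f(x0+h/2) - f(x0-h/2))] / (6*h); a scalar NumPy check of the same formula (the test function is chosen arbitrarily for illustration).
import numpy as np
x0, h = 0.5, 1.0e-2
approx = ((np.sin(x0 - h) - np.sin(x0 + h)) + 8.0 * (np.sin(x0 + h / 2) - np.sin(x0 - h / 2))) / (6.0 * h)
print(approx, np.cos(x0))  # the two values agree very closely (error is O(h**4))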
def update_refchip_with_shift(chip_wcs, wcslin, fitgeom='rscale',
rot=0.0, scale=1.0, xsh=0.0, ysh=0.0,
fit=None, xrms=None, yrms=None):
""" Compute the matrix for the scale and rotation correction
Parameters
----------
chip_wcs: wcs object
HSTWCS of the input image chip to be updated
wcslin: wcs object
Reference WCS from which the offsets/rotations are determined
fitgeom: str
NOT USED
rot : float
Amount of rotation measured in fit to be applied.
[Default=0.0]
scale : float
Amount of scale change measured in fit to be applied.
[Default=1.0]
xsh : float
Offset in X pixels from defined tangent plane to be applied to image.
[Default=0.0]
ysh : float
Offset in Y pixels from defined tangent plane to be applied to image.
[Default=0.0]
fit : arr
Linear coefficients for fit
[Default = None]
xrms : float
RMS of fit in RA (in decimal degrees) that will be recorded as
CRDER1 in WCS and header
[Default = None]
yrms : float
RMS of fit in Dec (in decimal degrees) that will be recorded as
CRDER2 in WCS and header
[Default = None]
"""
# compute the matrix for the scale and rotation correction
if fit is None:
fit = linearfit.buildFitMatrix(rot, scale)
shift = np.asarray([xsh, ysh]) - np.dot(wcslin.wcs.crpix, fit) + wcslin.wcs.crpix
fit = np.linalg.inv(fit).T
cwcs = chip_wcs.deepcopy()
cd_eye = np.eye(chip_wcs.wcs.cd.shape[0], dtype=ndfloat128)
zero_shift = np.zeros(2, dtype=ndfloat128)
naxis1, naxis2 = chip_wcs.pixel_shape
# estimate precision necessary for iterative processes:
maxiter = 100
crpix2corners = np.dstack([i.flatten() for i in np.meshgrid(
[1, naxis1], [1, naxis2])])[0] - chip_wcs.wcs.crpix
maxUerr = 1.0e-5 / np.amax(np.linalg.norm(crpix2corners, axis=1))
# estimate step for numerical differentiation. We need a step
# large enough to avoid rounding errors and small enough to get a
# better precision for numerical differentiation.
# TODO: The logic below should be revised at a later time so that it
# better takes into account the two competing requirements.
hx = max(1.0, min(20.0, (chip_wcs.wcs.crpix[0] - 1.0) / 100.0,
(naxis1 - chip_wcs.wcs.crpix[0]) / 100.0))
hy = max(1.0, min(20.0, (chip_wcs.wcs.crpix[1] - 1.0) / 100.0,
(naxis2 - chip_wcs.wcs.crpix[1]) / 100.0))
# compute new CRVAL for the image WCS:
crpixinref = wcslin.wcs_world2pix(
chip_wcs.wcs_pix2world([chip_wcs.wcs.crpix],1),1)
crpixinref = np.dot(fit, (crpixinref - shift).T).T
chip_wcs.wcs.crval = wcslin.wcs_pix2world(crpixinref, 1)[0]
chip_wcs.wcs.set()
# initial approximation for CD matrix of the image WCS:
(U, u) = linearize(cwcs, chip_wcs, wcslin, chip_wcs.wcs.crpix,
fit, shift, hx=hx, hy=hy)
err0 = np.amax(np.abs(U-cd_eye)).astype(np.float64)
chip_wcs.wcs.cd = np.dot(chip_wcs.wcs.cd.astype(ndfloat128), U).astype(np.float64)
chip_wcs.wcs.set()
# NOTE: initial solution is the exact mathematical solution (modulo numeric
# differentiation). However, e.g., due to rounding errors, approximate
# numerical differentiation, the solution may be improved by performing
# several iterations. The next step will try to perform
# fixed-point iterations to "improve" the solution
# but this is not really required.
# Perform fixed-point iterations to improve the approximation
# for CD matrix of the image WCS (actually for the U matrix).
for i in range(maxiter):
(U, u) = linearize(chip_wcs, chip_wcs, wcslin, chip_wcs.wcs.crpix,
cd_eye, zero_shift, hx=hx, hy=hy)
err = np.amax(np.abs(U-cd_eye)).astype(np.float64)
if err > err0:
break
chip_wcs.wcs.cd = np.dot(chip_wcs.wcs.cd, U).astype(np.float64)
chip_wcs.wcs.set()
if err < maxUerr:
break
err0 = err
if xrms is not None:
chip_wcs.wcs.crder = np.array([xrms,yrms])
|
Compute the matrix for the scale and rotation correction
Parameters
----------
chip_wcs: wcs object
HSTWCS of the input image chip to be updated
wcslin: wcs object
Reference WCS from which the offsets/rotations are determined
fitgeom: str
NOT USED
rot : float
Amount of rotation measured in fit to be applied.
[Default=0.0]
scale : float
Amount of scale change measured in fit to be applied.
[Default=1.0]
xsh : float
Offset in X pixels from defined tangent plane to be applied to image.
[Default=0.0]
ysh : float
Offset in Y pixels from defined tangent plane to be applied to image.
[Default=0.0]
fit : arr
Linear coefficients for fit
[Default = None]
xrms : float
RMS of fit in RA (in decimal degrees) that will be recorded as
CRDER1 in WCS and header
[Default = None]
yrms : float
RMS of fit in Dec (in decimal degrees) that will be recorded as
CRDER2 in WCS and header
[Default = None]
|
entailment
|
def update_wcs(image,extnum,new_wcs,wcsname="",reusename=False,verbose=False):
"""
Updates the WCS of the specified extension number with the new WCS
after archiving the original WCS.
The value of 'new_wcs' needs to be the full
HSTWCS object.
Parameters
----------
image : str
Filename of image with WCS that needs to be updated
extnum : int
Extension number for extension with WCS to be updated/replaced
new_wcs : object
Full HSTWCS object which will replace/update the existing WCS
wcsname : str
Label to give newly updated WCS
reusename : bool
User can choose whether to over-write WCS with same name or not.
[Default: False]
verbose : bool, int
Print extra messages during processing? [Default: False]
"""
# Start by insuring that the correct value of 'orientat' has been computed
new_wcs.setOrient()
fimg_open=False
if not isinstance(image, fits.HDUList):
fimg = fits.open(image, mode='update', memmap=False)
fimg_open = True
fimg_update = True
else:
fimg = image
if fimg.fileinfo(0)['filemode'] == 'update':
fimg_update = True
else:
fimg_update = False
# Determine final (unique) WCSNAME value, either based on the default or
# user-provided name
if util.is_blank(wcsname):
wcsname = 'TWEAK'
if not reusename:
wcsname = create_unique_wcsname(fimg, extnum, wcsname)
idchdr = True
if new_wcs.idcscale is None:
idchdr = False
# Open the file for updating the WCS
try:
logstr = 'Updating header for %s[%s]'%(fimg.filename(),str(extnum))
if verbose:
print(logstr)
else:
log.info(logstr)
hdr = fimg[extnum].header
if verbose:
log.info(' with WCS of')
new_wcs.printwcs()
print("WCSNAME : ",wcsname)
# Insure that if a copy of the WCS has not been created yet, it will be now
wcs_hdr = new_wcs.wcs2header(idc2hdr=idchdr, relax=True)
for key in wcs_hdr:
hdr[key] = wcs_hdr[key]
hdr['ORIENTAT'] = new_wcs.orientat
hdr['WCSNAME'] = wcsname
util.updateNEXTENDKw(fimg)
# Only if this image was opened in update mode should this
# newly updated WCS be archived, as it will never be written out
# to a file otherwise.
if fimg_update:
if not reusename:
# Save the newly updated WCS as an alternate WCS as well
wkey = wcsutil.altwcs.next_wcskey(fimg,ext=extnum)
else:
wkey = wcsutil.altwcs.getKeyFromName(hdr,wcsname)
# wcskey needs to be specified so that archiveWCS will create a
# duplicate WCS with the same WCSNAME as the Primary WCS
wcsutil.altwcs.archiveWCS(fimg,[extnum],wcsname=wcsname,
wcskey=wkey, reusekey=reusename)
finally:
if fimg_open:
# finish up by closing the file now
fimg.close()
|
Updates the WCS of the specified extension number with the new WCS
after archiving the original WCS.
The value of 'new_wcs' needs to be the full
HSTWCS object.
Parameters
----------
image : str
Filename of image with WCS that needs to be updated
extnum : int
Extension number for extension with WCS to be updated/replaced
new_wcs : object
Full HSTWCS object which will replace/update the existing WCS
wcsname : str
Label to give newly updated WCS
reusename : bool
User can choose whether to over-write WCS with same name or not.
[Default: False]
verbose : bool, int
Print extra messages during processing? [Default: False]
|
entailment
|
def create_unique_wcsname(fimg, extnum, wcsname):
"""
This function evaluates whether the specified wcsname value has
already been used in this image. If so, it automatically modifies
the name with a simple version ID using wcsname_NNN format.
Parameters
----------
fimg : obj
PyFITS object of image with WCS information to be updated
extnum : int
Index of extension with WCS information to be updated
wcsname : str
Value of WCSNAME specified by user for labelling the new WCS
Returns
-------
uniqname : str
Unique WCSNAME value
"""
wnames = list(wcsutil.altwcs.wcsnames(fimg, ext=extnum).values())
if wcsname not in wnames:
uniqname = wcsname
else:
# setup pattern to match
rpatt = re.compile(wcsname + r'_\d')
index = 0
for wname in wnames:
rmatch = rpatt.match(wname)
if rmatch:
# get index
n = int(wname[wname.rfind('_')+1:])
if n > index: index = n
index += 1 # for use with new name
uniqname = "%s_%d"%(wcsname,index)
return uniqname
|
This function evaluates whether the specified wcsname value has
already been used in this image. If so, it automatically modifies
the name with a simple version ID using wcsname_NNN format.
Parameters
----------
fimg : obj
PyFITS object of image with WCS information to be updated
extnum : int
Index of extension with WCS information to be updated
wcsname : str
Value of WCSNAME specified by user for labelling the new WCS
Returns
-------
uniqname : str
Unique WCSNAME value
|
entailment
|
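In effect, if 'TWEAK' is already registered as an alternate WCS name the helper falls back to 'TWEAK_1', 'TWEAK_2', and so on; a hypothetical call, with fimg an already-opened HDUList.
name = create_unique_wcsname(fimg, 1, 'TWEAK')
# -> 'TWEAK' if unused in extension 1, otherwise 'TWEAK_<n+1>' where n is the highest index already present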
def get_pool_size(usr_config_value, num_tasks):
""" Determine size of thread/process-pool for parallel processing.
This examines the cpu_count to decide and return the right pool
size to use. Also take into account the user's wishes via the config
object value, if specified. On top of that, don't allow the pool size
returned to be any higher than the number of parallel tasks, if specified.
Only use what we need (mp.Pool starts pool_size processes, needed or not).
If number of tasks is unknown, call this with "num_tasks" set to None.
Returns 1 when indicating that parallel processing should not be used.
Consolidate all such logic here, not in the caller. """
if not can_parallel:
return 1
# Give priority to their specified cfg value, over the actual cpu count
if usr_config_value is not None:
if num_tasks is None:
return usr_config_value
else:
# usr_config_value may be needlessly high
return min(usr_config_value, num_tasks)
# they haven't specified a cfg value, so go with the cpu_count
if num_tasks is None:
return _cpu_count
else:
# run no more workers than tasks
return min(_cpu_count, num_tasks)
|
Determine size of thread/process-pool for parallel processing.
This examines the cpu_count to decide and return the right pool
size to use. Also take into account the user's wishes via the config
object value, if specified. On top of that, don't allow the pool size
returned to be any higher than the number of parallel tasks, if specified.
Only use what we need (mp.Pool starts pool_size processes, needed or not).
If number of tasks is unknown, call this with "num_tasks" set to None.
Returns 1 when indicating that parallel processing should not be used.
Consolidate all such logic here, not in the caller.
|
entailment
|
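A few illustrative calls, assuming parallel processing is available (can_parallel is True) and eight CPUs were detected (_cpu_count == 8).
get_pool_size(None, None)  # -> 8, no user value and unknown number of tasks
get_pool_size(None, 3)     # -> 3, never start more workers than tasks
get_pool_size(4, 100)      # -> 4, user-requested pool size honored
get_pool_size(16, 2)       # -> 2, user request capped by the task count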
def init_logging(logfile=DEFAULT_LOGNAME, default=None, level=logging.INFO):
"""
Set up logger for capturing stdout/stderr messages.
Must be called prior to writing any messages that you want to log.
"""
if logfile == "INDEF":
if not is_blank(default):
logname = fileutil.buildNewRootname(default, '.log')
else:
logname = DEFAULT_LOGNAME
elif logfile not in [None, "" , " "]:
if logfile.endswith('.log'):
logname = logfile
else:
logname = logfile + '.log'
else:
logname = None
if logname is not None:
logutil.setup_global_logging()
# Don't use logging.basicConfig since it can only be called once in a
# session
# TODO: Would be fine to use logging.config.dictConfig, but it's not
# available in Python 2.5
global _log_file_handler
root_logger = logging.getLogger()
if _log_file_handler:
root_logger.removeHandler(_log_file_handler)
# Default mode is 'a' which is fine
_log_file_handler = logging.FileHandler(logname)
# TODO: Make the default level configurable in the task parameters
_log_file_handler.setLevel(level)
_log_file_handler.setFormatter(
logging.Formatter('[%(levelname)-8s] %(message)s'))
root_logger.setLevel(level)
root_logger.addHandler(_log_file_handler)
print('Setting up logfile : ', logname)
#stdout_logger = logging.getLogger('stsci.tools.logutil.stdout')
# Disable display of prints to stdout from all packages except
# drizzlepac
#stdout_logger.addFilter(logutil.EchoFilter(include=['drizzlepac']))
else:
print('No trailer file created...')
|
Set up logger for capturing stdout/stderr messages.
Must be called prior to writing any messages that you want to log.
|
entailment
|
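A minimal sketch of how the logging helpers are typically paired around a processing run; the log filename is illustrative and log is assumed to be the module-level logger.
init_logging(logfile='astrodrizzle.log', level=logging.INFO)
log.info('processing started')
# ... run the task ...
end_logging('astrodrizzle.log')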
def end_logging(filename=None):
"""
Close log file and restore system defaults.
"""
if logutil.global_logging_started:
if filename:
print('Trailer file written to: ', filename)
else:
# This generally shouldn't happen if logging was started with
# init_logging and a filename was given...
print('No trailer file saved...')
logutil.teardown_global_logging()
else:
print('No trailer file saved...')
|
Close log file and restore system defaults.
|
entailment
|
def findrootname(filename):
"""
Return the rootname of the given file.
"""
puncloc = [filename.find(char) for char in string.punctuation]
if sys.version_info[0] >= 3:
val = sys.maxsize
else:
val = sys.maxint
for num in puncloc:
if num !=-1 and num < val:
val = num
return filename[0:val]
|
Return the rootname of the given file.
|
entailment
|
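The rootname is simply everything before the first punctuation character in the name, for example (filenames illustrative):
findrootname('j8cw03011_drz.fits')  # -> 'j8cw03011'
findrootname('image.fits')          # -> 'image'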
def removeFileSafely(filename,clobber=True):
""" Delete the file specified, but only if it exists and clobber is True.
"""
if filename is not None and filename.strip() != '':
if os.path.exists(filename) and clobber: os.remove(filename)
|
Delete the file specified, but only if it exists and clobber is True.
|
entailment
|
def displayEmptyInputWarningBox(display=True, parent=None):
""" Displays a warning box for the 'input' parameter.
"""
if sys.version_info[0] >= 3:
from tkinter.messagebox import showwarning
else:
from tkMessageBox import showwarning
if display:
msg = 'No valid input files found! '+\
'Please check the value for the "input" parameter.'
showwarning(parent=parent,message=msg, title="No valid inputs!")
return "yes"
|
Displays a warning box for the 'input' parameter.
|
entailment
|
def count_sci_extensions(filename):
""" Return the number of SCI extensions and the EXTNAME from a input MEF file.
"""
num_sci = 0
extname = 'SCI'
hdu_list = fileutil.openImage(filename, memmap=False)
for extn in hdu_list:
if 'extname' in extn.header and extn.header['extname'] == extname:
num_sci += 1
if num_sci == 0:
extname = 'PRIMARY'
num_sci = 1
hdu_list.close()
return num_sci,extname
|
Return the number of SCI extensions and the EXTNAME from an input MEF file.
|
entailment
|
def verifyUniqueWcsname(fname,wcsname):
"""
Report whether or not the specified WCSNAME already exists in the file
"""
uniq = True
numsci,extname = count_sci_extensions(fname)
wnames = altwcs.wcsnames(fname,ext=(extname,1))
if wcsname in wnames.values():
uniq = False
return uniq
|
Report whether or not the specified WCSNAME already exists in the file
|
entailment
|
def verifyUpdatewcs(fname):
"""
Verify the existence of WCSNAME in the file. If it is not present,
report this to the user and raise an exception. Returns True if WCSNAME
was found in all SCI extensions.
"""
updated = True
numsci,extname = count_sci_extensions(fname)
for n in range(1,numsci+1):
hdr = fits.getheader(fname, extname=extname, extver=n, memmap=False)
if 'wcsname' not in hdr:
updated = False
break
return updated
|
Verify the existence of WCSNAME in the file. If it is not present,
report this to the user and raise an exception. Returns True if WCSNAME
was found in all SCI extensions.
|
entailment
|
def verifyRefimage(refimage):
"""
Verify that the value of refimage specified by the user points to an
extension with a proper WCS defined. It starts by making sure an extension gets
specified by the user when using a MEF file. The final check comes by looking
for a CD matrix in the WCS object itself. If either test fails, it returns
a value of False.
"""
valid = True
# start by trying to see whether the code can even find the file
if is_blank(refimage):
valid=True
return valid
refroot,extroot = fileutil.parseFilename(refimage)
if not os.path.exists(refroot):
valid = False
return valid
# if a MEF has been specified, make sure extension contains a valid WCS
if valid:
if extroot is None:
extn = findWCSExtn(refimage)
if extn is None:
valid = False
else:
valid = True
else:
# check for CD matrix in WCS object
refwcs = wcsutil.HSTWCS(refimage)
if not refwcs.wcs.has_cd():
valid = False
else:
valid = True
del refwcs
return valid
|
Verify that the value of refimage specified by the user points to an
extension with a proper WCS defined. It starts by making sure an extension gets
specified by the user when using a MEF file. The final check comes by looking
for a CD matrix in the WCS object itself. If either test fails, it returns
a value of False.
|
entailment
|
def findWCSExtn(filename):
""" Return new filename with extension that points to an extension with a
valid WCS.
Returns
=======
extnum : str, None
Value of extension name as a string either as provided by the user
or based on the extension number for the first extension which
contains a valid HSTWCS object. Returns None if no extension can be
found with a valid WCS.
Notes
=====
The return value from this function can be used as input to
create another HSTWCS with the syntax::
``HSTWCS('{}[{}]'.format(filename, extnum))``
"""
rootname,extroot = fileutil.parseFilename(filename)
extnum = None
if extroot is None:
fimg = fits.open(rootname, memmap=False)
for i,extn in enumerate(fimg):
if 'crval1' in extn.header:
refwcs = wcsutil.HSTWCS('{}[{}]'.format(rootname,i))
if refwcs.wcs.has_cd():
extnum = '{}'.format(i)
break
fimg.close()
else:
try:
refwcs = wcsutil.HSTWCS(filename)
if refwcs.wcs.has_cd():
extnum = extroot
except:
extnum = None
return extnum
|
Return new filename with extension that points to an extension with a
valid WCS.
Returns
=======
extnum : str, None
Value of extension name as a string either as provided by the user
or based on the extension number for the first extension which
contains a valid HSTWCS object. Returns None if no extension can be
found with a valid WCS.
Notes
=====
The return value from this function can be used as input to
create another HSTWCS with the syntax::
``HSTWCS('{}[{}]'.format(filename, extnum))``
|
entailment
|
def verifyFilePermissions(filelist, chmod=True):
""" Verify that images specified in 'filelist' can be updated.
A message will be printed reporting the names of any images which
do not have write-permission, then quit.
"""
badfiles = []
archive_dir = False
for img in filelist:
fname = fileutil.osfn(img)
if 'OrIg_files' in os.path.split(fname)[0]:
archive_dir = True
try:
fp = open(fname,mode='a')
fp.close()
except IOError as e:
if e.errno == errno.EACCES:
badfiles.append(img)
# Not a permission error.
pass
num_bad = len(badfiles)
if num_bad > 0:
if archive_dir:
print('\n')
print('#'*40)
print(' Working in "OrIg_files" (archive) directory. ')
print(' This directory has been created to serve as an archive')
print(' for the original input images. ')
print('\n These files should be copied into another directory')
print(' for processing. ')
print('#'*40)
print('\n')
print('#'*40)
print('Found %d files which can not be updated!'%(num_bad))
for img in badfiles:
print(' %s'%(img))
print('\nPlease reset permissions for these files and restart...')
print('#'*40)
print('\n')
filelist = None
return filelist
|
Verify that images specified in 'filelist' can be updated.
A message will be printed reporting the names of any images which
do not have write-permission, then quit.
|
entailment
|
def getFullParList(configObj):
"""
Return a single list of all parameter names included in the configObj
regardless of which section the parameter was stored in
"""
plist = []
for par in configObj.keys():
if isinstance(configObj[par],configobj.Section):
plist.extend(getFullParList(configObj[par]))
else:
plist.append(par)
return plist
|
Return a single list of all parameter names included in the configObj
regardless of which section the parameter was stored in
|
entailment
|
def validateUserPars(configObj,input_dict):
""" Compares input parameter names specified by user with those already
recognized by the task.
Any parameters provided by the user that do not match a known
task parameter will be reported and a ValueError exception will be
raised.
"""
# check to see whether any input parameters are unexpected.
# Any unexpected parameters provided on input should be reported and
# the code should stop
plist = getFullParList(configObj)
extra_pars = []
for kw in input_dict:
if kw not in plist:
extra_pars.append(kw)
if len(extra_pars) > 0:
print ('='*40)
print ('The following input parameters were not recognized as valid inputs:')
for p in extra_pars:
print(" %s"%(p))
print('\nPlease check the spelling of the parameter(s) and try again...')
print('='*40)
raise ValueError
|
Compares input parameter names specified by user with those already
recognized by the task.
Any parameters provided by the user that do not match a known
task parameter will be reported and a ValueError exception will be
raised.
|
entailment
|
def applyUserPars_steps(configObj, input_dict, step='3a'):
""" Apply logic to turn on use of user-specified output WCS if user provides
any parameter on command-line regardless of how final_wcs was set.
"""
step_kws = {'7a': 'final_wcs', '3a': 'driz_sep_wcs'}
stepname = getSectionName(configObj,step)
finalParDict = configObj[stepname].copy()
del finalParDict[step_kws[step]]
# interpret input_dict to find any parameters for this step specified by the user
user_pars = {}
for kw in finalParDict:
if kw in input_dict: user_pars[kw] = input_dict[kw]
if len(user_pars) > 0:
configObj[stepname][step_kws[step]] = True
|
Apply logic to turn on use of user-specified output WCS if user provides
any parameter on command-line regardless of how final_wcs was set.
|
entailment
|
def getDefaultConfigObj(taskname,configObj,input_dict={},loadOnly=True):
""" Return default configObj instance for task updated
with user-specified values from input_dict.
Parameters
----------
taskname : string
Name of task to load into TEAL
configObj : string
The valid values for 'configObj' would be::
None - loads last saved user .cfg file
'defaults' - loads task default .cfg file
name of .cfg file (string)- loads user-specified .cfg file
input_dict : dict
Set of parameters and values specified by user to be different from
what gets loaded in from the .cfg file for the task
loadOnly : bool
Setting 'loadOnly' to False causes the TEAL GUI to start allowing the
user to edit the values further and then run the task if desired.
"""
if configObj is None:
# Start by grabbing the default values without using the GUI
# This insures that all subsequent use of the configObj includes
# all parameters and their last saved values
configObj = teal.load(taskname)
elif isinstance(configObj,str):
if configObj.lower().strip() == 'defaults':
# Load task default .cfg file with all default values
configObj = teal.load(taskname,defaults=True)
# define default filename for configObj
configObj.filename = taskname.lower()+'.cfg'
else:
# Load user-specified .cfg file with its special default values
# we need to call 'fileutil.osfn()' to insure all environment
# variables specified by the user in the configObj filename are
# expanded to the full path
configObj = teal.load(fileutil.osfn(configObj))
# merge in the user values for this run
# this, though, does not save the results for use later
if input_dict not in [None,{}]:# and configObj not in [None, {}]:
# check to see whether any input parameters are unexpected.
# Any unexpected parameters provided on input should be reported and
# the code should stop
validateUserPars(configObj,input_dict)
# If everything looks good, merge user inputs with configObj and continue
cfgpars.mergeConfigObj(configObj, input_dict)
# Update the input .cfg file with the updated parameter values
#configObj.filename = os.path.join(cfgpars.getAppDir(),os.path.basename(configObj.filename))
#configObj.write()
if not loadOnly:
# We want to run the GUI AFTER merging in any parameters
# specified by the user on the command-line and provided in
# input_dict
configObj = teal.teal(configObj,loadOnly=False)
return configObj
|
Return default configObj instance for task updated
with user-specified values from input_dict.
Parameters
----------
taskname : string
Name of task to load into TEAL
configObj : string
The valid values for 'configObj' would be::
None - loads last saved user .cfg file
'defaults' - loads task default .cfg file
name of .cfg file (string)- loads user-specified .cfg file
input_dict : dict
Set of parameters and values specified by user to be different from
what gets loaded in from the .cfg file for the task
loadOnly : bool
Setting 'loadOnly' to False causes the TEAL GUI to start allowing the
user to edit the values further and then run the task if desired.
|
entailment
|
def getSectionName(configObj,stepnum):
""" Return section label based on step number.
"""
for key in configObj.keys():
if key.find('STEP '+str(stepnum)+':') >= 0:
return key
|
Return section label based on step number.
|
entailment
|
def displayMakewcsWarningBox(display=True, parent=None):
""" Displays a warning box for the 'makewcs' parameter.
"""
if sys.version_info[0] >= 3:
from tkinter.messagebox import showwarning
else:
from tkMessageBox import showwarning
ans = {'yes': True, 'no': False}
if isinstance(display, str):
display = ans[display.lower()]
if display:
msg = 'Setting "updatewcs=yes" will result '+ \
'in all input WCS values to be recomputed '+ \
'using the original distortion model and alignment.'
showwarning(parent=parent,message=msg, title="WCS will be overwritten!")
return True
|
Displays a warning box for the 'makewcs' parameter.
|
entailment
|
def printParams(paramDictionary, all=False, log=None):
"""
Print nicely the parameters from the dictionary.
"""
if log is not None:
def output(msg):
log.info(msg)
else:
def output(msg):
print(msg)
if not paramDictionary:
output('No parameters were supplied')
else:
for key in sorted(paramDictionary):
if all or (not isinstance(paramDictionary[key], dict)) \
and key[0] != '_':
output('\t' + '\t'.join([str(key) + ' :',
str(paramDictionary[key])]))
if log is None:
output('\n')
|
Print nicely the parameters from the dictionary.
|
entailment
|
def isCommaList(inputFilelist):
"""Return True if the input is a comma separated list of names."""
if isinstance(inputFilelist, int) or isinstance(inputFilelist, np.int32):
ilist = str(inputFilelist)
else:
ilist = inputFilelist
if "," in ilist:
return True
return False
|
Return True if the input is a comma separated list of names.
|
entailment
|
def loadFileList(inputFilelist):
"""Open up the '@ file' and read in the science and possible
ivm filenames from the first two columns.
"""
f = open(inputFilelist[1:])
# check the first line in order to determine whether
# IVM files have been specified in a second column...
line = f.readline()
f.close()
# If there is a second column...
if len(line.split()) == 2:
# ...parse out the names of the IVM files as well
ivmlist = irafglob.irafglob(inputFilelist, atfile=atfile_ivm)
# Parse the @-file with irafglob to extract the input filenames
filelist = irafglob.irafglob(inputFilelist, atfile=atfile_sci)
return filelist
|
Open up the '@ file' and read in the science and possible
ivm filenames from the first two columns.
|
entailment
|
def readCommaList(fileList):
""" Return a list of the files with the commas removed. """
names=fileList.split(',')
fileList=[]
for item in names:
fileList.append(item)
return fileList
|
Return a list of the files with the commas removed.
|
entailment
|
def update_input(filelist, ivmlist=None, removed_files=None):
"""
Removes files flagged to be removed from the input filelist.
Removes the corresponding ivm files if present.
"""
newfilelist = []
if removed_files == []:
return filelist, ivmlist
else:
sci_ivm = list(zip(filelist, ivmlist))
for f in removed_files:
result=[sci_ivm.remove(t) for t in sci_ivm if t[0] == f ]
ivmlist = [el[1] for el in sci_ivm]
newfilelist = [el[0] for el in sci_ivm]
return newfilelist, ivmlist
|
Removes files flagged to be removed from the input filelist.
Removes the corresponding ivm files if present.
|
entailment
|
def get_expstart(header,primary_hdr):
"""shouldn't this just be defined in the instrument subclass of imageobject?"""
if 'expstart' in primary_hdr:
exphdr = primary_hdr
else:
exphdr = header
if 'EXPSTART' in exphdr:
expstart = float(exphdr['EXPSTART'])
expend = float(exphdr['EXPEND'])
else:
expstart = 0.
expend = 0.0
return (expstart,expend)
|
shouldn't this just be defined in the instrument subclass of imageobject?
|
entailment
|
def compute_texptime(imageObjectList):
"""
Add up the exposure time for all the members in
the pattern, since 'drizzle' doesn't have the necessary
information to correctly set this itself.
"""
expnames = []
exptimes = []
start = []
end = []
for img in imageObjectList:
expnames += img.getKeywordList('_expname')
exptimes += img.getKeywordList('_exptime')
start += img.getKeywordList('_expstart')
end += img.getKeywordList('_expend')
exptime = 0.
expstart = min(start)
expend = max(end)
exposure = None
for n in range(len(expnames)):
if expnames[n] != exposure:
exposure = expnames[n]
exptime += exptimes[n]
return (exptime,expstart,expend)
|
Add up the exposure time for all the members in
the pattern, since 'drizzle' doesn't have the necessary
information to correctly set this itself.
|
entailment
|
def computeRange(corners):
""" Determine the range spanned by an array of pixel positions. """
x = corners[:, 0]
y = corners[:, 1]
_xrange = (np.minimum.reduce(x), np.maximum.reduce(x))
_yrange = (np.minimum.reduce(y), np.maximum.reduce(y))
return _xrange, _yrange
|
Determine the range spanned by an array of pixel positions.
|
entailment
|
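For example, given the four corners of a footprint the ranges are just the per-axis minima and maxima.
import numpy as np
corners = np.array([[10., 5.], [110., 5.], [10., 95.], [110., 95.]])
computeRange(corners)  # -> ((10.0, 110.0), (5.0, 95.0))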
def getRotatedSize(corners, angle):
""" Determine the size of a rotated (meta)image."""
if angle:
_rotm = fileutil.buildRotMatrix(angle)
# Rotate about the center
_corners = np.dot(corners, _rotm)
else:
# If there is no rotation, simply return original values
_corners = corners
return computeRange(_corners)
|
Determine the size of a rotated (meta)image.
|
entailment
|
def readcols(infile, cols=[0, 1, 2, 3], hms=False):
"""
Read the columns from an ASCII file as numpy arrays.
Parameters
----------
infile : str
Filename of ASCII file with array data as columns.
cols : list of int
List of 0-indexed column numbers for columns to be turned into numpy arrays
(DEFAULT- [0,1,2,3]).
Returns
-------
outarr : list of numpy arrays
Simple list of numpy arrays in the order as specified in the 'cols' parameter.
"""
fin = open(infile,'r')
outarr = []
for l in fin.readlines():
l = l.strip()
if len(l) == 0 or len(l.split()) < len(cols) or (len(l) > 0 and l[0] == '#' or (l.find("INDEF") > -1)): continue
for i in range(10):
lnew = l.replace("  ", " ")
if lnew == l: break
else: l = lnew
lspl = lnew.split(" ")
if len(outarr) == 0:
for c in range(len(cols)): outarr.append([])
for c,n in zip(cols,list(range(len(cols)))):
if not hms:
val = float(lspl[c])
else:
val = lspl[c]
outarr[n].append(val)
fin.close()
for n in range(len(cols)):
outarr[n] = np.array(outarr[n])
return outarr
|
Read the columns from an ASCII file as numpy arrays.
Parameters
----------
infile : str
Filename of ASCII file with array data as columns.
cols : list of int
List of 0-indexed column numbers for columns to be turned into numpy arrays
(DEFAULT- [0,1,2,3]).
Returns
-------
outarr : list of numpy arrays
Simple list of numpy arrays in the order as specified in the 'cols' parameter.
|
entailment
|
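A typical call on a plain whitespace-separated catalog; the file name and its contents are illustrative only.
# hypothetical 'positions.txt':
#   100.5  200.1  21.3  0.02
#   101.2  198.7  20.9  0.03
x, y, mag, err = readcols('positions.txt', cols=[0, 1, 2, 3])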
def parse_colnames(colnames,coords=None):
""" Convert colnames input into list of column numbers.
"""
cols = []
if not isinstance(colnames,list):
colnames = colnames.split(',')
# parse column names from coords file and match to input values
if coords is not None and fileutil.isFits(coords)[0]:
# Open FITS file with table
ftab = fits.open(coords, memmap=False)
# determine which extension has the table
for extn in ftab:
if isinstance(extn, fits.BinTableHDU):
# parse column names from table and match to inputs
cnames = extn.columns.names
if colnames is not None:
for c in colnames:
for name,i in zip(cnames,list(range(len(cnames)))):
if c == name.lower(): cols.append(i)
if len(cols) < len(colnames):
errmsg = "Not all input columns found in table..."
ftab.close()
raise ValueError(errmsg)
else:
cols = cnames[:2]
break
ftab.close()
else:
for c in colnames:
if isinstance(c, str):
if c[0].lower() == 'c': cols.append(int(c[1:])-1)
else:
cols.append(int(c))
else:
if isinstance(c, int):
cols.append(c)
else:
errmsg = "Unsupported column names..."
raise ValueError(errmsg)
return cols
|
Convert colnames input into list of column numbers.
|
entailment
|
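When no FITS table is supplied, IRAF-style column names are translated into 0-indexed column numbers, for example:
parse_colnames('c1,c2')  # -> [0, 1]
parse_colnames([3, 4])   # -> [3, 4]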
def createFile(dataArray=None, outfile=None, header=None):
"""
Create a simple fits file for the given data array and header.
Returns either the FITS object in-memory when outfile==None or
None when the FITS file was written out to a file.
"""
# Insure that at least a data-array has been provided to create the file
assert(dataArray is not None), "Please supply a data array for createFiles"
try:
# Create the output file
fitsobj = fits.HDUList()
if header is not None:
try:
del(header['NAXIS1'])
del(header['NAXIS2'])
if 'XTENSION' in header:
del(header['XTENSION'])
if 'EXTNAME' in header:
del(header['EXTNAME'])
if 'EXTVER' in header:
del(header['EXTVER'])
except KeyError:
pass
if 'NEXTEND' in header:
header['NEXTEND'] = 0
hdu = fits.PrimaryHDU(data=dataArray, header=header)
try:
del hdu.header['PCOUNT']
del hdu.header['GCOUNT']
except KeyError:
pass
else:
hdu = fits.PrimaryHDU(data=dataArray)
fitsobj.append(hdu)
if outfile is not None:
fitsobj.writeto(outfile)
finally:
# CLOSE THE IMAGE FILES
fitsobj.close()
if outfile is not None:
del fitsobj
fitsobj = None
return fitsobj
|
Create a simple fits file for the given data array and header.
Returns either the FITS object in-memory when outfile==None or
None when the FITS file was written out to a file.
|
entailment
|
def base_taskname(taskname, packagename=None):
"""
Extract the base name of the task.
Many tasks in the `drizzlepac` have "compound" names such as
'drizzlepac.sky'. This function will search for the presence of a dot
in the input `taskname` and if found, it will return the string
to the right of the right-most dot. If a dot is not found, it will return
the input string.
Parameters
----------
taskname : str, None
Full task name. If it is `None`, :py:func:`base_taskname` will
return `None`\ .
packagename : str, None (Default = None)
Package name. It is assumed that a compound task name is formed by
concatenating `packagename` + '.' + `taskname`\ . If `packagename`
is not `None`, :py:func:`base_taskname` will check that the string
to the left of the right-most dot matches `packagename` and will
raise an `AssertionError` if the package name derived from the
input `taskname` does not match the supplied `packagename`\ . This
is intended as a check for discrepancies that may arise
during the development of the tasks. If `packagename` is `None`,
no such check will be performed.
Raises
------
AssertionError
Raised when package name derived from the input `taskname` does not
match the supplied `packagename`
"""
if not isinstance(taskname, str):
return taskname
indx = taskname.rfind('.')
if indx >= 0:
base_taskname = taskname[(indx+1):]
pkg_name = taskname[:indx]
else:
base_taskname = taskname
pkg_name = ''
assert(True if packagename is None else (packagename == pkg_name))
return base_taskname
|
Extract the base name of the task.
Many tasks in the `drizzlepac` have "compound" names such as
'drizzlepac.sky'. This function will search for the presence of a dot
in the input `taskname` and if found, it will return the string
to the right of the right-most dot. If a dot is not found, it will return
the input string.
Parameters
----------
taskname : str, None
Full task name. If it is `None`, :py:func:`base_taskname` will
return `None`\ .
packagename : str, None (Default = None)
Package name. It is assumed that a compound task name is formed by
concatenating `packagename` + '.' + `taskname`\ . If `packagename`
is not `None`, :py:func:`base_taskname` will check that the string
to the left of the right-most dot matches `packagename` and will
raise an `AssertionError` if the package name derived from the
input `taskname` does not match the supplied `packagename`\ . This
is intended as a check for discrepancies that may arise
during the development of the tasks. If `packagename` is `None`,
no such check will be performed.
Raises
------
AssertionError
Raised when package name derived from the input `taskname` does not
match the supplied `packagename`
|
entailment
|
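For example:
base_taskname('drizzlepac.sky')                            # -> 'sky'
base_taskname('drizzlepac.sky', packagename='drizzlepac')  # -> 'sky'
base_taskname('astrodrizzle')                              # -> 'astrodrizzle'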
def addStep(self,key):
"""
Add information about a new step to the dict of steps
The value 'ptime' is the output from '_ptime()' containing
both the formatted and unformatted time for the start of the
step.
"""
ptime = _ptime()
print('==== Processing Step ',key,' started at ',ptime[0])
self.steps[key] = {'start':ptime}
self.order.append(key)
|
Add information about a new step to the dict of steps
The value 'ptime' is the output from '_ptime()' containing
both the formatted and unformatted time for the start of the
step.
|
entailment
|
def endStep(self,key):
"""
Record the end time for the step.
If key==None, simply record ptime as end time for class to represent
the overall runtime since the initialization of the class.
"""
ptime = _ptime()
if key is not None:
self.steps[key]['end'] = ptime
self.steps[key]['elapsed'] = ptime[1] - self.steps[key]['start'][1]
self.end = ptime
print('==== Processing Step ',key,' finished at ',ptime[0])
print('')
|
Record the end time for the step.
If key==None, simply record ptime as end time for class to represent
the overall runtime since the initialization of the class.
|
entailment
|
def reportTimes(self):
"""
Print out a formatted summary of the elapsed times for all the
performed steps.
"""
self.end = _ptime()
total_time = 0
print(ProcSteps.__report_header)
for step in self.order:
if 'elapsed' in self.steps[step]:
_time = self.steps[step]['elapsed']
else:
_time = 0.0
total_time += _time
print(' %20s %0.4f sec.' % (step, _time))
print(' %20s %s' % ('=' * 20, '=' * 20))
print(' %20s %0.4f sec.' % ('Total', total_time))
|
Print out a formatted summary of the elapsed times for all the
performed steps.
|
entailment
|
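addStep, endStep and reportTimes are methods of the ProcSteps bookkeeping class; a sketch of typical use, assuming the class can be instantiated without arguments (the step name is illustrative).
steps = ProcSteps()
steps.addStep('Static Mask')
# ... perform the work for this step ...
steps.endStep('Static Mask')
steps.reportTimes()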
def run(configObj=None, input_dict={}, loadOnly=False):
""" Build DQ masks from all input images, then apply static mask(s).
"""
# If called from interactive user-interface, configObj will not be
# defined yet, so get defaults using EPAR/TEAL.
#
# Also insure that the input_dict (user-specified values) are folded in
# with a fully populated configObj instance.
configObj = util.getDefaultConfigObj(__taskname__,configObj,input_dict,loadOnly=loadOnly)
if configObj is None:
return
# Define list of imageObject instances and output WCSObject instance
# based on input paramters
imgObjList,outwcs = processInput.setCommonInput(configObj)
# Build DQ masks for all input images.
buildMask(imgObjList,configObj)
|
Build DQ masks from all input images, then apply static mask(s).
|
entailment
|
def buildDQMasks(imageObjectList,configObj):
""" Build DQ masks for all input images.
"""
# Insure that input imageObject is a list
if not isinstance(imageObjectList, list):
imageObjectList = [imageObjectList]
for img in imageObjectList:
img.buildMask(configObj['single'], configObj['bits'])
|
Build DQ masks for all input images.
|
entailment
|
def buildMask(dqarr, bitvalue):
""" Builds a bit-mask from an input DQ array and a bitvalue flag """
return bitfield_to_boolean_mask(dqarr, bitvalue, good_mask_value=1,
dtype=np.uint8)
|
Builds a bit-mask from an input DQ array and a bitvalue flag
|
entailment
|
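A small illustration of the expected behaviour, assuming the standard bitfield_to_boolean_mask semantics in which bitvalue lists the DQ flags to ignore (treat as good).
import numpy as np
dq = np.array([[0, 4], [32, 36]], dtype=np.int16)
buildMask(dq, 4 + 32)  # -> [[1, 1], [1, 1]], every set flag is ignored so all pixels stay good
buildMask(dq, 4)       # -> [[1, 1], [0, 0]], pixels carrying the 32 flag are masked out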
def buildMaskImage(rootname, bitvalue, output, extname='DQ', extver=1):
""" Builds mask image from rootname's DQ array
If there is no valid 'DQ' array in image, then return
an empty string.
"""
# If no bitvalue is set or rootname given, assume no mask is desired
# However, this name would be useful as the output mask from
# other processing, such as MultiDrizzle, so return it anyway.
#if bitvalue == None or rootname == None:
# return None
# build output name
maskname = output
# If an old version of the maskfile was present, remove it and rebuild it.
if fileutil.findFile(maskname):
fileutil.removeFile(maskname)
# Open input file with DQ array
fdq = fileutil.openImage(rootname, mode='readonly', memmap=False)
try:
_extn = fileutil.findExtname(fdq, extname, extver=extver)
if _extn is not None:
# Read in DQ array
dqarr = fdq[_extn].data
else:
dqarr = None
# For the case where there is no DQ array,
# create a mask image of all ones.
if dqarr is None:
# We need to get the dimensions of the output DQ array
# Since the DQ array is non-existent, look for the SCI extension
_sci_extn = fileutil.findExtname(fdq,'SCI',extver=extver)
if _sci_extn is not None:
_shape = fdq[_sci_extn].data.shape
dqarr = np.zeros(_shape,dtype=np.uint16)
else:
raise Exception
# Build mask array from DQ array
maskarr = buildMask(dqarr,bitvalue)
#Write out the mask file as simple FITS file
fmask = fits.open(maskname, mode='append', memmap=False)
maskhdu = fits.PrimaryHDU(data = maskarr)
fmask.append(maskhdu)
#Close files
fmask.close()
del fmask
fdq.close()
del fdq
except:
fdq.close()
del fdq
# Safeguard against leaving behind an incomplete file
if fileutil.findFile(maskname):
os.remove(maskname)
_errstr = "\nWarning: Problem creating MASK file for "+rootname+".\n"
#raise IOError, _errstr
print(_errstr)
return None
# Return the name of the mask image written out
return maskname
|
Builds mask image from rootname's DQ array.
If there is no valid 'DQ' array in the image, a default mask of all
good pixels is generated; if the mask cannot be created, None is returned.
|
entailment
|
def buildShadowMaskImage(dqfile,detnum,extnum,maskname,bitvalue=None,binned=1):
""" Builds mask image from WFPC2 shadow calibrations.
detnum - string value for 'DETECTOR' detector
"""
# insure detnum is a string
if type(detnum) != type(''):
detnum = repr(detnum)
_funcroot = '_func_Shadow_WF'
# build template shadow mask's filename
# If an old version of the maskfile was present, remove it and rebuild it.
if fileutil.findFile(maskname):
fileutil.removeFile(maskname)
_use_inmask = not fileutil.findFile(dqfile) or bitvalue is None
    # Check for existence of the input .c1h file for use in making the inmask file
if _use_inmask:
#_mask = 'wfpc2_inmask'+detnum+'.fits'
_mask = maskname
# Check to see if file exists...
if not fileutil.findFile(_mask):
# If not, create the file.
# This takes a long time to run, so it should be done
# only when absolutely necessary...
try:
_funcx = _funcroot+detnum+'x'
_funcy = _funcroot+detnum+'y'
_xarr = np.clip(np.fromfunction(eval(_funcx),(800,800)),0.0,1.0).astype(np.uint8)
_yarr = np.clip(np.fromfunction(eval(_funcy),(800,800)),0.0,1.0).astype(np.uint8)
maskarr = _xarr * _yarr
if binned !=1:
bmaskarr = maskarr[::2,::2]
bmaskarr *= maskarr[1::2,::2]
bmaskarr *= maskarr[::2,1::2]
bmaskarr *= maskarr[1::2,1::2]
maskarr = bmaskarr.copy()
del bmaskarr
#Write out the mask file as simple FITS file
fmask = fits.open(_mask, mode='append', memmap=False)
maskhdu = fits.PrimaryHDU(data=maskarr)
fmask.append(maskhdu)
#Close files
fmask.close()
del fmask
except:
return None
else:
#
# Build full mask based on .c1h and shadow mask
#
fdq = fileutil.openImage(dqfile, mode='readonly', memmap=False)
try:
# Read in DQ array from .c1h and from shadow mask files
dqarr = fdq[int(extnum)].data
#maskarr = fsmask[0].data
# Build mask array from DQ array
dqmaskarr = buildMask(dqarr,bitvalue)
#Write out the mask file as simple FITS file
fdqmask = fits.open(maskname, mode='append', memmap=False)
maskhdu = fits.PrimaryHDU(data=dqmaskarr)
fdqmask.append(maskhdu)
#Close files
fdqmask.close()
del fdqmask
fdq.close()
del fdq
except:
fdq.close()
del fdq
# Safeguard against leaving behind an incomplete file
if fileutil.findFile(maskname):
os.remove(maskname)
_errstr = "\nWarning: Problem creating DQMASK file for "+rootname+".\n"
#raise IOError, _errstr
print(_errstr)
return None
# Return the name of the mask image written out
return maskname
|
Builds mask image from WFPC2 shadow calibrations.
detnum - string value for 'DETECTOR' detector
|
entailment
|
def xy2rd(input,x=None,y=None,coords=None, coordfile=None,colnames=None,separator=None,
hms=True, precision=6,output=None,verbose=True):
""" Primary interface to perform coordinate transformations from
pixel to sky coordinates using STWCS and full distortion models
read from the input image header.
"""
single_coord = False
# Only use value provided in `coords` if nothing has been specified for coordfile
if coords is not None and coordfile is None:
coordfile = coords
warnings.simplefilter('always',DeprecationWarning)
warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
category=DeprecationWarning)
warnings.simplefilter('default',DeprecationWarning)
if coordfile is not None:
if colnames in blank_list:
colnames = ['c1','c2']
# Determine columns which contain pixel positions
cols = util.parse_colnames(colnames,coordfile)
# read in columns from input coordinates file
xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)
if xyvals.ndim == 1: # only 1 entry in coordfile
xlist = [xyvals[0].copy()]
ylist = [xyvals[1].copy()]
else:
xlist = xyvals[:,0].copy()
ylist = xyvals[:,1].copy()
del xyvals
else:
if isinstance(x, np.ndarray):
xlist = x.tolist()
ylist = y.tolist()
elif not isinstance(x,list):
xlist = [x]
ylist = [y]
single_coord = True
else:
xlist = x
ylist = y
# start by reading in WCS+distortion info for input image
inwcs = wcsutil.HSTWCS(input)
if inwcs.wcs.is_unity():
print("####\nNo valid WCS found in {}.\n Results may be invalid.\n####\n".format(input))
# Now, convert pixel coordinates into sky coordinates
dra,ddec = inwcs.all_pix2world(xlist,ylist,1)
# convert to HH:MM:SS.S format, if specified
if hms:
ra,dec = wcs_functions.ddtohms(dra,ddec,precision=precision)
rastr = ra
decstr = dec
else:
# add formatting based on precision here...
rastr = []
decstr = []
fmt = "%."+repr(precision)+"f"
for r,d in zip(dra,ddec):
rastr.append(fmt%r)
decstr.append(fmt%d)
ra = dra
dec = ddec
if verbose or (not verbose and util.is_blank(output)):
print('# Coordinate transformations for ',input)
print('# X Y RA Dec\n')
for x,y,r,d in zip(xlist,ylist,rastr,decstr):
print("%.4f %.4f %s %s"%(x,y,r,d))
# Create output file, if specified
if output:
f = open(output,mode='w')
f.write("# Coordinates converted from %s\n"%input)
for r,d in zip(rastr,decstr):
f.write('%s %s\n'%(r,d))
f.close()
print('Wrote out results to: ',output)
if single_coord:
ra = ra[0]
dec = dec[0]
return ra,dec
|
Primary interface to perform coordinate transformations from
pixel to sky coordinates using STWCS and full distortion models
read from the input image header.
|
entailment
|
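The core transformation inside `xy2rd` is the `all_pix2world` call on the image's WCS. A hedged, standalone sketch of that one step using a plain `astropy.wcs.WCS` with a toy TAN projection (the real code reads the full distortion model from the image header via `stwcs.wcsutil.HSTWCS`; every value below is made up for illustration):

from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [150.0, 2.0]       # reference sky position (deg)
w.wcs.crpix = [512.0, 512.0]     # reference pixel (1-based)
w.wcs.cdelt = [-1.0e-5, 1.0e-5]  # pixel scale (deg/pix)

xlist = [100.0, 512.0, 900.0]
ylist = [200.0, 512.0, 700.0]
ra, dec = w.all_pix2world(xlist, ylist, 1)   # origin=1, as in xy2rd
for x, y, r, d in zip(xlist, ylist, ra, dec):
    print('%.4f %.4f  %.6f %.6f' % (x, y, r, d))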
def run(configObj, wcsmap=None):
""" Interface for running `wdrizzle` from TEAL or Python command-line.
This code performs all file ``I/O`` to set up the use of the drizzle code for
a single exposure to replicate the functionality of the original `wdrizzle`.
"""
# Insure all output filenames specified have .fits extensions
if configObj['outdata'][-5:] != '.fits': configObj['outdata'] += '.fits'
if not util.is_blank(configObj['outweight']) and configObj['outweight'][-5:] != '.fits': configObj['outweight'] += '.fits'
if not util.is_blank(configObj['outcontext']) and configObj['outcontext'][-5:] != '.fits': configObj['outcontext'] += '.fits'
# Keep track of any files we need to open
in_sci_handle = None
in_wht_handle = None
out_sci_handle = None
out_wht_handle = None
out_con_handle = None
_wcskey = configObj['wcskey']
if util.is_blank(_wcskey):
_wcskey = ' '
scale_pars = configObj['Data Scaling Parameters']
user_wcs_pars = configObj['User WCS Parameters']
# Open the SCI (and WHT?) image
# read file to get science array
insci = get_data(configObj['input'])
expin = fileutil.getKeyword(configObj['input'],scale_pars['expkey'])
in_sci_phdr = fits.getheader(fileutil.parseFilename(configObj['input'])[0], memmap=False)
# we need to read in the input WCS
input_wcs = stwcs.wcsutil.HSTWCS(configObj['input'],wcskey=_wcskey)
if not util.is_blank(configObj['inweight']):
inwht = get_data(configObj['inweight']).astype(np.float32)
else:
# Generate a default weight map of all good pixels
inwht = np.ones(insci.shape,dtype=insci.dtype)
output_exists = False
outname = fileutil.osfn(fileutil.parseFilename(configObj['outdata'])[0])
if os.path.exists(outname):
output_exists = True
# Output was specified as a filename, so open it in 'update' mode
outsci = get_data(configObj['outdata'])
if output_exists:
# we also need to read in the output WCS from pre-existing output
output_wcs = stwcs.wcsutil.HSTWCS(configObj['outdata'])
out_sci_hdr = fits.getheader(outname, memmap=False)
outexptime = out_sci_hdr['DRIZEXPT']
if 'ndrizim' in out_sci_hdr:
uniqid = out_sci_hdr['ndrizim']+1
else:
uniqid = 1
else: # otherwise, define the output WCS either from user pars or refimage
if util.is_blank(configObj['User WCS Parameters']['refimage']):
# Define a WCS based on user provided WCS values
# NOTE:
# All parameters must be specified, not just one or a few
if not util.is_blank(user_wcs_pars['outscale']):
output_wcs = wcs_functions.build_hstwcs(
user_wcs_pars['raref'], user_wcs_pars['decref'],
user_wcs_pars['xrefpix'], user_wcs_pars['yrefpix'],
user_wcs_pars['outnx'], user_wcs_pars['outny'],
user_wcs_pars['outscale'], user_wcs_pars['orient'] )
else:
# Define default WCS based on input image
applydist = True
if input_wcs.sip is None or input_wcs.instrument=='DEFAULT':
applydist = False
output_wcs = stwcs.distortion.utils.output_wcs([input_wcs],undistort=applydist)
else:
refimage = configObj['User WCS Parameters']['refimage']
refroot,extroot = fileutil.parseFilename(refimage)
if extroot is None:
fimg = fits.open(refroot, memmap=False)
for i,extn in enumerate(fimg):
if 'CRVAL1' in extn.header: # Key on CRVAL1 for valid WCS
refwcs = wcsutil.HSTWCS('{}[{}]'.format(refroot,i))
if refwcs.wcs.has_cd():
extroot = i
break
fimg.close()
# try to find extension with valid WCS
refimage = "{}[{}]".format(refroot,extroot)
# Define the output WCS based on a user specified reference image WCS
output_wcs = stwcs.wcsutil.HSTWCS(refimage)
# Initialize values used for combining results
outexptime = 0.0
uniqid = 1
# Set up the output data array and insure that the units for that array is 'cps'
if outsci is None:
# Define a default blank array based on definition of output_wcs
outsci = np.empty(output_wcs.array_shape, dtype=np.float32)
outsci.fill(np.nan)
else:
# Convert array to units of 'cps', if needed
if outexptime != 0.0:
np.divide(outsci, outexptime, outsci)
outsci = outsci.astype(np.float32)
# Now update output exposure time for additional input file
outexptime += expin
outwht = None
if not util.is_blank(configObj['outweight']):
outwht = get_data(configObj['outweight'])
if outwht is None:
outwht = np.zeros(output_wcs.array_shape, dtype=np.float32)
else:
outwht = outwht.astype(np.float32)
outcon = None
keep_con = False
if not util.is_blank(configObj['outcontext']):
outcon = get_data(configObj['outcontext'])
keep_con = True
if outcon is None:
outcon = np.zeros((1,) + output_wcs.array_shape, dtype=np.int32)
else:
outcon = outcon.astype(np.int32)
planeid = int((uniqid - 1)/ 32)
# Add a new plane to the context image if planeid overflows
while outcon.shape[0] <= planeid:
plane = np.zeros_like(outcon[0])
        outcon = np.append(outcon, plane[np.newaxis, :, :], axis=0)
# Interpret wt_scl parameter
if configObj['wt_scl'] == 'exptime':
wt_scl = expin
elif configObj['wt_scl'] == 'expsq':
wt_scl = expin*expin
else:
wt_scl = float(configObj['wt_scl'])
# Interpret coeffs parameter to determine whether to apply coeffs or not
undistort = True
if not configObj['coeffs'] or input_wcs.sip is None or input_wcs.instrument == 'DEFAULT':
undistort = False
# turn off use of coefficients if undistort is False (coeffs == False)
if not undistort:
input_wcs.sip = None
input_wcs.cpdis1 = None
input_wcs.cpdis2 = None
input_wcs.det2im = None
wcslin = distortion.utils.output_wcs([input_wcs],undistort=undistort)
# Perform actual drizzling now...
_vers = do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, scale_pars['in_units'],
wt_scl, wcslin_pscale=wcslin.pscale ,uniqid=uniqid,
pixfrac=configObj['pixfrac'], kernel=configObj['kernel'],
fillval=scale_pars['fillval'], stepsize=configObj['stepsize'],
wcsmap=None)
out_sci_handle,outextn = create_output(configObj['outdata'])
if not output_exists:
# Also, define default header based on input image Primary header
out_sci_handle[outextn].header = in_sci_phdr.copy()
# Update header of output image with exptime used to scale the output data
# if out_units is not counts, this will simply be a value of 1.0
# the keyword 'exptime' will always contain the total exposure time
# of all input image regardless of the output units
out_sci_handle[outextn].header['EXPTIME'] = outexptime
# create CTYPE strings
ctype1 = input_wcs.wcs.ctype[0]
ctype2 = input_wcs.wcs.ctype[1]
    if ctype1.find('-SIP') > -1: ctype1 = ctype1.replace('-SIP','')
    if ctype2.find('-SIP') > -1: ctype2 = ctype2.replace('-SIP','')
# Update header with WCS keywords
out_sci_handle[outextn].header['ORIENTAT'] = output_wcs.orientat
out_sci_handle[outextn].header['CD1_1'] = output_wcs.wcs.cd[0][0]
out_sci_handle[outextn].header['CD1_2'] = output_wcs.wcs.cd[0][1]
out_sci_handle[outextn].header['CD2_1'] = output_wcs.wcs.cd[1][0]
out_sci_handle[outextn].header['CD2_2'] = output_wcs.wcs.cd[1][1]
out_sci_handle[outextn].header['CRVAL1'] = output_wcs.wcs.crval[0]
out_sci_handle[outextn].header['CRVAL2'] = output_wcs.wcs.crval[1]
out_sci_handle[outextn].header['CRPIX1'] = output_wcs.wcs.crpix[0]
out_sci_handle[outextn].header['CRPIX2'] = output_wcs.wcs.crpix[1]
out_sci_handle[outextn].header['CTYPE1'] = ctype1
out_sci_handle[outextn].header['CTYPE2'] = ctype2
out_sci_handle[outextn].header['VAFACTOR'] = 1.0
if scale_pars['out_units'] == 'counts':
np.multiply(outsci, outexptime, outsci)
out_sci_handle[outextn].header['DRIZEXPT'] = outexptime
else:
out_sci_handle[outextn].header['DRIZEXPT'] = 1.0
# Update header keyword NDRIZIM to keep track of how many images have
# been combined in this product so far
out_sci_handle[outextn].header['NDRIZIM'] = uniqid
#define keywords to be written out to product header
drizdict = outputimage.DRIZ_KEYWORDS.copy()
# Update drizdict with current values
drizdict['VER']['value'] = _vers[:44]
drizdict['DATA']['value'] = configObj['input'][:64]
drizdict['DEXP']['value'] = expin
drizdict['OUDA']['value'] = configObj['outdata'][:64]
drizdict['OUWE']['value'] = configObj['outweight'][:64]
drizdict['OUCO']['value'] = configObj['outcontext'][:64]
drizdict['MASK']['value'] = configObj['inweight'][:64]
drizdict['WTSC']['value'] = wt_scl
drizdict['KERN']['value'] = configObj['kernel']
drizdict['PIXF']['value'] = configObj['pixfrac']
drizdict['OUUN']['value'] = scale_pars['out_units']
drizdict['FVAL']['value'] = scale_pars['fillval']
drizdict['WKEY']['value'] = configObj['wcskey']
outputimage.writeDrizKeywords(out_sci_handle[outextn].header,uniqid,drizdict)
# add output array to output file
out_sci_handle[outextn].data = outsci
out_sci_handle.close()
if not util.is_blank(configObj['outweight']):
out_wht_handle,outwhtext = create_output(configObj['outweight'])
out_wht_handle[outwhtext].header = out_sci_handle[outextn].header.copy()
out_wht_handle[outwhtext].data = outwht
out_wht_handle.close()
if keep_con:
out_con_handle,outconext = create_output(configObj['outcontext'])
out_con_handle[outconext].data = outcon
out_con_handle.close()
|
Interface for running `wdrizzle` from TEAL or Python command-line.
This code performs all file ``I/O`` to set up the use of the drizzle code for
a single exposure to replicate the functionality of the original `wdrizzle`.
|
entailment
|
def mergeDQarray(maskname,dqarr):
""" Merge static or CR mask with mask created from DQ array on-the-fly here.
"""
maskarr = None
if maskname is not None:
if isinstance(maskname, str):
# working with file on disk (default case)
if os.path.exists(maskname):
mask = fileutil.openImage(maskname, memmap=False)
                maskarr = mask[0].data.astype(bool)
mask.close()
else:
if isinstance(maskname, fits.HDUList):
# working with a virtual input file
                maskarr = maskname[0].data.astype(bool)
            else:
                maskarr = maskname.data.astype(bool)
if maskarr is not None:
# merge array with dqarr now
np.bitwise_and(dqarr,maskarr,dqarr)
|
Merge static or CR mask with mask created from DQ array on-the-fly here.
|
entailment
|
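A minimal numpy sketch of what the merge step does: the working DQ mask (1 = good) is AND-ed in place with the additional mask, so a pixel stays good only if both masks agree. The array names below are illustrative.

import numpy as np

dqmask = np.array([1, 1, 0, 1], dtype=np.uint8)   # mask built from the DQ array
static = np.array([1, 0, 1, 1], dtype=np.uint8)   # e.g. a static or CR mask read from disk
np.bitwise_and(dqmask, static, dqmask)            # in-place AND, same call pattern as above
print(dqmask)                                     # -> [1 0 0 1]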
def _setDefaults(configObj={}):
"""set up the default parameters to run drizzle
build,single,units,wt_scl,pixfrac,kernel,fillval,
rot,scale,xsh,ysh,blotnx,blotny,outnx,outny,data
Used exclusively for unit-testing, if any are defined.
"""
paramDict={"build":True,
"single":True,
"stepsize":10,
"in_units":"cps",
"wt_scl":1.,
"pixfrac":1.,
"kernel":"square",
"fillval":999.,
"maskval": None,
"rot":0.,
"scale":1.,
"xsh":0.,
"ysh":0.,
"blotnx":2048,
"blotny":2048,
"outnx":4096,
"outny":4096,
"data":None,
"driz_separate":True,
"driz_combine":False}
if(len(configObj) !=0):
for key in configObj.keys():
paramDict[key]=configObj[key]
return paramDict
|
set up the default parameters to run drizzle
build,single,units,wt_scl,pixfrac,kernel,fillval,
rot,scale,xsh,ysh,blotnx,blotny,outnx,outny,data
Used exclusively for unit-testing, if any are defined.
|
entailment
|
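A quick usage sketch of `_setDefaults`, assuming it is importable from this module: user-supplied values override the defaults, everything else keeps its default value.

pars = _setDefaults({'kernel': 'gaussian', 'pixfrac': 0.8})
print(pars['kernel'], pars['pixfrac'], pars['fillval'])
# -> gaussian 0.8 999.0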
def interpret_maskval(paramDict):
""" Apply logic for interpreting final_maskval value...
"""
# interpret user specified final_maskval value to use for initializing
# output SCI array...
if 'maskval' not in paramDict:
return 0
maskval = paramDict['maskval']
if maskval is None:
maskval = np.nan
else:
maskval = float(maskval) # just to be clear and absolutely sure...
return maskval
|
Apply logic for interpreting final_maskval value...
|
entailment
|
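A short usage sketch of `interpret_maskval`, assuming it is importable from this module; the dictionaries below are illustrative stand-ins for a real paramDict.

print(interpret_maskval({}))                  # -> 0    (no 'maskval' key)
print(interpret_maskval({'maskval': None}))   # -> nan  (output SCI initialized to NaN)
print(interpret_maskval({'maskval': '3.5'}))  # -> 3.5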
def run_driz(imageObjectList,output_wcs,paramDict,single,build,wcsmap=None):
""" Perform drizzle operation on input to create output.
    The input parameters were originally a list
of dictionaries, one for each input, that matches the
primary parameters for an ``IRAF`` `drizzle` task.
This method would then loop over all the entries in the
list and run `drizzle` for each entry.
Parameters required for input in paramDict:
build,single,units,wt_scl,pixfrac,kernel,fillval,
rot,scale,xsh,ysh,blotnx,blotny,outnx,outny,data
"""
# Insure that input imageObject is a list
if not isinstance(imageObjectList, list):
imageObjectList = [imageObjectList]
#
# Setup the versions info dictionary for output to PRIMARY header
# The keys will be used as the name reported in the header, as-is
#
_versions = {'AstroDrizzle':__version__,
'PyFITS':util.__fits_version__,
'Numpy':util.__numpy_version__}
# Set sub-sampling rate for drizzling
#stepsize = 2.0
log.info(' **Using sub-sampling value of %s for kernel %s' %
(paramDict['stepsize'], paramDict['kernel']))
maskval = interpret_maskval(paramDict)
outwcs = copy.deepcopy(output_wcs)
    # Check for existence of output file.
if (not single and build and
fileutil.findFile(imageObjectList[0].outputNames['outFinal'])):
log.info('Removing previous output product...')
os.remove(imageObjectList[0].outputNames['outFinal'])
# print out parameters being used for drizzling
log.info("Running Drizzle to create output frame with WCS of: ")
output_wcs.printwcs()
# Will we be running in parallel?
pool_size = util.get_pool_size(paramDict.get('num_cores'), len(imageObjectList))
will_parallel = single and pool_size > 1
if will_parallel:
log.info('Executing %d parallel workers' % pool_size)
else:
if single: # not yet an option for final drizzle, msg would confuse
log.info('Executing serially')
# Set parameters for each input and run drizzle on it here.
#
# Perform drizzling...
numctx = 0
for img in imageObjectList:
numctx += img._nmembers
_numctx = {'all':numctx}
# if single:
# Determine how many chips make up each single image
for img in imageObjectList:
for chip in img.returnAllChips(extname=img.scienceExt):
plsingle = chip.outputNames['outSingle']
if plsingle in _numctx: _numctx[plsingle] += 1
else: _numctx[plsingle] = 1
# Compute how many planes will be needed for the context image.
_nplanes = int((_numctx['all']-1) / 32) + 1
# For single drizzling or when context is turned off,
# minimize to 1 plane only...
if single or imageObjectList[0][1].outputNames['outContext'] in [None,'',' ']:
_nplanes = 1
#
# An image buffer needs to be setup for converting the input
# arrays (sci and wht) from FITS format to native format
# with respect to byteorder and byteswapping.
# This buffer should be reused for each input if possible.
#
_outsci = _outwht = _outctx = _hdrlist = None
if (not single) or \
(single and (not will_parallel) and (not imageObjectList[0].inmemory)):
# Note there are four cases/combinations for single drizzle alone here:
# (not-inmem, serial), (not-inmem, parallel), (inmem, serial), (inmem, parallel)
_outsci=np.empty(output_wcs.array_shape, dtype=np.float32)
_outsci.fill(maskval)
_outwht=np.zeros(output_wcs.array_shape, dtype=np.float32)
# initialize context to 3-D array but only pass appropriate plane to drizzle as needed
_outctx=np.zeros((_nplanes,) + output_wcs.array_shape, dtype=np.int32)
_hdrlist = []
# Keep track of how many chips have been processed
# For single case, this will determine when to close
# one product and open the next.
_chipIdx = 0
# Remember the name of the 1st image that goes into this particular product
# Insure that the header reports the proper values for the start of the
# exposure time used to make this; in particular, TIME-OBS and DATE-OBS.
template = None
#
# Work on each image
#
subprocs = []
for img in imageObjectList:
chiplist = img.returnAllChips(extname=img.scienceExt)
# How many inputs should go into this product?
num_in_prod = _numctx['all']
if single:
num_in_prod = _numctx[chiplist[0].outputNames['outSingle']]
# The name of the 1st image
fnames = []
for chip in chiplist:
fnames.append(chip.outputNames['data'])
if _chipIdx == 0:
template = fnames
else:
template.extend(fnames)
# Work each image, possibly in parallel
if will_parallel:
# use multiprocessing.Manager only if in parallel and in memory
if img.inmemory:
manager = multiprocessing.Manager()
dproxy = manager.dict(img.virtualOutputs) # copy & wrap it in proxy
img.virtualOutputs = dproxy
# parallelize run_driz_img (currently for separate drizzle only)
p = multiprocessing.Process(target=run_driz_img,
name='adrizzle.run_driz_img()', # for err msgs
args=(img,chiplist,output_wcs,outwcs,template,paramDict,
single,num_in_prod,build,_versions,_numctx,_nplanes,
_chipIdx,None,None,None,None,wcsmap))
subprocs.append(p)
else:
# serial run_driz_img run (either separate drizzle or final drizzle)
run_driz_img(img,chiplist,output_wcs,outwcs,template,paramDict,
single,num_in_prod,build,_versions,_numctx,_nplanes,
_chipIdx,_outsci,_outwht,_outctx,_hdrlist,wcsmap)
# Increment/reset master chip counter
_chipIdx += len(chiplist)
if _chipIdx == num_in_prod:
_chipIdx = 0
# do the join if we spawned tasks
if will_parallel:
mputil.launch_and_wait(subprocs, pool_size) # blocks till all done
del _outsci,_outwht,_outctx,_hdrlist
|
Perform drizzle operation on input to create output.
The input parameters were originally a list
of dictionaries, one for each input, that matches the
primary parameters for an ``IRAF`` `drizzle` task.
This method would then loop over all the entries in the
list and run `drizzle` for each entry.
Parameters required for input in paramDict:
build,single,units,wt_scl,pixfrac,kernel,fillval,
rot,scale,xsh,ysh,blotnx,blotny,outnx,outny,data
|
entailment
|
def run_driz_img(img,chiplist,output_wcs,outwcs,template,paramDict,single,
num_in_prod,build,_versions,_numctx,_nplanes,chipIdxCopy,
_outsci,_outwht,_outctx,_hdrlist,wcsmap):
""" Perform the drizzle operation on a single image.
This is separated out from :py:func:`run_driz` so as to keep together
the entirety of the code which is inside the loop over
images. See the :py:func:`run_driz` code for more documentation.
"""
maskval = interpret_maskval(paramDict)
    # Check for uninitialized inputs
here = _outsci is None and _outwht is None and _outctx is None
if _outsci is None:
_outsci=np.empty(output_wcs.array_shape, dtype=np.float32)
if single:
_outsci.fill(0)
else:
_outsci.fill(maskval)
if _outwht is None:
_outwht=np.zeros(output_wcs.array_shape, dtype=np.float32)
if _outctx is None:
_outctx = np.zeros((_nplanes,) + output_wcs.array_shape, dtype=np.int32)
if _hdrlist is None:
_hdrlist = []
# Work on each chip - note that they share access to the arrays above
for chip in chiplist:
# See if we will be writing out data
doWrite = chipIdxCopy == num_in_prod-1
# debuglog('#chips='+str(chipIdxCopy)+', num_in_prod='+\
# str(num_in_prod)+', single='+str(single)+', write='+\
# str(doWrite)+', here='+str(here))
# run_driz_chip
run_driz_chip(img,chip,output_wcs,outwcs,template,paramDict,
single,doWrite,build,_versions,_numctx,_nplanes,
chipIdxCopy,_outsci,_outwht,_outctx,_hdrlist,wcsmap)
# Increment chip counter (also done outside of this function)
chipIdxCopy += 1
#
# Reset for next output image...
#
if here:
del _outsci,_outwht,_outctx,_hdrlist
elif single:
np.multiply(_outsci,0.,_outsci)
np.multiply(_outwht,0.,_outwht)
np.multiply(_outctx,0,_outctx)
# this was "_hdrlist=[]", but we need to preserve the var ptr itself
while len(_hdrlist)>0: _hdrlist.pop()
|
Perform the drizzle operation on a single image.
This is separated out from :py:func:`run_driz` so as to keep together
the entirety of the code which is inside the loop over
images. See the :py:func:`run_driz` code for more documentation.
|
entailment
|
def run_driz_chip(img,chip,output_wcs,outwcs,template,paramDict,single,
doWrite,build,_versions,_numctx,_nplanes,_numchips,
_outsci,_outwht,_outctx,_hdrlist,wcsmap):
""" Perform the drizzle operation on a single chip.
This is separated out from `run_driz_img` so as to keep together
the entirety of the code which is inside the loop over
chips. See the `run_driz` code for more documentation.
"""
global time_pre_all, time_driz_all, time_post_all, time_write_all
epoch = time.time()
# Look for sky-subtracted product
if os.path.exists(chip.outputNames['outSky']):
chipextn = '['+chip.header['extname']+','+str(chip.header['extver'])+']'
_expname = chip.outputNames['outSky']+chipextn
else:
# If sky-subtracted product does not exist, use regular input
_expname = chip.outputNames['data']
log.info('-Drizzle input: %s' % _expname)
# Open the SCI image
_handle = fileutil.openImage(_expname, mode='readonly', memmap=False)
_sciext = _handle[chip.header['extname'],chip.header['extver']]
# Apply sky subtraction and unit conversion to input array
if chip.computedSky is None:
_insci = _sciext.data
else:
log.info("Applying sky value of %0.6f to %s"%(chip.computedSky,_expname))
_insci = _sciext.data - chip.computedSky
# If input SCI image is still integer format (RAW files)
# transform it to float32 for all subsequent operations
# needed for numpy >=1.12.x
if np.issubdtype(_insci[0,0],np.int16):
_insci = _insci.astype(np.float32)
_insci *= chip._effGain
# Set additional parameters needed by 'drizzle'
_in_units = chip.in_units.lower()
if _in_units == 'cps':
_expin = 1.0
else:
_expin = chip._exptime
####
#
# Put the units keyword handling in the imageObject class
#
####
# Determine output value of BUNITS
# and make sure it is not specified as 'ergs/cm...'
_bunit = chip._bunit
_bindx = _bunit.find('/')
if paramDict['units'] == 'cps':
# If BUNIT value does not specify count rate already...
if _bindx < 1:
# ... append '/SEC' to value
_bunit += '/S'
else:
# reset _bunit here to None so it does not
# overwrite what is already in header
_bunit = None
else:
if _bindx > 0:
# remove '/S'
_bunit = _bunit[:_bindx]
else:
# reset _bunit here to None so it does not
# overwrite what is already in header
_bunit = None
_uniqid = _numchips + 1
if _nplanes == 1:
# We need to reset what gets passed to TDRIZ
# when only 1 context image plane gets generated
# to prevent overflow problems with trying to access
# planes that weren't created for large numbers of inputs.
_uniqid = ((_uniqid-1) % 32) + 1
# Select which mask needs to be read in for drizzling
####
#
# Actually need to generate mask file here 'on-demand'
# and combine it with the static_mask for single_drizzle case...
#
####
# Build basic DQMask from DQ array and bits value
dqarr = img.buildMask(chip._chip,bits=paramDict['bits'])
# get correct mask filenames/objects
staticMaskName = chip.outputNames['staticMask']
crMaskName = chip.outputNames['crmaskImage']
if img.inmemory:
if staticMaskName in img.virtualOutputs:
staticMaskName = img.virtualOutputs[staticMaskName]
if crMaskName in img.virtualOutputs:
crMaskName = img.virtualOutputs[crMaskName]
# Merge appropriate additional mask(s) with DQ mask
if single:
mergeDQarray(staticMaskName,dqarr)
if dqarr.sum() == 0:
log.warning('All pixels masked out when applying static mask!')
else:
mergeDQarray(staticMaskName,dqarr)
if dqarr.sum() == 0:
log.warning('All pixels masked out when applying static mask!')
else:
# Only apply cosmic-ray mask when some good pixels remain after
# applying the static mask
mergeDQarray(crMaskName,dqarr)
if dqarr.sum() == 0:
log.warning('WARNING: All pixels masked out when applying '
'cosmic ray mask to %s' % _expname)
updateInputDQArray(chip.dqfile,chip.dq_extn,chip._chip,
crMaskName, paramDict['crbit'])
img.set_wtscl(chip._chip,paramDict['wt_scl'])
pix_ratio = outwcs.pscale / chip.wcslin_pscale
# Convert mask to a datatype expected by 'tdriz'
# Also, base weight mask on ERR or IVM file as requested by user
wht_type = paramDict['wht_type']
if wht_type == 'ERR':
_inwht = img.buildERRmask(chip._chip,dqarr,pix_ratio)
elif wht_type == 'IVM':
_inwht = img.buildIVMmask(chip._chip,dqarr,pix_ratio)
elif wht_type == 'EXP':
_inwht = img.buildEXPmask(chip._chip,dqarr)
else: # wht_type == None, used for single drizzle images
_inwht = chip._exptime * dqarr.astype(np.float32)
if not(paramDict['clean']):
# Write out mask file if 'clean' has been turned off
if single:
step_mask = 'singleDrizMask'
else:
step_mask = 'finalMask'
_outmaskname = chip.outputNames[step_mask]
if os.path.exists(_outmaskname): os.remove(_outmaskname)
pimg = fits.PrimaryHDU(data=_inwht)
img.saveVirtualOutputs({step_mask:pimg})
# Only write out mask files if in_memory=False
if not img.inmemory:
pimg.writeto(_outmaskname)
del pimg
log.info('Writing out mask file: %s' % _outmaskname)
time_pre = time.time() - epoch; epoch = time.time()
# New interface to performing the drizzle operation on a single chip/image
_vers = do_driz(_insci, chip.wcs, _inwht, outwcs, _outsci, _outwht, _outctx,
_expin, _in_units, chip._wtscl,
wcslin_pscale=chip.wcslin_pscale, uniqid=_uniqid,
pixfrac=paramDict['pixfrac'], kernel=paramDict['kernel'],
fillval=paramDict['fillval'], stepsize=paramDict['stepsize'],
wcsmap=wcsmap)
time_driz = time.time() - epoch; epoch = time.time()
# Set up information for generating output FITS image
#### Check to see what names need to be included here for use in _hdrlist
chip.outputNames['driz_version'] = _vers
chip.outputNames['driz_wcskey'] = paramDict['wcskey']
outputvals = chip.outputNames.copy()
# Update entries for names/values based on final output
outputvals.update(img.outputValues)
for kw in img.outputNames:
if kw[:3] == 'out':
outputvals[kw] = img.outputNames[kw]
outputvals['exptime'] = chip._exptime
outputvals['expstart'] = chip._expstart
outputvals['expend'] = chip._expend
outputvals['wt_scl_val'] = chip._wtscl
_hdrlist.append(outputvals)
time_post = time.time() - epoch; epoch = time.time()
if doWrite:
###########################
#
# IMPLEMENTATION REQUIREMENT:
#
# Need to implement scaling of the output image
# from 'cps' to 'counts' in the case where 'units'
# was set to 'counts'... 21-Mar-2005
#
###########################
# Convert output data from electrons/sec to counts/sec as specified
native_units = img.native_units
if paramDict['proc_unit'].lower() == 'native' and native_units.lower()[:6] == 'counts':
np.divide(_outsci, chip._gain, _outsci)
_bunit = native_units.lower()
if paramDict['units'] == 'counts':
indx = _bunit.find('/')
if indx > 0: _bunit = _bunit[:indx]
# record IDCSCALE for output to product header
paramDict['idcscale'] = chip.wcs.idcscale
#If output units were set to 'counts', rescale the array in-place
if paramDict['units'] == 'counts':
#determine what exposure time needs to be used
# to rescale the product.
if single:
_expscale = chip._exptime
else:
_expscale = img.outputValues['texptime']
np.multiply(_outsci, _expscale, _outsci)
#
# Write output arrays to FITS file(s)
#
if not single:
img.inmemory = False
_outimg = outputimage.OutputImage(_hdrlist, paramDict, build=build,
wcs=output_wcs, single=single)
_outimg.set_bunit(_bunit)
_outimg.set_units(paramDict['units'])
outimgs = _outimg.writeFITS(template,_outsci,_outwht,ctxarr=_outctx,
versions=_versions,virtual=img.inmemory)
del _outimg
# update imageObject with product in memory
if single:
img.saveVirtualOutputs(outimgs)
# this is after the doWrite
time_write = time.time() - epoch; epoch = time.time()
if False and not single: # turn off all this perf reporting for now
time_pre_all.append(time_pre)
time_driz_all.append(time_driz)
time_post_all.append(time_post)
time_write_all.append(time_write)
log.info('chip time pre-drizzling: %6.3f' % time_pre)
log.info('chip time drizzling: %6.3f' % time_driz)
log.info('chip time post-drizzling: %6.3f' % time_post)
log.info('chip time writing output: %6.3f' % time_write)
if doWrite:
tot_pre = sum(time_pre_all)
tot_driz = sum(time_driz_all)
tot_post = sum(time_post_all)
tot_write = sum(time_write_all)
tot = tot_pre+tot_driz+tot_post+tot_write
log.info('chip total pre-drizzling: %6.3f (%4.1f%%)' % (tot_pre, (100.*tot_pre/tot)))
log.info('chip total drizzling: %6.3f (%4.1f%%)' % (tot_driz, (100.*tot_driz/tot)))
log.info('chip total post-drizzling: %6.3f (%4.1f%%)' % (tot_post, (100.*tot_post/tot)))
log.info('chip total writing output: %6.3f (%4.1f%%)' % (tot_write, (100.*tot_write/tot)))
|
Perform the drizzle operation on a single chip.
This is separated out from `run_driz_img` so as to keep together
the entirety of the code which is inside the loop over
chips. See the `run_driz` code for more documentation.
|
entailment
|
def do_driz(insci, input_wcs, inwht,
output_wcs, outsci, outwht, outcon,
expin, in_units, wt_scl,
wcslin_pscale=1.0,uniqid=1, pixfrac=1.0, kernel='square',
fillval="INDEF", stepsize=10,wcsmap=None):
"""
Core routine for performing 'drizzle' operation on a single input image
All input values will be Python objects such as ndarrays, instead
of filenames.
    File handling (input and output) will be performed by the calling routine.
"""
# Insure that the fillval parameter gets properly interpreted for use with tdriz
if util.is_blank(fillval):
fillval = 'INDEF'
else:
fillval = str(fillval)
if in_units == 'cps':
expscale = 1.0
else:
expscale = expin
# Compute what plane of the context image this input would
# correspond to:
planeid = int((uniqid-1) / 32)
# Check if the context image has this many planes
if outcon.ndim == 3:
nplanes = outcon.shape[0]
elif outcon.ndim == 2:
nplanes = 1
else:
nplanes = 0
if nplanes <= planeid:
raise IndexError("Not enough planes in drizzle context image")
# Alias context image to the requested plane if 3d
if outcon.ndim == 2:
outctx = outcon
else:
outctx = outcon[planeid]
pix_ratio = output_wcs.pscale/wcslin_pscale
if wcsmap is None and cdriz is not None:
log.info('Using WCSLIB-based coordinate transformation...')
log.info('stepsize = %s' % stepsize)
mapping = cdriz.DefaultWCSMapping(
input_wcs, output_wcs,
input_wcs.pixel_shape[0], input_wcs.pixel_shape[1],
stepsize
)
else:
#
##Using the Python class for the WCS-based transformation
#
# Use user provided mapping function
log.info('Using coordinate transformation defined by user...')
if wcsmap is None:
wcsmap = wcs_functions.WCSMap
wmap = wcsmap(input_wcs,output_wcs)
mapping = wmap.forward
_shift_fr = 'output'
_shift_un = 'output'
ystart = 0
nmiss = 0
nskip = 0
#
# This call to 'cdriz.tdriz' uses the new C syntax
#
_dny = insci.shape[0]
# Call 'drizzle' to perform image combination
if insci.dtype > np.float32:
#WARNING: Input array recast as a float32 array
insci = insci.astype(np.float32)
_vers,nmiss,nskip = cdriz.tdriz(insci, inwht, outsci, outwht,
outctx, uniqid, ystart, 1, 1, _dny,
pix_ratio, 1.0, 1.0, 'center', pixfrac,
kernel, in_units, expscale, wt_scl,
fillval, nmiss, nskip, 1, mapping)
if nmiss > 0:
log.warning('! %s points were outside the output image.' % nmiss)
if nskip > 0:
log.debug('! Note, %s input lines were skipped completely.' % nskip)
return _vers
|
Core routine for performing 'drizzle' operation on a single input image
All input values will be Python objects such as ndarrays, instead
of filenames.
File handling (input and output) will be performed by the calling routine.
|
entailment
|
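The bookkeeping at the top of `do_driz` maps each input (`uniqid`, 1-based) onto one plane of the 32-bit context image via `planeid = (uniqid - 1) // 32`. A small sketch of that arithmetic, independent of the drizzle call itself; the bit position shown follows the usual drizzle context convention and is an assumption, not taken from the code above.

for uniqid in (1, 32, 33, 65):
    planeid = (uniqid - 1) // 32     # which context plane this input lives in
    bit = (uniqid - 1) % 32          # which bit within that 32-bit plane (assumed convention)
    print('input %3d -> plane %d, bit %d' % (uniqid, planeid, bit))
# input   1 -> plane 0, bit 0
# input  32 -> plane 0, bit 31
# input  33 -> plane 1, bit 0
# input  65 -> plane 2, bit 0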
def _regwrite(shapelist,outfile):
""" Writes the current shape list out as a region file """
# This function corrects bugs and provides improvements over the pyregion's
# ShapeList.write method in the following:
#
# 1. ShapeList.write crashes if regions have no comments;
# 2. ShapeList.write converts 'exclude' ("-") regions to normal regions ("+");
# 3. ShapeList.write does not support mixed coordinate systems in a
# region list.
#
    # NOTE: This function is provided as a temporary workaround for the above
# listed problems of the ShapeList.write. We hope that a future version
# of pyregion will address all these issues.
#
#TODO: Push these changes to pyregion.
if len(shapelist) < 1:
_print_warning("The region list is empty. The region file \"%s\" "\
"will be empty." % outfile)
try:
outf = open(outfile,'w')
outf.close()
return
except IOError as e:
cmsg = "Unable to create region file \'%s\'." % outfile
if e.args:
e.args = (e.args[0] + "\n" + cmsg,) + e.args[1:]
else:
e.args=(cmsg,)
raise e
except:
raise
prev_cs = shapelist[0].coord_format
outf = None
try:
outf = open(outfile,'w')
attr0 = shapelist[0].attr[1]
defaultline = " ".join(["%s=%s" % (a,attr0[a]) for a in attr0 \
if a!='text'])
# first line is globals
print("global", defaultline, file=outf)
# second line must be a coordinate format
print(prev_cs, file=outf)
for shape in shapelist:
shape_attr = '' if prev_cs == shape.coord_format \
else shape.coord_format+"; "
shape_excl = '-' if shape.exclude else ''
text_coordlist = ["%f" % f for f in shape.coord_list]
shape_coords = "(" + ",".join(text_coordlist) + ")"
shape_comment = " # " + shape.comment if shape.comment else ''
shape_str = shape_attr + shape_excl + shape.name + shape_coords + \
shape_comment
print(shape_str, file=outf)
except IOError as e:
cmsg = "Unable to create region file \'%s\'." % outfile
if e.args:
e.args = (e.args[0] + "\n" + cmsg,) + e.args[1:]
else:
e.args=(cmsg,)
if outf: outf.close()
raise e
except:
if outf: outf.close()
raise
outf.close()
|
Writes the current shape list out as a region file
|
entailment
|
def _needs_ref_WCS(reglist):
""" Check if the region list contains shapes in image-like coordinates
"""
from pyregion.wcs_helper import image_like_coordformats
for r in reglist:
if r.coord_format in image_like_coordformats:
return True
return False
|
Check if the region list contains shapes in image-like coordinates
|
entailment
|
def extension_from_filename(filename):
"""
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
"""
# Parse out any extension specified in filename
_indx1 = filename.find('[')
_indx2 = filename.find(']')
if _indx1 > 0:
# check for closing square bracket:
if _indx2 < _indx1:
raise RuntimeError("Incorrect extension specification in file " \
"name \'%s\'." % filename)
# Read extension name provided
_fname = filename[:_indx1]
_extn = filename[_indx1+1:_indx2].strip()
else:
_fname = filename
_extn = None
return _fname, _extn
|
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
|
entailment
|
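Usage sketch for `extension_from_filename`, assuming it is importable from this module; the file names are illustrative.

print(extension_from_filename('j8cw02021_drz.fits[sci,1]'))
# -> ('j8cw02021_drz.fits', 'sci,1')
print(extension_from_filename('j8cw02021_drz.fits'))
# -> ('j8cw02021_drz.fits', None)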
def count_extensions(img, extname='SCI'):
""" Return the number of 'extname' extensions. 'img' can be either a file
name, an HDU List object (from fits), or None (to get the number of all
HDU headers.
"""
if isinstance(img, str):
img = fits.open(img, memmap=False)
img.close()
elif not isinstance(img, fits.HDUList):
raise TypeError("Argument 'img' must be either a file name (string) " \
"or a `astropy.io.fits.HDUList` object.")
if extname is None:
return len(img)
if not isinstance(extname, str):
raise TypeError("Argument 'extname' must be either a string " \
"indicating the value of the 'EXTNAME' keyword of the extensions " \
"to be counted or None to return the count of all HDUs in the " \
"'img' FITS file.")
extname = extname.upper()
n = 0
for e in img:
#if isinstance(e, fits.ImageHDU): continue
if 'EXTNAME' in list(map(str.upper, list(e.header.keys()))) \
and e.header['extname'].upper() == extname:
n += 1
return n
|
Return the number of 'extname' extensions. 'img' can be either a file
name or an `astropy.io.fits.HDUList` object. If 'extname' is None, the
count of all HDUs in the file is returned.
|
entailment
|
def get_extver_list(img, extname='SCI'):
""" Return a list of all extension versions of 'extname' extensions.
'img' can be either a file name or a HDU List object (from fits).
"""
if isinstance(img, str):
img = fits.open(img, memmap=False)
img.close()
elif not isinstance(img, fits.HDUList):
raise TypeError("Argument 'img' must be either a file name (string) " \
"or a fits.HDUList object.")
# when extver is None - return the range of all FITS extensions
if extname is None:
extver = list(range(len(img)))
return extver
if not isinstance(extname, str):
raise TypeError("Argument 'extname' must be either a string " \
"indicating the value of the 'EXTNAME' keyword of the extensions " \
"whose versions are to be returned or None to return " \
"extension numbers of all HDUs in the 'img' FITS file.")
extname = extname.upper()
extver = []
for e in img:
#if not isinstance(e, fits.ImageHDU): continue
hkeys = list(map(str.upper, list(e.header.keys())))
if 'EXTNAME' in hkeys and e.header['EXTNAME'].upper() == extname:
extver.append(e.header['EXTVER'] if 'EXTVER' in hkeys else 1)
return extver
|
Return a list of all extension versions of 'extname' extensions.
'img' can be either a file name or a HDU List object (from fits).
|
entailment
|
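A combined sketch exercising `count_extensions` and `get_extver_list` on a small in-memory HDUList, assuming both helpers are importable from this module:

import numpy as np
from astropy.io import fits

hdul = fits.HDUList([fits.PrimaryHDU()])
for ver in (1, 2):
    hdu = fits.ImageHDU(data=np.zeros((2, 2)), name='SCI')
    hdu.header['EXTVER'] = ver
    hdul.append(hdu)

print(count_extensions(hdul, 'SCI'))   # -> 2
print(get_extver_list(hdul, 'SCI'))    # -> [1, 2]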
def _check_FITS_extvers(img, extname, extvers):
"""Returns True if all (except None) extension versions specified by the
argument 'extvers' and that are of the type specified by the argument
'extname' are present in the 'img' FITS file. Returns False if some of the
extension versions for a given EXTNAME cannot be found in the FITS image.
"""
default_extn = 1 if isinstance(extname, str) else 0
if isinstance(extvers, list):
extv = [default_extn if ext is None else ext for ext in extvers]
else:
extv = [default_extn if extvers is None else extvers]
extv_in_fits = get_extver_list(img, extname)
return set(extv).issubset(set(extv_in_fits))
|
Returns True if all (except None) extension versions specified by the
argument 'extvers' and that are of the type specified by the argument
'extname' are present in the 'img' FITS file. Returns False if some of the
extension versions for a given EXTNAME cannot be found in the FITS image.
|
entailment
|
def run_generator(product_category,obs_info):
"""
This is the main calling subroutine. It decides which filename generation subroutine should be run based on the
input product_category, and then passes the information stored in input obs_info to the subroutine so that the
appropriate filenames can be generated.
Parameters
----------
product_category : string
The type of final output product which filenames will be generated for
obs_info : string
A string containing space-separated items that will be used to
generate the filenames.
Returns
--------
product_filename_dict : dictionary
A dictionary containing the generated filenames.
"""
category_generator_mapping = {'single exposure product': single_exposure_product_filename_generator,
'filter product': filter_product_filename_generator,
'total detection product': total_detection_product_filename_generator,
'multivisit mosaic product': multivisit_mosaic_product_filename_generator}
# Determine which name generator to use based on input product_category
for key in category_generator_mapping.keys():
if product_category.startswith(key):
generator_name = category_generator_mapping[key]
category_num = product_category.replace(key+" ","")
break
# parse out obs_info into a list
obs_info = obs_info.split(" ")
# pad 4-character proposal_id values with leading 0s so that proposal_id is
    # a 5-character string.
if key != "multivisit mosaic product": # pad
obs_info[0] = "{}{}".format("0"*(5-len(obs_info[0])),obs_info[0])
# generate and return filenames
product_filename_dict=generator_name(obs_info,category_num)
return(product_filename_dict)
|
This is the main calling subroutine. It decides which filename generation subroutine should be run based on the
input product_category, and then passes the information stored in input obs_info to the subroutine so that the
appropriate filenames can be generated.
Parameters
----------
product_category : string
The type of final output product which filenames will be generated for
obs_info : string
A string containing space-separated items that will be used to
generate the filenames.
Returns
--------
product_filename_dict : dictionary
A dictionary containing the generated filenames.
|
entailment
|
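Usage sketch for `run_generator`, assuming it is importable from this module; the proposal, visit, instrument, detector, filter, and ipppssoot values below are made up for illustration.

names = run_generator('single exposure product 01',
                      '98765 01 acs wfc f606w j12345678')
print(names['image'])
# -> hst_98765_01_acs_wfc_f606w_j12345678_01.fits
print(names['source catalog'])
# -> hst_98765_01_acs_wfc_f606w_j12345678_01.cat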
def single_exposure_product_filename_generator(obs_info,nn):
"""
Generate image and sourcelist filenames for single-exposure products
Parameters
----------
obs_info : list
list of items that will be used to generate the filenames: proposal_id,
visit_id, instrument, detector, filter, and ipppssoot
nn : string
the single-exposure image number (NOTE: only used in
single_exposure_product_filename_generator())
Returns
--------
product_filename_dict : dictionary
A dictionary containing the generated filenames.
"""
proposal_id = obs_info[0]
visit_id = obs_info[1]
instrument = obs_info[2]
detector = obs_info[3]
filter = obs_info[4]
ipppssoot = obs_info[5]
product_filename_dict = {}
product_filename_dict["image"] = "hst_{}_{}_{}_{}_{}_{}_{}.fits".format(proposal_id,visit_id,instrument,detector,filter,ipppssoot,nn)
product_filename_dict["source catalog"]= product_filename_dict["image"].replace(".fits",".cat")
return(product_filename_dict)
|
Generate image and sourcelist filenames for single-exposure products
Parameters
----------
obs_info : list
list of items that will be used to generate the filenames: proposal_id,
visit_id, instrument, detector, filter, and ipppssoot
nn : string
the single-exposure image number (NOTE: only used in
single_exposure_product_filename_generator())
Returns
--------
product_filename_dict : dictionary
A dictionary containing the generated filenames.
|
entailment
|
def filter_product_filename_generator(obs_info,nn):
"""
Generate image and sourcelist filenames for filter products
Parameters
----------
obs_info : list
list of items that will be used to generate the filenames: proposal_id,
visit_id, instrument, detector, and filter
nn : string
the single-exposure image number (NOTE: only used in
single_exposure_product_filename_generator())
Returns
--------
product_filename_dict : dictionary
A dictionary containing the generated filenames.
"""
proposal_id = obs_info[0]
visit_id = obs_info[1]
instrument = obs_info[2]
detector = obs_info[3]
filter = obs_info[4]
product_filename_dict = {}
product_filename_dict["image"] = "hst_{}_{}_{}_{}_{}.fits".format(proposal_id,visit_id,instrument,detector,filter)
product_filename_dict["source catalog"] = product_filename_dict["image"].replace(".fits",".cat")
return(product_filename_dict)
|
Generate image and sourcelist filenames for filter products
Parameters
----------
obs_info : list
list of items that will be used to generate the filenames: proposal_id,
visit_id, instrument, detector, and filter
nn : string
the single-exposure image number (NOTE: only used in
single_exposure_product_filename_generator())
Returns
--------
product_filename_dict : dictionary
A dictionary containing the generated filenames.
|
entailment
|
def total_detection_product_filename_generator(obs_info,nn):
"""
Generate image and sourcelist filenames for total detection products
Parameters
----------
obs_info : list
list of items that will be used to generate the filenames: proposal_id,
visit_id, instrument, and detector
nn : string
the single-exposure image number (NOTE: only used in
single_exposure_product_filename_generator())
Returns
--------
product_filename_dict : dictionary
A dictionary containing the generated filenames.
"""
proposal_id = obs_info[0]
visit_id = obs_info[1]
instrument = obs_info[2]
detector = obs_info[3]
product_filename_dict = {}
product_filename_dict["image"] = "hst_{}_{}_{}_{}.fits".format(proposal_id, visit_id, instrument, detector)
product_filename_dict["source catalog"] = product_filename_dict["image"].replace(".fits",".cat")
return (product_filename_dict)
|
Generate image and sourcelist filenames for total detection products
Parameters
----------
obs_info : list
list of items that will be used to generate the filenames: proposal_id,
visit_id, instrument, and detector
nn : string
the single-exposure image number (NOTE: only used in
single_exposure_product_filename_generator())
Returns
--------
product_filename_dict : dictionary
A dictionary containing the generated filenames.
|
entailment
|
def multivisit_mosaic_product_filename_generator(obs_info,nn):
"""
Generate image and sourcelist filenames for multi-visit mosaic products
Parameters
----------
obs_info : list
list of items that will be used to generate the filenames: group_id,
instrument, detector, and filter
nn : string
the single-exposure image number (NOTE: only used in
single_exposure_product_filename_generator())
Returns
--------
product_filename_dict : dictionary
A dictionary containing the generated filenames.
"""
group_num = obs_info[0]
instrument = obs_info[1]
detector = obs_info[2]
filter = obs_info[3]
product_filename_dict = {}
product_filename_dict["image"] = "hst_mos_{}_{}_{}_{}.fits".format(group_num,instrument,detector,filter)
product_filename_dict["source catalog"] = product_filename_dict["image"].replace(".fits",".cat")
return (product_filename_dict)
|
Generate image and sourcelist filenames for multi-visit mosaic products
Parameters
----------
obs_info : list
list of items that will be used to generate the filenames: group_id,
instrument, detector, and filter
nn : string
the single-exposure image number (NOTE: only used in
single_exposure_product_filename_generator())
Returns
--------
product_filename_dict : dictionary
A dictionary containing the generated filenames.
|
entailment
|
def build_referenceWCS(catalog_list):
""" Compute default reference WCS from list of Catalog objects.
"""
wcslist = []
for catalog in catalog_list:
for scichip in catalog.catalogs:
wcslist.append(catalog.catalogs[scichip]['wcs'])
return utils.output_wcs(wcslist)
|
Compute default reference WCS from list of Catalog objects.
|
entailment
|
def convex_hull(points):
"""Computes the convex hull of a set of 2D points.
Implements `Andrew's monotone chain algorithm <http://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain>`_.
The algorithm has O(n log n) complexity.
Credit: `<http://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain>`_
Parameters
----------
points : list of tuples
An iterable sequence of (x, y) pairs representing the points.
Returns
-------
Output : list
A list of vertices of the convex hull in counter-clockwise order,
starting from the vertex with the lexicographically smallest
coordinates.
"""
# Sort the points lexicographically (tuples are compared lexicographically).
# Remove duplicates to detect the case we have just one unique point.
points = sorted(set(points))
# Boring case: no points or a single point, possibly repeated multiple times.
if len(points) <= 1:
return points
# 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.
# Returns a positive value, if OAB makes a counter-clockwise turn,
# negative for clockwise turn, and zero if the points are collinear.
def cross(o, a, b):
return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
# Build lower hull
lower = []
for p in points:
while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:
lower.pop()
lower.append(p)
# Build upper hull
upper = []
for p in reversed(points):
while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:
upper.pop()
upper.append(p)
# Concatenation of the lower and upper hulls gives the convex hull.
# Last point of each list is omitted because it is repeated at the beginning of the other list.
return lower[:-1] + upper
|
Computes the convex hull of a set of 2D points.
Implements `Andrew's monotone chain algorithm <http://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain>`_.
The algorithm has O(n log n) complexity.
Credit: `<http://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain>`_
Parameters
----------
points : list of tuples
An iterable sequence of (x, y) pairs representing the points.
Returns
-------
Output : list
A list of vertices of the convex hull in counter-clockwise order,
starting from the vertex with the lexicographically smallest
coordinates.
|
entailment
|
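Usage sketch for `convex_hull`, assuming it is importable from this module. Note that, with the code as written above (the full upper hull is appended), the starting vertex reappears at the end of the returned list, closing the polygon.

pts = [(0, 0), (2, 0), (2, 2), (0, 2), (1, 1), (0, 0)]
print(convex_hull(pts))
# -> [(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)]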
def _estimate_2dhist_shift(imgxy, refxy, searchrad=3.0):
""" Create a 2D matrix-histogram which contains the delta between each
XY position and each UV position. Then estimate initial offset
between catalogs.
"""
print("Computing initial guess for X and Y shifts...")
# create ZP matrix
zpmat = _xy_2dhist(imgxy, refxy, r=searchrad)
nonzeros = np.count_nonzero(zpmat)
if nonzeros == 0:
# no matches within search radius. Return (0, 0):
print("WARNING: No matches found within a search radius of {:g} "
"pixels.".format(searchrad))
return 0.0, 0.0, 0, 0, zpmat, False
elif nonzeros == 1:
# only one non-zero bin:
yp, xp = np.unravel_index(np.argmax(zpmat), zpmat.shape)
maxval = int(np.ceil(zpmat[yp, xp]))
xp -= searchrad
yp -= searchrad
print("Found initial X and Y shifts of {:.4g}, {:.4g} "
"based on a single non-zero bin and {} matches"
.format(xp, yp, maxval))
return xp, yp, maxval, maxval, zpmat, True
(xp, yp), fit_status, fit_sl = _find_peak(zpmat, peak_fit_box=5,
mask=zpmat > 0)
if fit_status.startswith('ERROR'):
print("WARNING: No valid shift found within a search radius of {:g} "
"pixels.".format(searchrad))
maxval = int(np.ceil(zpmat.max()))
return 0.0, 0.0, maxval, maxval, zpmat, False
xp -= searchrad
yp -= searchrad
if fit_status == 'WARNING:EDGE':
print(
"WARNING: Found peak in the 2D histogram lies at the edge of "
"the histogram. Try increasing 'searchrad' for improved results."
)
# Attempt to estimate "significance of detection":
maxval = zpmat.max()
zpmat_mask = (zpmat > 0) & (zpmat < maxval)
if np.any(zpmat_mask):
bkg = zpmat[zpmat_mask].mean()
sig = maxval / np.sqrt(bkg)
flux = int(zpmat[fit_sl].sum())
print("Found initial X and Y shifts of {:.4g}, {:.4g} "
"with significance of {:.4g} and {:d} matches"
.format(xp, yp, sig, flux))
return xp, yp, int(np.ceil(maxval)), flux, zpmat, True
|
Create a 2D matrix-histogram which contains the delta between each
XY position and each UV position. Then estimate initial offset
between catalogs.
|
entailment
|
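A tiny numpy sketch of the idea behind `_estimate_2dhist_shift`: bin the (dx, dy) offsets between every image/reference pair into a 2D histogram and take the strongest bin as the initial shift. This is a conceptual illustration only, not the `_xy_2dhist` implementation itself; all values are made up.

import numpy as np

imgxy = np.array([[10.0, 12.0], [40.0, 55.0], [70.0, 23.0]])
refxy = imgxy + np.array([2.0, -1.0])        # true shift: dx=+2, dy=-1
searchrad = 3

# all pairwise offsets (ref - img), binned into a (2r+1) x (2r+1) histogram
d = (refxy[None, :, :] - imgxy[:, None, :]).reshape(-1, 2)
H, _, _ = np.histogram2d(d[:, 1], d[:, 0],
                         bins=2 * searchrad + 1,
                         range=[[-searchrad - 0.5, searchrad + 0.5]] * 2)
yp, xp = np.unravel_index(np.argmax(H), H.shape)
print('estimated shift: dx=%g dy=%g' % (xp - searchrad, yp - searchrad))
# -> estimated shift: dx=2 dy=-1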
def _find_peak(data, peak_fit_box=5, mask=None):
"""
Find location of the peak in an array. This is done by fitting a second
degree 2D polynomial to the data within a `peak_fit_box` and computing the
location of its maximum. An initial
estimate of the position of the maximum will be performed by searching
for the location of the pixel/array element with the maximum value.
Parameters
----------
data : numpy.ndarray
2D data.
peak_fit_box : int, optional
Size (in pixels) of the box around the initial estimate of the maximum
to be used for quadratic fitting from which peak location is computed.
It is assumed that fitting box is a square with sides of length
given by ``peak_fit_box``.
mask : numpy.ndarray, optional
A boolean type `~numpy.ndarray` indicating "good" pixels in image data
(`True`) and "bad" pixels (`False`). If not provided all pixels
in `image_data` will be used for fitting.
Returns
-------
coord : tuple of float
A pair of coordinates of the peak.
fit_status : str
Status of the peak search. Currently the following values can be
returned:
- ``'SUCCESS'``: Fit was successful and peak is not on the edge of
the input array;
- ``'ERROR:NODATA'``: Not enough valid data to perform the fit; The
returned coordinate is the center of input array;
- ``'WARNING:EDGE'``: Peak lies on the edge of the input array.
          Returned coordinates are the result of a discrete search;
        - ``'WARNING:BADFIT'``: The performed fit did not find a maximum. Returned
          coordinates are the result of a discrete search;
- ``'WARNING:CENTER-OF-MASS'``: Returned coordinates are the result
of a center-of-mass estimate instead of a polynomial fit. This is
either due to too few points to perform a fit or due to a
failure of the polynomial fit.
fit_box : a tuple of two tuples
A tuple of two tuples of the form ``((x1, x2), (y1, y2))`` that
indicates pixel ranges used for fitting (these indices can be used
directly for slicing input data)
"""
# check arguments:
data = np.asarray(data, dtype=np.float64)
ny, nx = data.shape
# find index of the pixel having maximum value:
if mask is None:
jmax, imax = np.unravel_index(np.argmax(data), data.shape)
coord = (float(imax), float(jmax))
else:
j, i = np.indices(data.shape)
i = i[mask]
j = j[mask]
if i.size == 0:
# no valid data:
coord = ((nx - 1.0) / 2.0, (ny - 1.0) / 2.0)
return coord, 'ERROR:NODATA', np.s_[0:ny-1, 0:nx-1]
ind = np.argmax(data[mask])
imax = i[ind]
jmax = j[ind]
coord = (float(imax), float(jmax))
if data[jmax, imax] < 1:
# no valid data: we need some counts in the histogram bins
coord = ((nx - 1.0) / 2.0, (ny - 1.0) / 2.0)
return coord, 'ERROR:NODATA', np.s_[0:ny-1, 0:nx-1]
# choose a box around maxval pixel:
x1 = max(0, imax - peak_fit_box // 2)
x2 = min(nx, x1 + peak_fit_box)
y1 = max(0, jmax - peak_fit_box // 2)
y2 = min(ny, y1 + peak_fit_box)
# if peak is at the edge of the box, return integer indices of the max:
if imax == x1 or imax == x2 or jmax == y1 or jmax == y2:
return (float(imax), float(jmax)), 'WARNING:EDGE', np.s_[y1:y2, x1:x2]
# expand the box if needed:
if (x2 - x1) < peak_fit_box:
if x1 == 0:
x2 = min(nx, x1 + peak_fit_box)
if x2 == nx:
x1 = max(0, x2 - peak_fit_box)
if (y2 - y1) < peak_fit_box:
if y1 == 0:
y2 = min(ny, y1 + peak_fit_box)
if y2 == ny:
y1 = max(0, y2 - peak_fit_box)
if x2 - x1 == 0 or y2 - y1 == 0:
# not enough data:
coord = ((nx - 1.0) / 2.0, (ny - 1.0) / 2.0)
return coord, 'ERROR:NODATA', np.s_[y1:y2, x1:x2]
# fit a 2D 2nd degree polynomial to data:
xi = np.arange(x1, x2)
yi = np.arange(y1, y2)
x, y = np.meshgrid(xi, yi)
x = x.ravel()
y = y.ravel()
v = np.vstack((np.ones_like(x), x, y, x*y, x*x, y*y)).T
d = data[y1:y2, x1:x2].ravel()
if mask is not None:
m = mask[y1:y2, x1:x2].ravel()
v = v[m]
d = d[m]
if d.size == 0 or np.max(d) <= 0:
coord = ((nx - 1.0) / 2.0, (ny - 1.0) / 2.0)
return coord, 'ERROR:NODATA', np.s_[y1:y2, x1:x2]
if d.size < 6:
# we need at least 6 points to fit a 2D quadratic polynomial
# attempt center-of-mass instead:
dt = d.sum()
xc = np.dot(v[:, 1], d) / dt
yc = np.dot(v[:, 2], d) / dt
return (xc, yc), 'WARNING:CENTER-OF-MASS', np.s_[y1:y2, x1:x2]
try:
c = np.linalg.lstsq(v, d, rcond=None)[0]
    except np.linalg.LinAlgError as e:
        print("WARNING: Least squares failed!\n{}".format(e))
# attempt center-of-mass instead:
dt = d.sum()
xc = np.dot(v[:, 1], d) / dt
yc = np.dot(v[:, 2], d) / dt
return (xc, yc), 'WARNING:CENTER-OF-MASS', np.s_[y1:y2, x1:x2]
# find maximum of the polynomial:
_, c10, c01, c11, c20, c02 = c
det = 4 * c02 * c20 - c11**2
if det <= 0 or ((c20 > 0.0 and c02 >= 0.0) or (c20 >= 0.0 and c02 > 0.0)):
# polynomial does not have max. return maximum value in the data:
return coord, 'WARNING:BADFIT', np.s_[y1:y2, x1:x2]
xm = (c01 * c11 - 2.0 * c02 * c10) / det
ym = (c10 * c11 - 2.0 * c01 * c20) / det
if 0.0 <= xm <= (nx - 1.0) and 0.0 <= ym <= (ny - 1.0):
coord = (xm, ym)
fit_status = 'SUCCESS'
else:
xm = 0.0 if xm < 0.0 else min(xm, nx - 1.0)
ym = 0.0 if ym < 0.0 else min(ym, ny - 1.0)
fit_status = 'WARNING:EDGE'
return coord, fit_status, np.s_[y1:y2, x1:x2]
|
Find location of the peak in an array. This is done by fitting a second
degree 2D polynomial to the data within a `peak_fit_box` and computing the
location of its maximum. An initial estimate of the position of the
maximum is obtained by searching for the pixel/array element with the
largest value.
Parameters
----------
data : numpy.ndarray
2D data.
peak_fit_box : int, optional
Size (in pixels) of the box around the initial estimate of the maximum
to be used for quadratic fitting from which peak location is computed.
It is assumed that fitting box is a square with sides of length
given by ``peak_fit_box``.
mask : numpy.ndarray, optional
A boolean type `~numpy.ndarray` indicating "good" pixels in image data
(`True`) and "bad" pixels (`False`). If not provided, all pixels
in ``data`` will be used for fitting.
Returns
-------
coord : tuple of float
A pair of coordinates of the peak.
fit_status : str
Status of the peak search. Currently the following values can be
returned:
- ``'SUCCESS'``: Fit was successful and peak is not on the edge of
the input array;
- ``'ERROR:NODATA'``: Not enough valid data to perform the fit; The
returned coordinate is the center of input array;
- ``'WARNING:EDGE'``: Peak lies on the edge of the input array.
Returned coordinates are the result of a discrete search;
- ``'WARNING:BADFIT'``: Performed fit did not find a maximum. Returned
coordinates are the result of a discrete search;
- ``'WARNING:CENTER-OF-MASS'``: Returned coordinates are the result
of a center-of-mass estimate instead of a polynomial fit. This is
either due to too few points to perform a fit or due to a
failure of the polynomial fit.
fit_box : tuple of slice
    A tuple of two slices of the form ``np.s_[y1:y2, x1:x2]`` indicating
    the pixel region used for fitting (it can be used directly to slice
    the input data).
|
entailment
|
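A brief usage sketch for the `_find_peak` function defined above: build a small synthetic array with a single bump and recover its sub-pixel location. The Gaussian center and width are arbitrary illustration values, and the exact recovered position will only be approximate since a quadratic is fit to a Gaussian profile.

import numpy as np

# synthetic 2D bump centered near (x, y) = (12.3, 7.8)
yy, xx = np.indices((21, 31))
data = 100.0 * np.exp(-((xx - 12.3) ** 2 + (yy - 7.8) ** 2) / (2.0 * 2.0 ** 2))

coord, status, box = _find_peak(data, peak_fit_box=5)
print(status, coord)   # expected: 'SUCCESS' with coord roughly (12.3, 7.8)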
def openFile(self, openDQ=False):
""" Open file and set up filehandle for image file
"""
if self._im.closed:
if not self._dq.closed:
self._dq.release()
assert(self._dq.closed)
fi = FileExtMaskInfo(clobber=False,
doNotOpenDQ=not openDQ,
im_fmode=self.open_mode)
fi.image = self.name
self._im = fi.image
fi.append_ext(spu.get_ext_list(self._im, extname='SCI'))
fi.finalize()
self._im = fi.image
self._dq = fi.DQimage
self._imext = fi.fext
self._dqext = fi.dqext
|
Open file and set up filehandle for image file
|
entailment
|
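For orientation only, a simplified sketch of the lazy-open pattern used by `openFile` above, written with plain `astropy.io.fits` instead of the `FileExtMaskInfo` helper; the class and attribute names here are illustrative assumptions, not the original implementation.

from astropy.io import fits

class LazyImage:
    """Open the FITS file only when needed and remember its SCI extensions."""
    def __init__(self, name, open_mode='readonly'):
        self.name = name
        self.open_mode = open_mode
        self._im = None
        self._imext = None

    def openFile(self):
        # (re)open only if there is no live file handle yet
        if self._im is None:
            self._im = fits.open(self.name, mode=self.open_mode)
            # record the indices of all SCI extensions for later use
            self._imext = [i for i, hdu in enumerate(self._im)
                           if hdu.name.upper() == 'SCI']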
def get_wcs(self):
""" Helper method to return a list of all the input WCS objects associated
with this image.
"""
wcslist = []
for chip in self.chip_catalogs:
wcslist.append(self.chip_catalogs[chip]['wcs'])
return wcslist
|
Helper method to return a list of all the input WCS objects associated
with this image.
|
entailment
|
def buildSkyCatalog(self):
""" Convert sky catalog for all chips into a single catalog for
the entire field-of-view of this image.
"""
self.all_radec = None
self.all_radec_orig = None
ralist = []
declist = []
fluxlist = []
idlist = []
for scichip in self.chip_catalogs:
skycat = self.chip_catalogs[scichip]['catalog'].radec
xycat = self.chip_catalogs[scichip]['catalog'].xypos
if skycat is not None:
ralist.append(skycat[0])
declist.append(skycat[1])
if xycat is not None and len(xycat) > 2:
fluxlist.append(xycat[2])
idlist.append(xycat[3])
elif len(skycat) > 2:
fluxlist.append(skycat[2])
idlist.append(skycat[3])
else:
fluxlist.append([999.0]*len(skycat[0]))
idlist.append(np.arange(len(skycat[0])))
self.all_radec = [np.concatenate(ralist),np.concatenate(declist),
np.concatenate(fluxlist),np.concatenate(idlist)]
self.all_radec_orig = copy.deepcopy(self.all_radec)
|
Convert sky catalog for all chips into a single catalog for
the entire field-of-view of this image.
|
entailment
|
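A rough sketch of the concatenation step performed by `buildSkyCatalog` above, assuming each per-chip catalog is a plain dict with `ra`, `dec`, and optionally `flux` arrays; these keys are placeholders, not the attributes used by the real catalog objects.

import numpy as np

def build_sky_catalog(chip_catalogs):
    """Merge per-chip sky catalogs into one field-of-view catalog."""
    ralist, declist, fluxlist, idlist = [], [], [], []
    for cat in chip_catalogs:
        ralist.append(cat['ra'])
        declist.append(cat['dec'])
        n = len(cat['ra'])
        # fall back to a dummy flux and running source ids when none are provided
        fluxlist.append(cat.get('flux', np.full(n, 999.0)))
        idlist.append(np.arange(n))
    return [np.concatenate(ralist), np.concatenate(declist),
            np.concatenate(fluxlist), np.concatenate(idlist)]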
def buildDefaultRefWCS(self):
""" Generate a default reference WCS for this image. """
self.default_refWCS = None
if self.use_wcs:
wcslist = []
for scichip in self.chip_catalogs:
wcslist.append(self.chip_catalogs[scichip]['wcs'])
self.default_refWCS = utils.output_wcs(wcslist)
|
Generate a default reference WCS for this image.
|
entailment
|
def transformToRef(self,ref_wcs,force=False):
""" Transform sky coords from ALL chips into X,Y coords in reference WCS.
"""
if not isinstance(ref_wcs, pywcs.WCS):
print(textutil.textbox('Reference WCS not a valid HSTWCS object'),
file=sys.stderr)
raise ValueError
# Need to concatenate catalogs from each input
if self.outxy is None or force:
outxy = ref_wcs.wcs_world2pix(self.all_radec[0],self.all_radec[1],self.origin)
# convert outxy list to a Nx2 array
self.outxy = np.column_stack([outxy[0][:,np.newaxis],outxy[1][:,np.newaxis]])
if self.pars['writecat']:
catname = self.rootname+"_refxy_catalog.coo"
self.write_outxy(catname)
self.catalog_names['ref_xy'] = catname
|
Transform sky coords from ALL chips into X,Y coords in reference WCS.
|
entailment
|
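Finally, a minimal sketch of the sky-to-reference-pixel transformation in `transformToRef` above, using a plain `astropy.wcs.WCS` in place of an HSTWCS object; the header values below are placeholders chosen only to make the example runnable.

import numpy as np
from astropy.wcs import WCS

# toy reference WCS (TAN projection); values are illustrative only
ref_wcs = WCS(naxis=2)
ref_wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
ref_wcs.wcs.crval = [150.0, 2.0]                    # reference RA, Dec in degrees
ref_wcs.wcs.crpix = [2048.0, 1024.0]                # reference pixel
ref_wcs.wcs.cdelt = [-1.0 / 3600.0, 1.0 / 3600.0]   # 1 arcsec pixels

ra = np.array([150.001, 149.999])
dec = np.array([2.0005, 1.9995])

# origin=0 gives zero-based pixel coordinates
x, y = ref_wcs.wcs_world2pix(ra, dec, 0)
outxy = np.column_stack([x, y])                     # Nx2 array, as in transformToRef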