| sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (1 class: entailment) |
|---|---|---|
def qderiv(array): # Take the absolute derivative of a NumPy array object
"""Take the absolute derivative of an image in memory."""
#Create 2 empty arrays in memory of the same dimensions as 'array'
tmpArray = np.zeros(array.shape,dtype=np.float64)
outArray = np.zeros(array.shape, dtype=np.float64)
# Get the length of an array side
(naxis1,naxis2) = array.shape
#print "The input image size is (",naxis1,",",naxis2,")."
#Main derivative loop:
#Shift images +/- 1 in Y.
for y in range(-1,2,2):
if y == -1:
#shift input image 1 pixel right
tmpArray[0:(naxis1-1),1:(naxis2-1)] = array[0:(naxis1-1),0:(naxis2-2)]
#print "Y shift = 1"
else:
#shift input image 1 pixel left
tmpArray[0:(naxis1-1),0:(naxis2-2)] = array[0:(naxis1-1),1:(naxis2-1)]
#print "Y shift = -1"
#print "call _absoluteSubtract()"
(tmpArray,outArray) = _absoluteSubtract(array,tmpArray,outArray)
#Shift images +/- 1 in X.
for x in range(-1,2,2):
if x == -1:
#shift input image 1 pixel right
tmpArray[1:(naxis1-1),0:(naxis2-1)] = array[0:(naxis1-2),0:(naxis2-1)]
#print "X shift = 1"
else:
#shift input image 1 pixel left
tmpArray[0:(naxis1-2),0:(naxis2-1)] = array[1:(naxis1-1),0:(naxis2-1)]
#print "X shift = -1"
#print "call _absoluteSubtract()"
(tmpArray,outArray) = _absoluteSubtract(array,tmpArray,outArray)
return outArray.astype(np.float32)
|
Take the absolute derivative of an image in memory.
|
entailment
|
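A minimal, self-contained sketch of the operation qderiv performs, assuming _absoluteSubtract (not shown in this snippet) keeps a running element-wise maximum of the absolute differences. Note that np.roll wraps at the edges whereas qderiv zero-pads, so border pixels differ:

import numpy as np

def abs_deriv_sketch(arr):
    """Element-wise max of |arr - shifted(arr)| over the four one-pixel shifts."""
    out = np.zeros_like(arr, dtype=np.float64)
    for axis in (0, 1):
        for step in (-1, 1):
            out = np.maximum(out, np.abs(arr - np.roll(arr, step, axis=axis)))
    return out.astype(np.float32)

print(abs_deriv_sketch(np.arange(16, dtype=np.float64).reshape(4, 4)))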
def randomSelectFromCSV(tableName, numEntries, seedValue):
"""Function to extract random entries (lines) from a CSV file
Parameters
==========
tableName: str
Filename of the input master CSV file containing individual
images or association names, as well as observational
information regarding the images
numEntries : int
Number of entries/rows to extract from the master input CSV file
seedValue : int
Value used to initialize the random number generator for the
selection of random entries
Returns
=======
outputTable : object
Astropy Table object
"""
# Initialize the random number generator
seed(seedValue)
# Get the contents of the table
dataTable = Table.read(tableName, format='ascii.csv')
numRows = len(dataTable)
# Generate a sequence of integers the size of the table, and then
# obtain a random subset of the sequence with no duplicate selections
sequence = list(range(numRows))
subset = sample(sequence, numEntries)
# Extract the subset rows...
outputTable = dataTable[subset]
#outputTable = dataTable[0:numEntries]
# Returns the outputTable which is an Astropy Table object
return(outputTable)
|
Function to extract random entries (lines) from a CSV file
Parameters
==========
tableName: str
Filename of the input master CSV file containing individual
images or association names, as well as observational
information regarding the images
numEntries : int
Number of entries/rows to extract from the master input CSV file
seedValue : int
Value used to initialize the random number generator for the
selection of random entries
Returns
=======
outputTable : object
Astropy Table object
|
entailment
|
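A short usage sketch with the stdlib random module and an in-memory Astropy table standing in for the master CSV (the filenames are hypothetical):

from random import seed, sample
from astropy.table import Table

t = Table({'image': ['a.fits', 'b.fits', 'c.fits', 'd.fits', 'e.fits']})
seed(42)                               # reproducible selection
subset = t[sample(range(len(t)), 3)]   # 3 distinct rows, no duplicates
print(subset)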
def get_hstwcs(filename,hdulist,extnum):
""" Return the HSTWCS object for a given chip. """
hdrwcs = wcsutil.HSTWCS(hdulist,ext=extnum)
hdrwcs.filename = filename
hdrwcs.expname = hdulist[extnum].header['expname']
hdrwcs.extver = hdulist[extnum].header['extver']
return hdrwcs
|
Return the HSTWCS object for a given chip.
|
entailment
|
def update_linCD(cdmat, delta_rot=0.0, delta_scale=1.0, cx=[0.0,1.0], cy=[1.0,0.0]):
""" Modify an existing linear CD matrix with rotation and/or scale changes
and return a new CD matrix. If 'cx' and 'cy' are specified, it will
return a distorted CD matrix.
Only those terms which are varying need to be specified on input.
"""
rotmat = fileutil.buildRotMatrix(delta_rot)*delta_scale
new_lincd = np.dot(cdmat,rotmat)
cxymat = np.array([[cx[1],cx[0]],[cy[1],cy[0]]])
new_cd = np.dot(new_lincd,cxymat)
return new_cd
|
Modify an existing linear CD matrix with rotation and/or scale changes
and return a new CD matrix. If 'cx' and 'cy' are specified, it will
return a distorted CD matrix.
Only those terms which are varying need to be specified on input.
|
entailment
|
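To illustrate the rotation/scale update with plain NumPy (the exact sign convention of fileutil.buildRotMatrix is an assumption here):

import numpy as np

def rot_matrix(deg):
    # stand-in for fileutil.buildRotMatrix (assumed convention)
    th = np.deg2rad(deg)
    return np.array([[np.cos(th), np.sin(th)],
                     [-np.sin(th), np.cos(th)]])

cd = np.array([[-1.0e-5, 0.0],     # toy CD matrix in deg/pixel
               [0.0, 1.0e-5]])
new_cd = np.dot(cd, rot_matrix(30.0) * 1.1)  # rotate 30 deg, scale by 1.1
print(new_cd)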
def create_CD(orient, scale, cx=None, cy=None):
""" Create a (un?)distorted CD matrix from the basic inputs.
The 'cx' and 'cy' parameters, if given, provide the X and Y coefficients of
the distortion as returned by reading the IDCTAB. Only the first 2 elements
are used and should correspond to the 'OC[X/Y]10' and 'OC[X/Y]11' terms in that
order as read from the expanded SIP headers.
The units of 'scale' should be 'arcseconds/pixel' of the reference pixel.
The value of 'orient' should be the absolute orientation on the sky of the
reference pixel.
"""
# Use an identity matrix when no distortion terms are given (per the docstring, cx/cy are optional)
cxymat = np.identity(2) if cx is None or cy is None else np.array([[cx[1],cx[0]],[cy[1],cy[0]]])
rotmat = fileutil.buildRotMatrix(orient)*scale/3600.
new_cd = np.dot(rotmat,cxymat)
return new_cd
|
Create a (un?)distorted CD matrix from the basic inputs.
The 'cx' and 'cy' parameters, if given, provide the X and Y coefficients of
the distortion as returned by reading the IDCTAB. Only the first 2 elements
are used and should correspond to the 'OC[X/Y]10' and 'OC[X/Y]11' terms in that
order as read from the expanded SIP headers.
The units of 'scale' should be 'arcseconds/pixel' of the reference pixel.
The value of 'orient' should be the absolute orientation on the sky of the
reference pixel.
|
entailment
|
def ddtohms(xsky,ysky,verbose=False,precision=6):
""" Convert sky position(s) from decimal degrees to HMS format. """
xskyh = xsky /15.
xskym = (xskyh - np.floor(xskyh)) * 60.
xskys = (xskym - np.floor(xskym)) * 60.
yskym = (np.abs(ysky) - np.floor(np.abs(ysky))) * 60.
yskys = (yskym - np.floor(yskym)) * 60.
fmt = "%."+repr(precision)+"f"
if isinstance(xskyh,np.ndarray):
rah,dech = [],[]
for i in range(len(xskyh)):
rastr = repr(int(xskyh[i]))+':'+repr(int(xskym[i]))+':'+fmt%(xskys[i])
decstr = repr(int(ysky[i]))+':'+repr(int(yskym[i]))+':'+fmt%(yskys[i])
rah.append(rastr)
dech.append(decstr)
if verbose:
print('RA = ',rastr,', Dec = ',decstr)
else:
rastr = repr(int(xskyh))+':'+repr(int(xskym))+':'+fmt%(xskys)
decstr = repr(int(ysky))+':'+repr(int(yskym))+':'+fmt%(yskys)
rah = rastr
dech = decstr
if verbose:
print('RA = ',rastr,', Dec = ',decstr)
return rah,dech
|
Convert sky position(s) from decimal degrees to HMS format.
|
entailment
|
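A worked example of the degrees-to-HMS arithmetic used above: 150.1 degrees of RA is 10h 00m 24s, since 150.1 / 15 = 10.00667 hours and the fractional parts cascade through minutes and seconds:

import numpy as np

ra = 150.1                         # decimal degrees
h = ra / 15.0                      # 10.00667 hours
m = (h - np.floor(h)) * 60.0       # 0.4 minutes
s = (m - np.floor(m)) * 60.0       # 24.0 seconds
print('%d:%d:%.6f' % (h, m, s))    # 10:0:24.000000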
def make_outputwcs(imageObjectList, output, configObj=None, perfect=False):
""" Computes the full output WCS based on the set of input imageObjects
provided as input, along with the pre-determined output name from
process_input. The user specified output parameters are then used to
modify the default WCS to produce the final desired output frame.
The input imageObjectList has the outputValues dictionary
updated with the information from the computed output WCS.
It then returns this WCS as a WCSObject(imageObject)
instance.
"""
if not isinstance(imageObjectList,list):
imageObjectList = [imageObjectList]
# Compute default output WCS, replace later if user specifies a refimage
hstwcs_list = []
undistort=True
for img in imageObjectList:
chip_wcs = copy.deepcopy(img.getKeywordList('wcs'))
# IF the user turned off use of coeffs (coeffs==False)
if not configObj['coeffs']:
for cw in chip_wcs:
# Turn off distortion model for each input
cw.sip = None
cw.cpdis1 = None
cw.cpdis2 = None
cw.det2im = None
undistort=False
hstwcs_list += chip_wcs
if not undistort and len(hstwcs_list) == 1:
default_wcs = hstwcs_list[0].deepcopy()
else:
default_wcs = utils.output_wcs(hstwcs_list, undistort=undistort)
if perfect:
default_wcs.wcs.cd = make_perfect_cd(default_wcs)
# Turn WCS instances into WCSObject instances
outwcs = createWCSObject(output, default_wcs, imageObjectList)
# Merge in user-specified attributes for the output WCS
# as recorded in the input configObj object.
final_pars = DEFAULT_WCS_PARS.copy()
# More interpretation of the configObj needs to be done here to translate
# the input parameter names to those understood by 'mergeWCS' as defined
# by the DEFAULT_WCS_PARS dictionary.
single_step = configObj[util.getSectionName(configObj, 3)]
singleParDict = configObj[util.getSectionName(configObj, '3a')].copy()
if single_step['driz_separate'] and singleParDict['driz_sep_wcs']:
single_pars = DEFAULT_WCS_PARS.copy()
del singleParDict['driz_sep_wcs']
keyname = 'driz_sep_'
for key in singleParDict:
k = key[len(keyname):]
if k != 'refimage':
single_pars[k] = singleParDict[key]
# Now, account for any user-specified reference image
def_wcs = default_wcs.deepcopy()
if singleParDict[keyname + 'refimage']:
default_wcs = wcsutil.HSTWCS(singleParDict[keyname + 'refimage'])
### Create single_wcs instance based on user parameters
outwcs.single_wcs = mergeWCS(default_wcs, single_pars)
# restore global default WCS to original value so single_drizzle WCS does not
# influence final_drizzle WCS
default_wcs = def_wcs.deepcopy()
final_step = configObj[util.getSectionName(configObj,7)]
finalParDict = configObj[util.getSectionName(configObj,'7a')].copy()
if final_step['driz_combine'] and finalParDict['final_wcs']:
del finalParDict['final_wcs']
keyname = 'final_'
for key in finalParDict:
k = key[len(keyname):]
if k != 'refimage':
final_pars[k] = finalParDict[key]
# Now, account for any user-specified reference image
if finalParDict[keyname + 'refimage']:
rootname,extnum = fileutil.parseFilename(finalParDict[keyname+'refimage'])
extnum = util.findWCSExtn(finalParDict[keyname+'refimage'])
print('Creating OUTPUT WCS from {}[{}]'.format(rootname,extnum))
default_wcs = wcsutil.HSTWCS('{}[{}]'.format(rootname,extnum))
### Create single_wcs instance based on user parameters
outwcs.final_wcs = mergeWCS(default_wcs, final_pars)
outwcs.wcs = outwcs.final_wcs.copy()
# Apply user settings to create custom output_wcs instances
# for each drizzle step
updateImageWCS(imageObjectList, outwcs)
return outwcs
|
Computes the full output WCS based on the set of input imageObjects
provided as input, along with the pre-determined output name from
process_input. The user specified output parameters are then used to
modify the default WCS to produce the final desired output frame.
The input imageObjectList has the outputValues dictionary
updated with the information from the computed output WCS.
It then returns this WCS as a WCSObject(imageObject)
instance.
|
entailment
|
def make_perfect_cd(wcs):
""" Create a perfect (square, orthogonal, undistorted) CD matrix from the
input WCS.
"""
def_scale = (wcs.pscale) / 3600.
def_orientat = np.deg2rad(wcs.orientat)
perfect_cd = def_scale * np.array(
[[-np.cos(def_orientat),np.sin(def_orientat)],
[np.sin(def_orientat),np.cos(def_orientat)]]
)
return perfect_cd
|
Create a perfect (square, orthogonal, undistorted) CD matrix from the
input WCS.
|
entailment
|
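The returned matrix is orthogonal with equal-length columns, which is what makes it "perfect"; a quick standalone check of that property, using an arbitrary scale and orientation:

import numpy as np

scale = 0.05 / 3600.0              # 0.05 arcsec/pixel in degrees
th = np.deg2rad(30.0)
cd = scale * np.array([[-np.cos(th), np.sin(th)],
                       [ np.sin(th), np.cos(th)]])
# cd.T @ cd reduces to scale**2 * I for a square, orthogonal CD matrix
print(np.allclose(cd.T @ cd, scale**2 * np.eye(2)))  # True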
def calcNewEdges(wcs, shape):
"""
Compute sky coordinates for all the pixels around
the edge of an image AFTER applying the geometry model.
Parameters
----------
wcs : obj
HSTWCS object for image
shape : tuple
numpy shape tuple for size of image
Returns
-------
edges : arr
array which contains the new sky positions (RA, Dec) for
all pixels around the border of the image
"""
naxis1 = shape[1]
naxis2 = shape[0]
# build up arrays for pixel positions for the edges
# These arrays need to be: array([(x,y),(x1,y1),...])
numpix = naxis1*2 + naxis2*2
border = np.zeros(shape=(numpix,2),dtype=np.float64)
# Now determine the appropriate values for this array
# We also need to account for any subarray offsets
xmin = 1.
xmax = naxis1
ymin = 1.
ymax = naxis2
# Build range of pixel values for each side
# Add 1 to make them consistent with pixel numbering in IRAF
# Also include the LTV offsets to represent position in full chip
# since the model works relative to full chip positions.
xside = np.arange(naxis1) + xmin
yside = np.arange(naxis2) + ymin
#Now apply them to the array to generate the appropriate tuples
#bottom
_range0 = 0
_range1 = naxis1
border[_range0:_range1,0] = xside
border[_range0:_range1,1] = ymin
#top
_range0 = _range1
_range1 = _range0 + naxis1
border[_range0:_range1,0] = xside
border[_range0:_range1,1] = ymax
#left
_range0 = _range1
_range1 = _range0 + naxis2
border[_range0:_range1,0] = xmin
border[_range0:_range1,1] = yside
#right
_range0 = _range1
_range1 = _range0 + naxis2
border[_range0:_range1,0] = xmax
border[_range0:_range1,1] = yside
edges = wcs.all_pix2world(border[:,0],border[:,1],1)
return edges
|
Compute sky coordinates for all the pixels around
the edge of an image AFTER applying the geometry model.
Parameters
----------
wcs : obj
HSTWCS object for image
shape : tuple
numpy shape tuple for size of image
Returns
-------
edges : arr
array which contains the new sky positions (RA, Dec) for
all pixels around the border of the image
|
entailment
|
def createWCSObject(output,default_wcs,imageObjectList):
"""Converts a PyWCS WCS object into a WCSObject(baseImageObject) instance."""
from . import imageObject
outwcs = imageObject.WCSObject(output)
outwcs.default_wcs = default_wcs
outwcs.wcs = default_wcs.copy()
outwcs.final_wcs = default_wcs.copy()
outwcs.single_wcs = default_wcs.copy()
outwcs.updateContextImage(imageObjectList[0].createContext)
#
# Add exptime information for use with drizzle
#
outwcs._exptime,outwcs._expstart,outwcs._expend = util.compute_texptime(imageObjectList)
outwcs.nimages = util.countImages(imageObjectList)
return outwcs
|
Converts a PyWCS WCS object into a WCSObject(baseImageObject) instance.
|
entailment
|
def removeAllAltWCS(hdulist,extlist):
"""
Removes all alternate WCS solutions from the header
"""
original_logging_level = log.level
log.setLevel(logutil.logging.WARNING)
try:
hdr = hdulist[extlist[0]].header
wkeys = altwcs.wcskeys(hdr)
if ' ' in wkeys:
wkeys.remove(' ')
for extn in extlist:
for wkey in wkeys:
if wkey == 'O':
continue
altwcs.deleteWCS(hdulist,extn,wkey)
# Forcibly remove OPUS WCS Keywords, since deleteWCS will not do it
hwcs = readAltWCS(hdulist,extn,wcskey='O')
if hwcs is None:
continue
for k in hwcs.keys():
if k not in ['DATE-OBS','MJD-OBS'] and k in hdr:
try:
del hdr[k]
except KeyError:
pass
finally:
log.setLevel(original_logging_level)
|
Removes all alternate WCS solutions from the header
|
entailment
|
def restoreDefaultWCS(imageObjectList, output_wcs):
""" Restore WCS information to default values, and update imageObject
accordingly.
"""
if not isinstance(imageObjectList,list):
imageObjectList = [imageObjectList]
output_wcs.restoreWCS()
updateImageWCS(imageObjectList, output_wcs)
|
Restore WCS information to default values, and update imageObject
accordingly.
|
entailment
|
def _py2round(x):
"""
This function returns a rounded up value of the argument, similar
to Python 2.
"""
if hasattr(x, '__iter__'):
rx = np.empty_like(x)
m = x >= 0.0
rx[m] = np.floor(x[m] + 0.5)
m = np.logical_not(m)
rx[m] = np.ceil(x[m] - 0.5)
return rx
else:
if x >= 0.0:
return np.floor(x + 0.5)
else:
return np.ceil(x - 0.5)
|
Round half away from zero, matching Python 2's built-in round()
(Python 3 rounds exact halves to the nearest even value).
|
entailment
|
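The difference matters only at exact halves; Python 3 rounds halves to even ("banker's rounding") while _py2round rounds them away from zero. Assuming _py2round from the snippet above is in scope:

print(round(0.5), round(1.5), round(-0.5))              # 0 2 0
print(_py2round(0.5), _py2round(1.5), _py2round(-0.5))  # 1.0 2.0 -1.0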
def mergeWCS(default_wcs, user_pars):
""" Merges the user specified WCS values given as dictionary derived from
the input configObj object with the output PyWCS object computed
using distortion.output_wcs().
The user_pars dictionary needs to have the following set of keys::
user_pars = {'ra':None,'dec':None,'scale':None,'rot':None,
'outnx':None,'outny':None,'crpix1':None,'crpix2':None}
"""
#
# Start by making a copy of the input WCS...
#
outwcs = default_wcs.deepcopy()
# If there are no user set parameters, just return a copy of
# the original WCS:
if all([upar is None for upar in user_pars.values()]):
return outwcs
if _check_custom_WCS_pars('ra', 'dec', user_pars):
_crval = (user_pars['ra'], user_pars['dec'])
else:
_crval = None
if ('scale' in user_pars and user_pars['scale'] is not None and
not _check_close_scale(user_pars['scale'], outwcs.pscale)):
_scale = user_pars['scale']
_ratio = outwcs.pscale / _scale
else:
_ratio = None
_scale = None
if ('rot' not in user_pars) or user_pars['rot'] is None:
_delta_rot = None
else:
_delta_rot = outwcs.orientat - user_pars['rot']
if _delta_rot == 0.0:
_delta_rot = None
if _check_custom_WCS_pars('crpix1', 'crpix2', user_pars):
_crpix = (user_pars['crpix1'], user_pars['crpix2'])
else:
_crpix = None
shape = None
if _check_custom_WCS_pars('outnx', 'outny', user_pars):
shape = (
int(_py2round(user_pars['outnx'])),
int(_py2round(user_pars['outny']))
)
if shape[0] < 1 or shape[1] < 1:
raise ValueError("Custom WCS output image size smaller than 1")
if _crpix is None:
# make sure new image is centered on the CRPIX of the old WCS:
_crpix = ((shape[0] + 1.0) / 2.0, (shape[1] + 1.0) / 2.0)
else:
naxis1, naxis2 = outwcs.pixel_shape
if _delta_rot is None:
# no rotation is involved
if _ratio is not None:
# apply scale only:
# compute output image shape:
shape = (
max(1, int(_py2round(_ratio * naxis1))),
max(1, int(_py2round(_ratio * naxis2)))
)
# update CRPIX:
if _crpix is None:
_crpix = 1.0 + _ratio * (outwcs.wcs.crpix - 1.0)
else:
_corners = np.array(
[[0.5, 0.5],
[naxis1 + 0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5]]
) - outwcs.wcs.crpix
if _ratio is not None:
# scale corners:
_corners *= _ratio
# rotate corners and find new image range:
((_xmin, _xmax), (_ymin, _ymax)) = util.getRotatedSize(_corners,
_delta_rot)
# compute output image shape:
# NOTE: _py2round may be replaced with np.ceil
shape = (
max(1, int(_py2round(_xmax - _xmin))),
max(1, int(_py2round(_ymax - _ymin)))
)
if _crpix is None:
# update CRPIX:
_crpix = (-_xmin + 0.5, -_ymin + 0.5)
# Set up the new WCS based on values from old one:
if _ratio is not None:
# Update plate scale
outwcs.wcs.cd = outwcs.wcs.cd / _ratio
outwcs.pscale = _scale
# update orientation
if _delta_rot is not None:
outwcs.wcs.cd = _rotateCD(outwcs.wcs.cd, _delta_rot)
outwcs.orientat -= _delta_rot
if shape is not None:
# update size:
outwcs.pixel_shape = shape
# update reference position
if _crpix is not None:
outwcs.wcs.crpix = np.array(_crpix, dtype=np.float64)
if _crval is not None:
outwcs.wcs.crval = np.array(_crval, dtype=np.float64)
return outwcs
|
Merges the user specified WCS values given as dictionary derived from
the input configObj object with the output PyWCS object computed
using distortion.output_wcs().
The user_pars dictionary needs to have the following set of keys::
user_pars = {'ra':None,'dec':None,'scale':None,'rot':None,
'outnx':None,'outny':None,'crpix1':None,'crpix2':None}
|
entailment
|
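A usage sketch of the required dictionary: only the entries to override are set, and everything left as None is taken from the default WCS (default_wcs here would be an existing HSTWCS instance):

user_pars = {'ra': None, 'dec': None, 'scale': 0.03, 'rot': 0.0,
             'outnx': None, 'outny': None, 'crpix1': None, 'crpix2': None}
# new_wcs = mergeWCS(default_wcs, user_pars)  # rescale to 0.03"/pix, north up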
def convertWCS(inwcs,drizwcs):
""" Copy WCSObject WCS into Drizzle compatible array."""
drizwcs[0] = inwcs.crpix[0]
drizwcs[1] = inwcs.crval[0]
drizwcs[2] = inwcs.crpix[1]
drizwcs[3] = inwcs.crval[1]
drizwcs[4] = inwcs.cd[0][0]
drizwcs[5] = inwcs.cd[1][0]
drizwcs[6] = inwcs.cd[0][1]
drizwcs[7] = inwcs.cd[1][1]
return drizwcs
|
Copy WCSObject WCS into Drizzle compatible array.
|
entailment
|
def updateWCS(drizwcs,inwcs):
""" Copy output WCS array from Drizzle into WCSObject."""
crpix = np.array([drizwcs[0],drizwcs[2]], dtype=np.float64)
crval = np.array([drizwcs[1],drizwcs[3]], dtype=np.float64)
cd = np.array([[drizwcs[4],drizwcs[6]],[drizwcs[5],drizwcs[7]]], dtype=np.float64)
inwcs.cd = cd
inwcs.crval = crval
inwcs.crpix = crpix
inwcs.pscale = np.sqrt(np.power(inwcs.cd[0][0],2)+np.power(inwcs.cd[1][0],2)) * 3600.
inwcs.orient = np.arctan2(inwcs.cd[0][1],inwcs.cd[1][1]) * 180./np.pi
|
Copy output WCS array from Drizzle into WCSObject.
|
entailment
|
def wcsfit(img_wcs, ref_wcs):
"""
Perform a linear fit between 2 WCS for shift, rotation and scale.
Based on the WCSLIN function from 'drutil.f'(Drizzle V2.9) and modified to
allow for differences in reference positions assumed by PyDrizzle's
distortion model and the coeffs used by 'drizzle'.
Parameters
----------
img_wcs : obj
WCS instance for the input image
ref_wcs : obj
Undistorted WCSObject instance for output frame
"""
# Define objects that we need to use for the fit...
#in_refpix = img_geom.model.refpix
wmap = WCSMap(img_wcs,ref_wcs)
cx, cy = coeff_converter.sip2idc(img_wcs)
# Convert the RA/Dec positions back to X/Y in output product image
#_cpix_xyref = np.zeros((4,2),dtype=np.float64)
# Start by setting up an array of points +/-0.5 pixels around CRVAL1,2
# However, we must shift these positions by 1.0pix to match what
# drizzle will use as its reference position for 'align=center'.
_cpix = (img_wcs.wcs.crpix[0],img_wcs.wcs.crpix[1])
_cpix_arr = np.array([_cpix,(_cpix[0],_cpix[1]+1.),
(_cpix[0]+1.,_cpix[1]+1.),(_cpix[0]+1.,_cpix[1])], dtype=np.float64)
# Convert these positions to RA/Dec
_cpix_rd = wmap.xy2rd(img_wcs,_cpix_arr[:,0],_cpix_arr[:,1])
#for pix in xrange(len(_cpix_rd[0])):
_cpix_xref,_cpix_yref = wmap.rd2xy(ref_wcs,_cpix_rd[0],_cpix_rd[1])
_cpix_xyref = np.zeros((4,2),dtype=np.float64)
_cpix_xyref[:,0] = _cpix_xref
_cpix_xyref[:,1] = _cpix_yref
"""
# needed to handle correctly subarrays and wfpc2 data
if img_wcs.delta_refx == 0.0 and img_wcs.delta_refy == 0.0:
offx, offy = (0.0,0.0)
else:
offx, offy = (1.0, 1.0)
"""
offx, offy = (0.0,0.0)
# Now, apply distortion model to input image XY positions
#_cpix_xyc = np.zeros((4,2),dtype=np.float64)
_cpix_xyc = utils.apply_idc(_cpix_arr, cx, cy, img_wcs.wcs.crpix, img_wcs.pscale, order=1)
# Need to get the XDELTA,YDELTA values included here in order to get this
# to work with MDTng.
#if in_refpix:
# _cpix_xyc += (in_refpix['XDELTA'], in_refpix['YDELTA'])
# Perform a fit between:
# - undistorted, input positions: _cpix_xyc
# - X/Y positions in reference frame: _cpix_xyref
abxt,cdyt = fitlin(_cpix_xyc,_cpix_xyref)
# This correction affects the final fit when you are fitting
# a WCS to itself (no distortion coeffs), so it needs to be
# taken out in the coeffs file by modifying the zero-point value.
# WJH 17-Mar-2005
abxt[2] -= ref_wcs.wcs.crpix[0] + offx
cdyt[2] -= ref_wcs.wcs.crpix[1] + offy
return abxt,cdyt
|
Perform a linear fit between 2 WCS for shift, rotation and scale.
Based on the WCSLIN function from 'drutil.f'(Drizzle V2.9) and modified to
allow for differences in reference positions assumed by PyDrizzle's
distortion model and the coeffs used by 'drizzle'.
Parameters
----------
img_wcs : obj
WCS instance for the input image
ref_wcs : obj
Undistorted WCSObject instance for output frame
|
entailment
|
def fitlin(imgarr,refarr):
""" Compute the least-squares fit between two arrays.
A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9).
"""
# Initialize variables
_mat = np.zeros((3,3),dtype=np.float64)
_xorg = imgarr[0][0]
_yorg = imgarr[0][1]
_xoorg = refarr[0][0]
_yoorg = refarr[0][1]
_sigxox = 0.
_sigxoy = 0.
_sigxo = 0.
_sigyox = 0.
_sigyoy = 0.
_sigyo = 0.
_npos = len(imgarr)
# Populate matrices
for i in range(_npos):
_mat[0][0] += np.power((imgarr[i][0] - _xorg),2)
_mat[0][1] += (imgarr[i][0] - _xorg) * (imgarr[i][1] - _yorg)
_mat[0][2] += (imgarr[i][0] - _xorg)
_mat[1][1] += np.power((imgarr[i][1] - _yorg),2)
_mat[1][2] += imgarr[i][1] - _yorg
_sigxox += (refarr[i][0] - _xoorg)*(imgarr[i][0] - _xorg)
_sigxoy += (refarr[i][0] - _xoorg)*(imgarr[i][1] - _yorg)
_sigxo += refarr[i][0] - _xoorg
_sigyox += (refarr[i][1] - _yoorg)*(imgarr[i][0] -_xorg)
_sigyoy += (refarr[i][1] - _yoorg)*(imgarr[i][1] - _yorg)
_sigyo += refarr[i][1] - _yoorg
_mat[2][2] = _npos
_mat[1][0] = _mat[0][1]
_mat[2][0] = _mat[0][2]
_mat[2][1] = _mat[1][2]
# Now invert this matrix
_mat = linalg.inv(_mat)
_a = _sigxox*_mat[0][0]+_sigxoy*_mat[0][1]+_sigxo*_mat[0][2]
_b = -1*(_sigxox*_mat[1][0]+_sigxoy*_mat[1][1]+_sigxo*_mat[1][2])
#_x0 = _sigxox*_mat[2][0]+_sigxoy*_mat[2][1]+_sigxo*_mat[2][2]
_c = _sigyox*_mat[1][0]+_sigyoy*_mat[1][1]+_sigyo*_mat[1][2]
_d = _sigyox*_mat[0][0]+_sigyoy*_mat[0][1]+_sigyo*_mat[0][2]
#_y0 = _sigyox*_mat[2][0]+_sigyoy*_mat[2][1]+_sigyo*_mat[2][2]
_xt = _xoorg - _a*_xorg+_b*_yorg
_yt = _yoorg - _d*_xorg-_c*_yorg
return [_a,_b,_xt],[_c,_d,_yt]
|
Compute the least-squares fit between two arrays.
A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9).
|
entailment
|
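fitlin solves the least-squares normal equations by hand (with sign conventions inherited from drutil.f); the same affine fit u = a*x + b*y + t can be cross-checked with np.linalg.lstsq:

import numpy as np

xy = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
uv = xy @ np.array([[1.1, 0.1], [-0.1, 1.1]]).T + [5., 7.]  # known transform
A = np.column_stack([xy, np.ones(len(xy))])
coef_u, *_ = np.linalg.lstsq(A, uv[:, 0], rcond=None)
coef_v, *_ = np.linalg.lstsq(A, uv[:, 1], rcond=None)
print(coef_u)  # ~[ 1.1  0.1  5.0]
print(coef_v)  # ~[-0.1  1.1  7.0]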
def fitlin_rscale(xy,uv,verbose=False):
""" Performs a linear, orthogonal fit between matched
lists of positions 'xy' (input) and 'uv' (output).
Output: (same as for fit_arrays_general)
"""
mu = uv[:,0].mean()
mv = uv[:,1].mean()
mx = xy[:,0].mean()
my = xy[:,1].mean()
u = uv[:,0] - mu
v = uv[:,1] - mv
x = xy[:,0] - mx
y = xy[:,1] - my
Sxx = np.dot(x,x)
Syy = np.dot(y,y)
Sux = np.dot(u,x)
Suy = np.dot(u,y)
Svx = np.dot(v,x)
Svy = np.dot(v,y)
# implement parity check
if Sux * Svy > 0:
p = 1
else:
p = -1
XX = p*Sux + Svy
YY = Suy - p*Svx
# derive output values
theta_deg = np.rad2deg(np.arctan2(YY,XX))% 360.0
scale = np.sqrt(XX**2 + YY**2) / (Sxx+Syy)
shift = (mu-mx,mv-my)
if verbose:
print('Linear RSCALE fit: rotation = ',theta_deg,' scale = ',scale,' offset = ',shift)
coeffs = scale * fileutil.buildRotMatrix(-theta_deg)
P = [coeffs[0,0],coeffs[0,1],shift[0]]
Q = [coeffs[1,1],coeffs[1,0],shift[1]]
return P,Q
|
Performs a linear, orthogonal fit between matched
lists of positions 'xy' (input) and 'uv' (output).
Output: (same as for fit_arrays_general)
|
entailment
|
def fitlin_clipped(xy,uv,verbose=False,mode='rscale',nclip=3,reject=3):
""" Perform a clipped fit based on the number of iterations and rejection limit
(in sigma) specified by the user. This will more closely replicate the results
obtained by 'geomap' using 'maxiter' and 'reject' parameters.
"""
fitting_funcs = {'rscale':fitlin_rscale,'general':fitlin}
# Get the fitting function to be used
fit_func = fitting_funcs[mode.lower()]
# Perform the initial fit
P,Q = fit_func(xy,uv)
xyc = apply_fitlin(xy,P,Q)
# compute residuals from fit for input positions
dx = uv[:,0] - xyc[0]
dy = uv[:,1] - xyc[1]
fit_rms = [dx.std(),dy.std()]
if nclip > 0:
data = xy.copy()
outdata = uv.copy()
numclipped = 0
for i in range(nclip):
iterclipped = 0
xyc = apply_fitlin(data,P,Q)
# compute residuals from fit for input positions
dx = outdata[:,0] - xyc[0]
dy = outdata[:,1] - xyc[1]
# find indices of outliers in x and y
xout = np.where(np.abs(dx - dx.mean()) > reject*dx.std())
yout = np.where(np.abs(dy - dy.mean()) > reject*dy.std())
# concatenate those indices and sort them
outliers_indx = xout[0].tolist()+yout[0].tolist()
outliers_indx.sort()
# define the full range of indices for the data points left
full_indx = list(range(data.shape[0]))
# remove all unique indices specified in outliers from full range
for o in outliers_indx:
# only remove if it has not been removed already
# accounts for the same point being an outlier in both x and y
if full_indx.count(o) > 0:
full_indx.remove(o)
iterclipped += 1
if iterclipped == 0:
break
numclipped += iterclipped
if verbose:
print('Removed a total of ',numclipped,' points through iteration ',i+1)
# create clipped data
data_iter = np.zeros([len(full_indx),2],dtype=data.dtype)
if verbose:
print('Iter #',i+1,' data:',data.shape,data_iter.shape,len(full_indx))
data_iter[:,0] = data[:,0][full_indx]
data_iter[:,1] = data[:,1][full_indx]
outdata_iter = np.zeros([len(full_indx),2],dtype=data.dtype)
outdata_iter[:,0] = outdata[:,0][full_indx]
outdata_iter[:,1] = outdata[:,1][full_indx]
# perform the fit again with the clipped data and go to the next iteration
data = data_iter
outdata = outdata_iter
P,Q = fit_func(data,outdata)
# compute residuals from fit for input positions
xyc = apply_fitlin(data,P,Q)
dx = outdata[:,0] - xyc[0]
dy = outdata[:,1] - xyc[1]
fit_rms = [dx.std(),dy.std()]
if verbose:
print('Fit clipped ',numclipped,' points over ',nclip,' iterations.')
return P,Q,fit_rms
|
Perform a clipped fit based on the number of iterations and rejection limit
(in sigma) specified by the user. This will more closely replicate the results
obtained by 'geomap' using 'maxiter' and 'reject' parameters.
|
entailment
|
def readAltWCS(fobj, ext, wcskey=' ', verbose=False):
"""
Reads in alternate primary WCS from specified extension.
Parameters
----------
fobj : str, `astropy.io.fits.HDUList`
fits filename or fits file object
containing alternate/primary WCS(s) to be converted
wcskey : str
[" ",A-Z]
alternate/primary WCS key that will be replaced by the new key
ext : int
fits extension number
Returns
-------
hdr: fits.Header
header object with ONLY the keywords for specified alternate WCS
"""
if isinstance(fobj, str):
fobj = fits.open(fobj, memmap=False)
hdr = altwcs._getheader(fobj, ext)
try:
original_logging_level = log.level
log.setLevel(logutil.logging.WARNING)
nwcs = pywcs.WCS(hdr, fobj=fobj, key=wcskey)
except KeyError:
if verbose:
print('readAltWCS: Could not read WCS with key %s' % wcskey)
print(' Skipping %s[%s]' % (fobj.filename(), str(ext)))
return None
finally:
log.setLevel(original_logging_level) # restore original logging level
hwcs = nwcs.to_header()
if nwcs.wcs.has_cd():
hwcs = altwcs.pc2cd(hwcs, key=wcskey)
return hwcs
|
Reads in alternate primary WCS from specified extension.
Parameters
----------
fobj : str, `astropy.io.fits.HDUList`
fits filename or fits file object
containing alternate/primary WCS(s) to be converted
wcskey : str
[" ",A-Z]
alternate/primary WCS key that will be replaced by the new key
ext : int
fits extension number
Returns
-------
hdr: fits.Header
header object with ONLY the keywords for specified alternate WCS
|
entailment
|
def forward(self,pixx,pixy):
""" Transform the input pixx,pixy positions in the input frame
to pixel positions in the output frame.
This method gets passed to the drizzle algorithm.
"""
# This matches WTRAXY results to better than 1e-4 pixels.
skyx,skyy = self.input.all_pix2world(pixx,pixy,self.origin)
result= self.output.wcs_world2pix(skyx,skyy,self.origin)
return result
|
Transform the input pixx,pixy positions in the input frame
to pixel positions in the output frame.
This method gets passed to the drizzle algorithm.
|
entailment
|
def backward(self,pixx,pixy):
""" Transform pixx,pixy positions from the output frame back onto their
original positions in the input frame.
"""
skyx,skyy = self.output.wcs_pix2world(pixx,pixy,self.origin)
result = self.input.all_world2pix(skyx,skyy,self.origin)
return result
|
Transform pixx,pixy positions from the output frame back onto their
original positions in the input frame.
|
entailment
|
def createMask(input=None, static_sig=4.0, group=None, editpars=False, configObj=None, **inputDict):
""" The user can input a list of images if they like to create static masks
as well as optional values for static_sig and inputDict.
The configObj.cfg file will set the defaults and then override them
with the user options.
"""
if input is not None:
inputDict["static_sig"]=static_sig
inputDict["group"]=group
inputDict["updatewcs"]=False
inputDict["input"]=input
else:
print("Please supply an input image\n", file=sys.stderr)
raise ValueError
#this accounts for a user-called init where config is not defined yet
configObj = util.getDefaultConfigObj(__taskname__,configObj,inputDict,loadOnly=(not editpars))
if configObj is None:
return
if not editpars:
run(configObj)
|
The user can input a list of images for which they would like to create
static masks, as well as optional values for static_sig and inputDict.
The configObj.cfg file will set the defaults and then override them
with the user options.
|
entailment
|
def constructFilename(signature):
"""Construct an output filename for the given signature::
signature=[instr+detector,(nx,ny),detnum]
The signature is in the image object.
"""
suffix = buildSignatureKey(signature)
filename = os.path.join('.', suffix)
return filename
|
Construct an output filename for the given signature::
signature=[instr+detector,(nx,ny),detnum]
The signature is in the image object.
|
entailment
|
def addMember(self, imagePtr=None):
"""
Combines the input image with the static mask that
has the same signature.
Parameters
----------
imagePtr : object
An imageObject reference
Notes
-----
The signature parameter consists of the tuple::
(instrument/detector, (nx,ny), chip_id)
The signature is defined in the image object for each chip
"""
numchips=imagePtr._numchips
log.info("Computing static mask:\n")
chips = imagePtr.group
if chips is None:
chips = imagePtr.getExtensions()
#for chip in range(1,numchips+1,1):
for chip in chips:
chipid=imagePtr.scienceExt + ','+ str(chip)
chipimage=imagePtr.getData(chipid)
signature=imagePtr[chipid].signature
# If this is a new signature, create a new Static Mask file which is empty
# only create a new mask if one doesn't already exist
if ((signature not in self.masklist) or (len(self.masklist) == 0)):
self.masklist[signature] = self._buildMaskArray(signature)
maskname = constructFilename(signature)
self.masknames[signature] = maskname
else:
chip_sig = buildSignatureKey(signature)
for s in self.masknames:
if chip_sig in self.masknames[s]:
maskname = self.masknames[s]
break
imagePtr[chipid].outputNames['staticMask'] = maskname
stats = ImageStats(chipimage,nclip=3,fields='mode')
mode = stats.mode
rms = stats.stddev
nbins = len(stats.histogram)
del stats
log.info(' mode = %9f; rms = %7f; static_sig = %0.2f' %
(mode, rms, self.static_sig))
if nbins >= 2: # only combine data from new image if enough data to mask
sky_rms_diff = mode - (self.static_sig*rms)
np.bitwise_and(self.masklist[signature],
np.logical_not(np.less(chipimage, sky_rms_diff)),
self.masklist[signature])
del chipimage
|
Combines the input image with the static mask that
has the same signature.
Parameters
----------
imagePtr : object
An imageObject reference
Notes
-----
The signature parameter consists of the tuple::
(instrument/detector, (nx,ny), chip_id)
The signature is defined in the image object for each chip
|
entailment
|
def getMaskArray(self, signature):
""" Returns the appropriate StaticMask array for the image. """
if signature in self.masklist:
mask = self.masklist[signature]
else:
mask = None
return mask
|
Returns the appropriate StaticMask array for the image.
|
entailment
|
def getFilename(self,signature):
"""Returns the name of the output mask file that
should reside on disk for the given signature. """
filename=constructFilename(signature)
if(fileutil.checkFileExists(filename)):
return filename
else:
print("\nmMask file for ", str(signature), " does not exist on disk", file=sys.stderr)
return None
|
Returns the name of the output mask file that
should reside on disk for the given signature.
|
entailment
|
def close(self):
""" Deletes all static mask objects. """
for key in self.masklist.keys():
self.masklist[key] = None
self.masklist = {}
|
Deletes all static mask objects.
|
entailment
|
def deleteMask(self,signature):
""" Delete just the mask that matches the signature given."""
if signature in self.masklist:
self.masklist[signature] = None
else:
log.warning("No matching mask")
|
Delete just the mask that matches the signature given.
|
entailment
|
def saveToFile(self,imageObjectList):
""" Saves the static mask to a file
it uses the signatures associated with each
mask to contruct the filename for the output mask image.
"""
virtual = imageObjectList[0].inmemory
for key in self.masklist.keys():
#check to see if the file already exists on disk
filename = self.masknames[key]
#create a new fits image with the mask array and a standard header
#open a new header and data unit
newHDU = fits.PrimaryHDU()
newHDU.data = self.masklist[key]
if virtual:
for img in imageObjectList:
img.saveVirtualOutputs({filename:newHDU})
else:
try:
newHDU.writeto(filename, overwrite=True)
log.info("Saving static mask to disk: %s" % filename)
except IOError:
log.error("Problem saving static mask file: %s to "
"disk!\n" % filename)
raise
|
Save the static mask to a file.
It uses the signatures associated with each
mask to construct the filename for the output mask image.
|
entailment
|
def expand_image(image, shape):
""" Expand image from original shape to requested shape. Output shape
must be an integer multiple of input image shape for each axis. """
if (shape[0] % image.shape[0]) or (shape[1] % image.shape[1]):
raise ValueError("Output shape must be an integer multiple of input "
"image shape.")
sx = shape[1] // image.shape[1]
sy = shape[0] // image.shape[0]
ox = (sx - 1.0) / (2.0 * sx)
oy = (sy - 1.0) / (2.0 * sy)
# generate output coordinates:
y, x = np.indices(shape, dtype=np.float64)
x = x / sx - ox
y = y / sy - oy
# interpolate:
return bilinear_interp(image, x, y)
|
Expand image from original shape to requested shape. Output shape
must be an integer multiple of input image shape for each axis.
|
entailment
|
def bilinear_interp(data, x, y):
""" Interpolate input ``data`` at "pixel" coordinates ``x`` and ``y``. """
x = np.asarray(x)
y = np.asarray(y)
if x.shape != y.shape:
raise ValueError("X- and Y-coordinates must have identical shapes.")
out_shape = x.shape
out_size = x.size
x = x.ravel()
y = y.ravel()
# Integer indices of the lower-left neighbors, clipped to the array interior
# (np.int is deprecated; floor + astype replaces clipping into an int output)
x0 = np.clip(np.floor(x), 0, data.shape[1] - 2).astype(int)
y0 = np.clip(np.floor(y), 0, data.shape[0] - 2).astype(int)
x1 = x0 + 1
y1 = y0 + 1
f00 = data[(y0, x0)]
f10 = data[(y1, x0)]
f01 = data[(y0, x1)]
f11 = data[(y1, x1)]
w00 = (x1 - x) * (y1 - y)
w10 = (x1 - x) * (y - y0)
w01 = (x - x0) * (y1 - y)
w11 = (x - x0) * (y - y0)
interp = w00 * f00 + w10 * f10 + w01 * f01 + w11 * f11
return interp.reshape(out_shape).astype(data.dtype.type)
|
Interpolate input ``data`` at "pixel" coordinates ``x`` and ``y``.
|
entailment
|
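The weighting scheme at a single fractional position, worked by hand with the same w00/w10/w01/w11 weights as above:

import numpy as np

data = np.array([[0., 1.], [2., 3.]])
x, y = 0.25, 0.5
x0, y0 = int(x), int(y)             # 0, 0
x1, y1 = x0 + 1, y0 + 1             # 1, 1
val = ((x1 - x) * (y1 - y) * data[y0, x0] +   # 0.75*0.5*0 = 0.0
       (x1 - x) * (y - y0) * data[y1, x0] +   # 0.75*0.5*2 = 0.75
       (x - x0) * (y1 - y) * data[y0, x1] +   # 0.25*0.5*1 = 0.125
       (x - x0) * (y - y0) * data[y1, x1])    # 0.25*0.5*3 = 0.375
print(val)                          # 1.25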
def getflat(self, chip):
"""
Method for retrieving a detector's flat field. For STIS there are three.
This method will return an array the same shape as the image.
"""
sci_chip = self._image[self.scienceExt,chip]
exten = self.errExt+','+str(chip)
# The keyword for STIS flat fields in the primary header of the flt
lflatfile = fileutil.osfn(self._image["PRIMARY"].header['LFLTFILE'])
pflatfile = fileutil.osfn(self._image["PRIMARY"].header['PFLTFILE'])
# Try to open the file in the location specified by LFLTFILE.
try:
handle = fileutil.openImage(lflatfile, mode='readonly', memmap=False)
hdu = fileutil.getExtn(handle,extn=exten)
lfltdata = hdu.data
if lfltdata.shape != self.full_shape:
lfltdata = expand_image(lfltdata, self.full_shape)
except IOError:
lfltdata = np.ones(self.full_shape, dtype=sci_chip.data.dtype)
print("Cannot find file '{:s}'. Treating flatfield constant value "
"of '1'.\n".format(lflatfile))
# Try to open the file in the location specified by PFLTFILE.
try:
handle = fileutil.openImage(pflatfile, mode='readonly', memmap=False)
hdu = fileutil.getExtn(handle,extn=exten)
pfltdata = hdu.data
except IOError:
pfltdata = np.ones(self.full_shape, dtype=sci_chip.data.dtype)
print("Cannot find file '{:s}'. Treating flatfield constant value "
"of '1'.\n".format(pflatfile))
flat = lfltdata * pfltdata
return flat
|
Method for retrieving a detector's flat field. For STIS there are three.
This method will return an array the same shape as the image.
|
entailment
|
def _assignSignature(self, chip):
"""Assign a unique signature for the image based
on the instrument, detector, chip, and size
this will be used to uniquely identify the appropriate
static mask for the image.
This also records the filename for the static mask to the outputNames dictionary.
"""
sci_chip = self._image[self.scienceExt,chip]
ny=sci_chip._naxis1
nx=sci_chip._naxis2
detnum = sci_chip.detnum
instr=self._instrument
sig=(instr+self._detector,(nx,ny),int(detnum)) #signature is a tuple
sci_chip.signature=sig
|
Assign a unique signature for the image based
on the instrument, detector, chip, and size.
This will be used to uniquely identify the appropriate
static mask for the image.
It also records the filename for the static mask in the outputNames dictionary.
|
entailment
|
def getReadNoise(self):
"""
Method for returning the readnoise of a detector (in DN).
:units: DN
This works on a per-chip basis, to be consistent with other
detector classes where different chips have different gains.
"""
if self.proc_unit == 'native':
return self._rdnoise / self._gain()
return self._rdnoise
|
Method for returning the readnoise of a detector (in DN).
:units: DN
This works on a per-chip basis, to be consistent with other
detector classes where different chips have different gains.
|
entailment
|
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ATODGAIN'
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = 'READNSE'
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain = self.getInstrParameter(instrpars['gain'], pri_header,
instrpars['gnkeyword'])
chip._rdnoise = self.getInstrParameter(instrpars['rdnoise'], pri_header,
instrpars['rnkeyword'])
chip._exptime = self.getInstrParameter(instrpars['exptime'], chip.header,
instrpars['expkeyword'])
if chip._gain is None or chip._rdnoise is None or chip._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
chip._effGain = chip._gain
self._assignSignature(chip._chip) #this is used in the static mask
self.doUnitConversions()
|
This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
|
entailment
|
def doUnitConversions(self):
"""Convert the data to electrons.
This converts all science data extensions and saves
the results back to disk. We need to make sure
the data inside the chips already in memory is altered as well.
"""
for det in range(1,self._numchips+1,1):
chip=self._image[self.scienceExt,det]
conversionFactor = self.effGain
chip._gain = self.effGain #1.
chip.effGain = self.effGain
chip._conversionFactor = conversionFactor
|
Convert the data to electrons.
This converts all science data extensions and saves
the results back to disk. We need to make sure
the data inside the chips already in memory is altered as well.
|
entailment
|
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
usingDefaultGain = False
usingDefaultReadnoise = False
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = None
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
#pri_header=chip.header #stis stores stuff in the science data header
chip.cte_dir=0
chip._exptime = self.getInstrParameter(
instrpars['exptime'], chip.header, instrpars['expkeyword']
)
if chip._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
if instrpars['rnkeyword'] is not None:
chip._rdnoise = self.getInstrParameter(
instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
)
else:
chip._rdnoise = None
usingDefaultReadnoise = True
if instrpars['gnkeyword'] is not None:
chip._gain = self.getInstrParameter(
instrpars['gain'], pri_header, instrpars['gnkeyword']
)
else:
chip._gain = None
usingDefaultGain = True
# We need to determine if the user has used the default readnoise/gain value
# since if not, they will need to supply a gain/readnoise value as well
if usingDefaultReadnoise:
chip._rdnoise= self._setMAMADefaultReadnoise()
if usingDefaultGain:
chip._gain = self._setMAMADefaultGain()
self._assignSignature(chip._chip) #this is used in the static mask
chip._effGain=chip._gain
# Convert the science data to electrons if specified by the user.
self.doUnitConversions()
|
This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
|
entailment
|
def analyze_data(inputFileList, **kwargs):
"""
Determine if images within the dataset can be aligned
Parameters
==========
inputFileList: list
List containing FLT and/or FLC filenames for all input images which comprise an associated
dataset where 'associated dataset' may be a single image, multiple images, an HST association,
or a number of HST associations
Returns
=======
outputTable: object
Astropy Table object containing data pertaining to the associated dataset, including
the doProcess bool. It is intended this table is updated by subsequent functions for
bookkeeping purposes.
Notes
=====
The keyword/value pairs below define the "cannot process categories".
OBSTYPE : is not IMAGING
MTFLAG : T
SCAN_TYP : C or D (or !N)
FILTER : G*, *POL*, *PRISM*
FILTER1 : G*, *POL*, *PRISM*
FILTER2 : G*, *POL*, *PRISM*
APERTURE : *GRISM*, G*-REF, RAMP, *POL*, *PRISM*
TARGNAME : DARK, TUNGSTEN, BIAS, FLAT, EARTH-CALIB, DEUTERIUM
EXPTIME : 0
CHINJECT : is not NONE
The keyword/value pairs below define the category which the data can be processed, but
the results may be compromised
FGSLOCK : FINE/GYRO, FINE/GY, COARSE, GYROS
FITS Keywords only for WFC3 data: SCAN_TYP, FILTER, and CHINJECT (UVIS)
FITS Keywords only for ACS data: FILTER1 and FILTER2
Please be aware of the FITS keyword value NONE vs the Python None.
FIX: improve robustness when analyzing filter and aperture names, possibly use PHOTMODE instead
"""
OBSKEY = 'OBSTYPE'
MTKEY = 'MTFLAG'
SCNKEY = 'SCAN_TYP'
FILKEY = 'FILTER'
FILKEY1 = 'FILTER1'
FILKEY2 = 'FILTER2'
APKEY = 'APERTURE'
TARKEY = 'TARGNAME'
EXPKEY = 'EXPTIME'
FGSKEY = 'FGSLOCK'
CHINKEY = 'CHINJECT'
acsFiltNameList = [FILKEY1, FILKEY2]
catalog = None # Astrometric catalog used for alignment
catalogSources = 0 # Number of astrometric catalog sources determined based upon coordinate overlap with image WCS
foundSources = 0 # Number of sources detected in images
matchSources = 0 # Number of sources cross matched between astrometric catalog and detected in image
offset_x = None
offset_y = None
rot = None
scale = None
rms_x = -1.0
rms_y = -1.0
rms_ra = -1.0
rms_dec = -1.0
chisq_x = -1.0
chisq_y = -1.0
completed = False # If true, there was no exception and the processing completed all logic
dateObs = None # Human readable date
mjdutc = -1.0 # MJD UTC start of exposure
fgslock = None
processMsg = None
status = 9999
compromised = 0
headerletFile = None
fit_qual = -1
fit_rms = -1.0
total_rms = -1.0
datasetKey = -1.0
namesArray = ('imageName', 'instrument', 'detector', 'filter', 'aperture', 'obstype',
'subarray', 'dateObs', 'mjdutc', 'doProcess', 'processMsg', 'catalog', 'foundSources',
'catalogSources','matchSources',
'offset_x', 'offset_y', 'rotation','scale',
'rms_x', 'rms_y', 'rms_ra', 'rms_dec', 'completed',
'fit_rms', 'total_rms', 'datasetKey', 'status', 'fit_qual', 'headerletFile')
dataType = ('S20', 'S20', 'S20', 'S20', 'S20', 'S20', 'b', 'S20', 'f8', 'b', 'S30',
'S20', 'i4', 'i4', 'i4', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8',
'b', 'f8', 'f8', 'i8', 'i4', 'i4', 'S30')
# Create an astropy table
outputTable = Table(names=namesArray,dtype=dataType)
# Loop over the list of images to determine viability for alignment processing
#
# Capture the data characteristics before any evaluation so the information is
# available for the output table regardless of which keyword is used
# to determine the data is not viable for alignment.
for inputFile in inputFileList:
header_hdu = 0
header_data = getheader(inputFile, header_hdu)
# Keywords to use potentially for downstream analysis
instrume = (header_data['INSTRUME']).upper()
detector = (header_data['DETECTOR']).upper()
subarray = header_data['SUBARRAY']
dateObs = header_data['DATE-OBS']
mjdutc = header_data['EXPSTART']
# Obtain keyword values for analysis of viability
obstype = (header_data[OBSKEY]).upper()
mtflag = (header_data[MTKEY]).upper()
scan_typ = ''
if instrume == 'WFC3':
scan_typ = (header_data[SCNKEY]).upper()
sfilter = ''
if instrume == 'WFC3':
sfilter = (header_data[FILKEY]).upper()
# Concatenate the two ACS filter names together with an underscore
# If the filter name is blank, skip it
if instrume == 'ACS':
for filtname in acsFiltNameList:
# The filter keyword value could be zero or more blank spaces
# Strip off any leading or trailing blanks
if len(header_data[filtname].upper().strip()) > 0:
# If the current filter variable already has some content,
# need to append an underscore before adding more text
if len(sfilter) > 0:
sfilter += '_'
sfilter += header_data[filtname].upper().strip()
aperture = (header_data[APKEY]).upper()
targname = (header_data[TARKEY]).upper()
exptime = header_data[EXPKEY]
fgslock = (header_data[FGSKEY]).upper()
chinject = 'NONE'
if instrume == 'WFC3' and detector == 'UVIS':
chinject = (header_data[CHINKEY]).upper()
# Determine if the image has one of these conditions. The routine
# will exit processing upon the first satisfied condition.
noProcKey = None
noProcValue = None
doProcess = True
# Imaging vs spectroscopic or coronagraphic
if obstype != 'IMAGING':
noProcKey = OBSKEY
noProcValue = obstype
# Moving target
elif mtflag == 'T':
noProcKey = MTKEY
noProcValue = mtflag
# Boustrophedon scan without or with dwell (WFC3 only)
elif any ([scan_typ == 'C', scan_typ == 'D']):
noProcKey = SCNKEY
noProcValue = scan_typ
# Ramp, polarizer, grism, or prism
elif any (x in aperture for x in ['RAMP', 'POL', 'GRISM', '-REF', 'PRISM']):
noProcKey = APKEY
noProcValue = aperture
# Calibration target
elif any (x in targname for x in ['DARK', 'TUNG', 'BIAS', 'FLAT', 'DEUT', 'EARTH-CAL']):
noProcKey = TARKEY
noProcValue = targname
# Exposure time of effectively zero
elif math.isclose(exptime, 0.0, abs_tol=1e-5):
noProcKey = EXPKEY
noProcValue = exptime
# Commanded FGS lock
elif any (x in fgslock for x in ['GY', 'COARSE']):
noProcKey = FGSKEY
noProcValue = fgslock
# Charge injection mode
elif chinject != 'NONE':
noProcKey = CHINKEY
noProcValue = chinject
# Filter which does not begin with: 'F'(F###), 'C'(CLEAR), 'N'(N/A), and is not blank
# The sfilter variable may be the concatenation of two filters (F160_CLEAR)
elif sfilter and not sfilter.startswith(('F', 'C', 'N')):
noProcKey = FILKEY
noProcValue = sfilter
elif '_' in sfilter:
pos = sfilter.index('_') + 1
if pos < len(sfilter) and sfilter[pos] not in ('F', 'C', 'N'):
noProcKey = FILKEY
noProcValue = sfilter
# If noProcKey is set to a keyword, then this image has been found to not be viable for
# alignment purposes.
if (noProcKey is not None):
if (noProcKey != FGSKEY):
doProcess = False
msgType = Messages.NOPROC.value
else:
msgType = Messages.WARN.value
processMsg = noProcKey + '=' + str(noProcValue)
# Issue message to log file for this data indicating no processing to be done or
# processing should be allowed, but there may be some issue with the result (e.g.,
# GYROS mode so some drift)
generate_msg(inputFile, msgType, noProcKey, noProcValue)
# Populate a row of the table
outputTable.add_row([inputFile, instrume, detector, sfilter, aperture, obstype,
subarray, dateObs, mjdutc, doProcess, processMsg, catalog,
foundSources, catalogSources, matchSources,
offset_x, offset_y, rot, scale, rms_x, rms_y,
rms_ra, rms_dec, completed, fit_rms, total_rms, datasetKey,
status, fit_qual, headerletFile])
processMsg = None
#outputTable.pprint(max_width=-1)
return(outputTable)
|
Determine if images within the dataset can be aligned
Parameters
==========
inputFileList: list
List containing FLT and/or FLC filenames for all input images which comprise an associated
dataset where 'associated dataset' may be a single image, multiple images, an HST association,
or a number of HST associations
Returns
=======
outputTable: object
Astropy Table object containing data pertaining to the associated dataset, including
the doProcess bool. It is intended this table is updated by subsequent functions for
bookkeeping purposes.
Notes
=====
The keyword/value pairs below define the "cannot process categories".
OBSTYPE : is not IMAGING
MTFLAG : T
SCAN_TYP : C or D (or !N)
FILTER : G*, *POL*, *PRISM*
FILTER1 : G*, *POL*, *PRISM*
FILTER2 : G*, *POL*, *PRISM*
APERTURE : *GRISM*, G*-REF, RAMP, *POL*, *PRISM*
TARGNAME : DARK, TUNGSTEN, BIAS, FLAT, EARTH-CALIB, DEUTERIUM
EXPTIME : 0
CHINJECT : is not NONE
The keyword/value pairs below define the category which the data can be processed, but
the results may be compromised
FGSLOCK : FINE/GYRO, FINE/GY, COARSE, GYROS
FITS Keywords only for WFC3 data: SCAN_TYP, FILTER, and CHINJECT (UVIS)
FITS Keywords only for ACS data: FILTER1 and FILTER2
Please be aware of the FITS keyword value NONE vs the Python None.
FIX: improve robustness when analyzing filter and aperture names, possibly use PHOTMODE instead
|
entailment
|
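A minimal sketch of the bookkeeping table analyze_data builds: fixed column names and dtypes declared up front, then one add_row() per input image (the filenames and the column subset here are hypothetical):

from astropy.table import Table

t = Table(names=('imageName', 'doProcess', 'processMsg'),
          dtype=('S20', 'b', 'S30'))
t.add_row(['j8bt06nyq_flt.fits', True, ''])
t.add_row(['j8bt06nzq_flt.fits', False, 'MTFLAG=T'])
print(t)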
def generate_msg(filename, msg, key, value):
""" Generate a message for the output log indicating the file/association will not
be processed as the characteristics of the data are known to be inconsistent
with alignment.
"""
log.info('Dataset ' + filename + ' has (keyword = value) of (' + key + ' = ' + str(value) + ').')
if msg == Messages.NOPROC.value:
log.info('Dataset cannot be aligned.')
else:
log.info('Dataset can be aligned, but the result may be compromised.')
|
Generate a message for the output log indicating the file/association will not
be processed as the characteristics of the data are known to be inconsistent
with alignment.
|
entailment
|
def drizCR(input=None, configObj=None, editpars=False, **inputDict):
""" Look for cosmic rays. """
log.debug(inputDict)
inputDict["input"] = input
configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict,
loadOnly=(not editpars))
if configObj is None:
return
if not editpars:
run(configObj)
|
Look for cosmic rays.
|
entailment
|
def _driz_cr(sciImage, virtual_outputs, paramDict):
"""mask blemishes in dithered data by comparison of an image
with a model image and the derivative of the model image.
- ``sciImage`` is an imageObject which contains the science data
- ``blotImage`` is inferred from the ``sciImage`` object here which knows
the name of its blotted image
- ``chip`` should be the science chip that corresponds to the blotted
image that was sent
- ``paramDict`` contains the user parameters derived from the full
``configObj`` instance
- ``dqMask`` is inferred from the ``sciImage`` object, the name of the mask
file to combine with the generated Cosmic ray mask
Here are the options you can override in ``configObj``
``gain`` = 7 # Detector gain, e-/ADU
``grow`` = 1 # Radius around CR pixel to mask
# [default=1 for 3x3 for non-NICMOS]
``ctegrow`` = 0 # Length of CTE correction to be applied
``rn`` = 5 # Read noise in electrons
``snr`` = "4.0 3.0" # Signal-to-noise ratio
``scale`` = "0.5 0.4" # scaling factor applied to the derivative
``backg`` = 0 # Background value
``expkey`` = "exptime" # exposure time keyword
Blot images are saved out to simple fits files with 1 chip in them
so for example in ACS, there will be 1 image file with 2 chips that is
the original image and 2 blotted image files, each with 1 chip
So I'm imagining calling this function twice, once for each chip,
but both times with the same original science image file, output files
and some input (output from previous steps) are referenced in the
imageobject itself
"""
grow = paramDict["driz_cr_grow"]
ctegrow = paramDict["driz_cr_ctegrow"]
crcorr_list = []
cr_mask_dict = {}
for chip in range(1, sciImage._numchips + 1, 1):
exten = sciImage.scienceExt + ',' + str(chip)
sci_chip = sciImage[exten]
if not sci_chip.group_member:
continue
blot_image_name = sci_chip.outputNames['blotImage']
if sciImage.inmemory:
blot_data = sciImage.virtualOutputs[blot_image_name][0].data
else:
if not os.path.isfile(blot_image_name):
raise IOError("Blotted image not found: {:s}"
.format(blot_image_name))
try:
blot_data = fits.getdata(blot_image_name, ext=0)
except IOError:
print("Problem opening blot images")
raise
# Scale blot image, as needed, to match original input data units.
blot_data *= sci_chip._conversionFactor
input_image = sciImage.getData(exten)
# Apply any unit conversions to input image here for comparison
# with blotted image in units of electrons
input_image *= sci_chip._conversionFactor
# make the derivative blot image
blot_deriv = quickDeriv.qderiv(blot_data)
# Boolean mask needs to take into account any crbits values
# specified by the user to be ignored when converting DQ array.
dq_mask = sciImage.buildMask(chip, paramDict['crbit'])
# parse out the SNR information
snr1, snr2 = map(
float, filter(None, re.split(r"[,;\s]+", paramDict["driz_cr_snr"]))
)
# parse out the scaling information
mult1, mult2 = map(
float, filter(
None, re.split(r"[,;\s]+", paramDict["driz_cr_scale"])
)
)
gain = sci_chip._effGain
rn = sci_chip._rdnoise
backg = sci_chip.subtractedSky * sci_chip._conversionFactor
# Set scaling factor (used by MultiDrizzle) to 1 since scaling has
# already been accounted for in blotted image
# expmult = 1.
# ################# COMPUTATION PART I ###################
# Create a temporary array mask
t1 = np.absolute(input_image - blot_data)
# ta = np.sqrt(gain * np.abs((blot_data + backg) * expmult) + rn**2)
ta = np.sqrt(gain * np.abs(blot_data + backg) + rn**2)
t2 = (mult1 * blot_deriv + snr1 * ta / gain) # / expmult
tmp1 = t1 <= t2
# Create a convolution kernel that is 3 x 3 of 1's
kernel = np.ones((3, 3), dtype=np.uint16)
# Convolve the mask with the kernel
tmp2 = signal.convolve2d(tmp1, kernel, boundary='symm', mode='same')
# ################# COMPUTATION PART II ###################
# Create the CR Mask
t2 = (mult2 * blot_deriv + snr2 * ta / gain) # / expmult
cr_mask = (t1 <= t2) | (tmp2 >= 9)
# ################# COMPUTATION PART III ##################
# flag additional cte 'radial' and 'tail' pixels surrounding CR pixels
# as CRs
# In both the 'radial' and 'length' kernels below, 0->good and 1->bad,
# so that upon convolving the kernels with cr_mask, the convolution
# output will have low->bad and high->good from which 2 new arrays are
# created having 0->bad and 1->good. These 2 new arrays are then
# 'anded' to create a new cr_mask.
# make radial convolution kernel and convolve it with original cr_mask
cr_grow_kernel = np.ones((grow, grow), dtype=np.uint16)
cr_grow_kernel_conv = signal.convolve2d(
cr_mask, cr_grow_kernel, boundary='symm', mode='same'
)
# make tail convolution kernel and convolve it with original cr_mask
cr_ctegrow_kernel = np.zeros((2 * ctegrow + 1, 2 * ctegrow + 1))
# which pixels are masked by tail kernel depends on sign of
# sci_chip.cte_dir (i.e.,readout direction):
if sci_chip.cte_dir == 1:
# 'positive' direction: HRC: amp C or D; WFC: chip = sci,1; WFPC2
cr_ctegrow_kernel[0:ctegrow, ctegrow] = 1
elif sci_chip.cte_dir == -1:
# 'negative' direction: HRC: amp A or B; WFC: chip = sci,2
cr_ctegrow_kernel[ctegrow+1:2*ctegrow+1, ctegrow] = 1
# do the convolution
cr_ctegrow_kernel_conv = signal.convolve2d(
cr_mask, cr_ctegrow_kernel, boundary='symm', mode='same'
)
# select high pixels from both convolution outputs;
# then 'and' them to create new cr_mask
cr_grow_mask = cr_grow_kernel_conv >= grow**2 # radial
cr_ctegrow_mask = cr_ctegrow_kernel_conv >= ctegrow # length
cr_mask = cr_grow_mask & cr_ctegrow_mask
# Apply CR mask to the DQ array in place
dq_mask &= cr_mask
# Create the corr file
corrFile = np.where(dq_mask, input_image, blot_data)
corrFile /= sci_chip._conversionFactor
corrDQMask = np.where(dq_mask, 0, paramDict['crbit']).astype(np.uint16)
if paramDict['driz_cr_corr']:
crcorr_list.append({
'sciext': fileutil.parseExtn(exten),
'corrFile': corrFile.copy(),
'dqext': fileutil.parseExtn(sci_chip.dq_extn),
'dqMask': corrDQMask
})
# Save the cosmic ray mask file to disk
cr_mask_image = sci_chip.outputNames["crmaskImage"]
if paramDict['inmemory']:
print('Creating in-memory(virtual) FITS file...')
_pf = util.createFile(cr_mask.astype(np.uint8),
outfile=None, header=None)
cr_mask_dict[cr_mask_image] = _pf
sciImage.saveVirtualOutputs(cr_mask_dict)
else:
# Always write out crmaskimage, as it is required input for
# the final drizzle step. The final drizzle step combines this
# image with the DQ information on-the-fly.
#
# Remove the existing mask file if it exists
if os.path.isfile(cr_mask_image):
os.remove(cr_mask_image)
print("Removed old cosmic ray mask file: '{:s}'"
.format(cr_mask_image))
print("Creating output: {:s}".format(cr_mask_image))
util.createFile(cr_mask.astype(np.uint8),
outfile=cr_mask_image, header=None)
if paramDict['driz_cr_corr']:
createCorrFile(sciImage.outputNames["crcorImage"], crcorr_list,
sciImage._filename)
|
mask blemishes in dithered data by comparison of an image
with a model image and the derivative of the model image.
- ``sciImage`` is an imageObject which contains the science data
- ``blotImage`` is inferred from the ``sciImage`` object here which knows
the name of its blotted image
- ``chip`` should be the science chip that corresponds to the blotted
image that was sent
- ``paramDict`` contains the user parameters derived from the full
``configObj`` instance
- ``dqMask`` is inferred from the ``sciImage`` object, the name of the mask
file to combine with the generated Cosmic ray mask
Here are the options you can override in ``configObj``
``gain`` = 7 # Detector gain, e-/ADU
``grow`` = 1 # Radius around CR pixel to mask
# [default=1 for 3x3 for non-NICMOS]
``ctegrow`` = 0 # Length of CTE correction to be applied
``rn`` = 5 # Read noise in electrons
``snr`` = "4.0 3.0" # Signal-to-noise ratio
``scale`` = "0.5 0.4" # scaling factor applied to the derivative
``backg`` = 0 # Background value
``expkey`` = "exptime" # exposure time keyword
Blot images are saved out to simple FITS files with 1 chip in them,
so, for example, in ACS there will be 1 image file with 2 chips that is
the original image and 2 blotted image files, each with 1 chip.
So I'm imagining calling this function twice, once for each chip,
but both times with the same original science image file; output files
and some input (output from previous steps) are referenced in the
imageobject itself.
|
entailment
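The two-pass detection logic above can be exercised on synthetic arrays. The
sketch below is self-contained and uses assumed values for the gain, read
noise, background, and the SNR/scale pairs; it illustrates the technique and
is not the pipeline code itself.
import numpy as np
from scipy import signal

# Synthetic single-chip data: a flat model image plus noise, with one hot
# pixel standing in for a cosmic-ray hit.
rng = np.random.default_rng(42)
blot = np.full((64, 64), 100.0)              # blotted (model) image
image = blot + rng.normal(0.0, 3.0, blot.shape)
image[32, 32] += 500.0                       # the "cosmic ray"

gain, rn, backg = 2.0, 5.0, 0.0              # assumed detector parameters
snr1, snr2 = 4.0, 3.0
mult1, mult2 = 0.5, 0.4
deriv = np.zeros_like(blot)                  # flat model -> zero derivative

t1 = np.abs(image - blot)
ta = np.sqrt(gain * np.abs(blot + backg) + rn ** 2)
tmp1 = t1 <= (mult1 * deriv + snr1 * ta / gain)   # pass 1: good pixels
tmp2 = signal.convolve2d(tmp1, np.ones((3, 3)), boundary='symm', mode='same')
cr_mask = (t1 <= (mult2 * deriv + snr2 * ta / gain)) | (tmp2 >= 9)

print(cr_mask[32, 32])   # False: flagged as a cosmic ray (0 -> bad)
print(cr_mask[0, 0])     # True: background pixel kept as good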
|
def createCorrFile(outfile, arrlist, template):
"""
Create a _cor file with the same format as the original input image.
The DQ array will be replaced with the mask array used to create the _cor
file.
"""
# Remove the existing cor file if it exists
if os.path.isfile(outfile):
os.remove(outfile)
print("Removing old corr file: '{:s}'".format(outfile))
with fits.open(template, memmap=False) as ftemplate:
for arr in arrlist:
ftemplate[arr['sciext']].data = arr['corrFile']
if arr['dqext'][0] != arr['sciext'][0]:
ftemplate[arr['dqext']].data = arr['dqMask']
ftemplate.writeto(outfile)
print("Created CR corrected file: '{:s}'".format(outfile))
|
Create a _cor file with the same format as the original input image.
The DQ array will be replaced with the mask array used to create the _cor
file.
|
entailment
|
def setDefaults(configObj={}):
""" Return a dictionary of the default parameters
    which have also been updated with the user overrides.
"""
paramDict = {
'gain': 7, # Detector gain, e-/ADU
'grow': 1, # Radius around CR pixel to mask [default=1 for
# 3x3 for non-NICMOS]
'ctegrow': 0, # Length of CTE correction to be applied
'rn': 5, # Read noise in electrons
'snr': '4.0 3.0', # Signal-to-noise ratio
'scale': '0.5 0.4', # scaling factor applied to the derivative
'backg': 0, # Background value
'expkey': 'exptime' # exposure time keyword
}
if len(configObj) > 0:
for key in configObj:
paramDict[key] = configObj[key]
return paramDict
|
Return a dictionary of the default parameters
which have also been updated with the user overrides.
|
entailment
|
def help(file=None):
"""
Print out syntax help for running ``astrodrizzle``
Parameters
----------
file : str (Default = None)
        If given, write out help to the filename specified by this parameter.
Any previously existing file with this name will be deleted before
writing out the help.
"""
helpstr = getHelpAsString(docstring=True, show_ver=True)
if file is None:
print(helpstr)
else:
with open(file, mode='w') as f:
f.write(helpstr)
|
Print out syntax help for running ``astrodrizzle``
Parameters
----------
file : str (Default = None)
    If given, write out help to the filename specified by this parameter.
Any previously existing file with this name will be deleted before
writing out the help.
|
entailment
|
def getHelpAsString(docstring=False, show_ver=True):
"""
Return useful help from a file in the script directory called
``__taskname__.help``
"""
install_dir = os.path.dirname(__file__)
taskname = util.base_taskname(__taskname__, __package__)
htmlfile = os.path.join(install_dir, 'htmlhelp', taskname + '.html')
helpfile = os.path.join(install_dir, taskname + '.help')
if docstring or (not docstring and not os.path.exists(htmlfile)):
if show_ver:
helpString = "\n{:s} Version {:s} updated on {:s}\n\n".format(
__taskname__, __version__, __version_date__
)
else:
helpString = ''
if os.path.exists(helpfile):
helpString += teal.getHelpFileAsString(taskname, __file__)
elif __doc__ is not None:
helpString += __doc__ + os.linesep
else:
helpString = 'file://' + htmlfile
return helpString
|
Return useful help from a file in the script directory called
``__taskname__.help``
|
entailment
|
def setCommonInput(configObj, createOutwcs=True):
"""
    The common interface interpreter for MultiDrizzle tasks which not only runs
    'process_input()' but 'createImageObject()' and 'defineOutput()' as well to
    fully set up all inputs for use with the rest of the MultiDrizzle steps,
    either as stand-alone tasks or internally to MultiDrizzle itself.
    Parameters
    ----------
    configObj : object
        configObj instance or simple dictionary of input parameters
    Returns
    -------
    imageObjectList : list of imageObject objects
        list of imageObject instances, 1 for each input exposure
    outwcs : object
        imageObject instance defining the final output frame
    Notes
    -----
    At a minimum, the configObj instance (dictionary) should contain:
        configObj = {'input':None,'output':None }
    If provided, the configObj should contain the values of all the MultiDrizzle
    parameters as set by the user with TEAL. If no configObj is given, it will
    retrieve the default values automatically. In either case, the values from
    the input_dict will be merged in with the configObj before being used by
    the rest of the code.
    Examples
    --------
    You can set *createOutwcs=False* for the cases where you only want the
    images processed and no output wcs information is necessary; as in:
    >>> imageObjectList, outwcs = processInput.setCommonInput(configObj, createOutwcs=False)
"""
# make sure 'updatewcs' is set to False when running from GUI or if missing
# from configObj:
if 'updatewcs' not in configObj:
configObj['updatewcs'] = False
if not createOutwcs or not configObj['coeffs']:
# we're probably just working on single images here
configObj['updatewcs']=False
# maybe we can chunk this part up some more so that we can call just the
# parts we want
# Interpret input, read and convert and update input files, then return
# list of input filenames and derived output filename
asndict, ivmlist, output = process_input(
configObj['input'], configObj['output'],
updatewcs=configObj['updatewcs'], wcskey=configObj['wcskey'],
**configObj['STATE OF INPUT FILES'])
if not asndict:
return None, None
# convert the filenames from asndict into a list of full filenames
files = [fileutil.buildRootname(f) for f in asndict['order']]
original_files = asndict['original_file_names']
# interpret MDRIZTAB, if specified, and update configObj accordingly
# This can be done here because MDRIZTAB does not include values for
# input, output, or updatewcs.
if 'mdriztab' in configObj and configObj['mdriztab']:
print("Reading in MDRIZTAB parameters for {} files".format(len(files)))
mdriztab_dict = mdzhandler.getMdriztabParameters(files)
# Update configObj with values from mpars
cfgpars.mergeConfigObj(configObj, mdriztab_dict)
# Convert interpreted list of input files from process_input into a list
# of imageObject instances for use by the MultiDrizzle tasks.
instrpars = configObj['INSTRUMENT PARAMETERS']
# pass in 'proc_unit' to initialize unit conversions as necessary
instrpars['proc_unit'] = configObj['proc_unit']
undistort = True
if not configObj['coeffs']:
undistort = False
# determine whether parallel processing will be performed
use_parallel = False
if util.can_parallel:
# look to see whether steps which can be run using multiprocessing
# have been turned on
for stepnum in parallel_steps:
sname = util.getSectionName(configObj,stepnum[0])
if configObj[sname][stepnum[1]]:
use_parallel = True
break
# interpret all 'bits' related parameters and convert them to integers
configObj['resetbits'] = interpret_bit_flags(configObj['resetbits'])
step3name = util.getSectionName(configObj,3)
configObj[step3name]['driz_sep_bits'] = interpret_bit_flags(
configObj[step3name]['driz_sep_bits']
)
step7name = util.getSectionName(configObj,7)
configObj[step7name]['final_bits'] = interpret_bit_flags(
configObj[step7name]['final_bits']
)
# Verify any refimage parameters to be used
step3aname = util.getSectionName(configObj,'3a')
if not util.verifyRefimage(configObj[step3aname]['driz_sep_refimage']):
msg = 'No refimage with WCS found!\n '+\
' This could be caused by one of 2 problems:\n'+\
' * filename does not specify an extension with a valid WCS.\n'+\
' * can not find the file.\n'+\
'Please check the filename specified in the "refimage" parameter.'
print(textutil.textbox(msg))
return None,None
step7aname = util.getSectionName(configObj,'7a')
if not util.verifyRefimage(configObj[step7aname]['final_refimage']):
msg = 'No refimage with WCS found!\n '+\
' This could be caused by one of 2 problems:\n'+\
' * filename does not specify an extension with a valid WCS.\n'+\
' * can not find the file.\n'+\
'Please check the filename specified in the "refimage" parameter.'
print(textutil.textbox(msg))
return None,None
# Build imageObject list for all the valid, shift-updated input files
log.info('-Creating imageObject List as input for processing steps.')
if 'in_memory' in configObj:
virtual = configObj['in_memory']
else:
virtual = False
imageObjectList = createImageObjectList(files, instrpars,
group=configObj['group'],
undistort=undistort,
inmemory=virtual)
# Add original file names as "hidden" attributes of imageObject
assert(len(original_files) == len(imageObjectList)) #TODO: remove after extensive testing
for i in range(len(imageObjectList)):
imageObjectList[i]._original_file_name = original_files[i]
# apply context parameter
applyContextPar(imageObjectList, configObj['context'])
# reset DQ bits if requested by user
resetDQBits(imageObjectList, cr_bits_value=configObj['resetbits'])
# Add info about input IVM files at this point to the imageObjectList
addIVMInputs(imageObjectList, ivmlist)
if createOutwcs:
log.info('-Creating output WCS.')
# Build output WCS and update imageObjectList with output WCS info
outwcs = wcs_functions.make_outputwcs(imageObjectList, output,
configObj=configObj, perfect=True)
outwcs.final_wcs.printwcs()
else:
outwcs = None
try:
# Provide user with some information on resource usage for this run
# raises ValueError Exception in interactive mode and user quits
num_cores = configObj.get('num_cores') if use_parallel else 1
reportResourceUsage(imageObjectList, outwcs, num_cores)
except ValueError:
imageObjectList = None
return imageObjectList, outwcs
|
The common interface interpreter for MultiDrizzle tasks which not only runs
'process_input()' but 'createImageObject()' and 'defineOutput()' as well to
fully set up all inputs for use with the rest of the MultiDrizzle steps,
either as stand-alone tasks or internally to MultiDrizzle itself.
Parameters
----------
configObj : object
    configObj instance or simple dictionary of input parameters
Returns
-------
imageObjectList : list of imageObject objects
    list of imageObject instances, 1 for each input exposure
outwcs : object
    imageObject instance defining the final output frame
Notes
-----
At a minimum, the configObj instance (dictionary) should contain:
    configObj = {'input':None,'output':None }
If provided, the configObj should contain the values of all the MultiDrizzle
parameters as set by the user with TEAL. If no configObj is given, it will
retrieve the default values automatically. In either case, the values from
the input_dict will be merged in with the configObj before being used by
the rest of the code.
Examples
--------
You can set *createOutwcs=False* for the cases where you only want the
images processed and no output wcs information is necessary; as in:
>>> imageObjectList, outwcs = processInput.setCommonInput(configObj, createOutwcs=False)
|
entailment
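A small self-contained illustration of the 'bits'-parameter handling above.
The import path shown is an assumption: astropy ships an equivalent of the
interpret_bit_flags helper used here.
from astropy.nddata.bitmask import interpret_bit_flags
print(interpret_bit_flags('4096'))      # 4096
print(interpret_bit_flags('64,4096'))   # 4160: listed flags are OR-ed together
print(interpret_bit_flags(None))        # None is passed through unchanged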
|
def reportResourceUsage(imageObjectList, outwcs, num_cores,
interactive=False):
""" Provide some information to the user on the estimated resource
usage (primarily memory) for this run.
"""
from . import imageObject
    if outwcs is None:
        owcs = None
        output_mem = 0
    else:
        if isinstance(outwcs, imageObject.WCSObject):
            owcs = outwcs.final_wcs
        else:
            owcs = outwcs
        # bytes used for the output sci/wht/ctx arrays
        output_mem = np.prod(owcs.pixel_shape) * 4 * 3
img1 = imageObjectList[0]
numchips = 0
input_mem = 0
for img in imageObjectList:
numchips += img._nmembers # account for group parameter set by user
# if we have the cpus and s/w, ok, but still allow user to set pool size
pool_size = util.get_pool_size(num_cores, None)
pool_size = pool_size if (numchips >= pool_size) else numchips
inimg = 0
chip_mem = 0
for img in imageObjectList:
for chip in range(1,img._numchips+1):
cmem = img[chip].shape[0]*img[chip].shape[1]*4
inimg += 1
if inimg < pool_size:
input_mem += cmem*2
if chip_mem == 0:
chip_mem = cmem
max_mem = (input_mem + output_mem*pool_size + chip_mem*2)//(1024*1024)
print('*'*80)
print('*')
print('* Estimated memory usage: up to %d Mb.'%(max_mem))
    if owcs is not None:
        print('* Output image size: {:d} X {:d} pixels. '.format(*owcs.pixel_shape))
print('* Output image file: ~ %d Mb. '%(output_mem//(1024*1024)))
print('* Cores available: %d'%(pool_size))
print('*')
print('*'*80)
if interactive:
print('Continue with processing?')
while True:
if sys.version_info[0] >= 3:
k = input("(y)es or (n)o").strip()[0].lower()
else:
k = raw_input("(y)es or (n)o").strip()[0].lower()
if k not in ['n', 'y']:
continue
if k == 'n':
raise KeyboardInterrupt("Execution aborted")
|
Provide some information to the user on the estimated resource
usage (primarily memory) for this run.
|
entailment
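A standalone sketch of the memory estimate above, with made-up dimensions:
the output takes 3 planes (sci, wht, ctx) of 4-byte pixels per worker, and
each worker also holds roughly two copies of one input chip.
out_nx, out_ny = 4096, 4096        # assumed output image dimensions
chip_nx, chip_ny = 4096, 2048      # assumed input chip dimensions
pool_size = 4                      # assumed number of parallel workers

output_mem = out_nx * out_ny * 4 * 3
chip_mem = chip_nx * chip_ny * 4
input_mem = pool_size * chip_mem * 2
max_mem = (input_mem + output_mem * pool_size + chip_mem * 2) // (1024 * 1024)
print('Estimated memory usage: up to %d Mb.' % max_mem)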
|
def getMdriztabPars(input):
""" High-level function for getting the parameters from MDRIZTAB
Used primarily for TEAL interface.
"""
    filelist, output, ivmlist, oldasndict = processFilenames(input, None)
try:
mdrizdict = mdzhandler.getMdriztabParameters(filelist)
except KeyError:
print('No MDRIZTAB found for "%s". Parameters remain unchanged.'%(filelist[0]))
mdrizdict = {}
return mdrizdict
|
High-level function for getting the parameters from MDRIZTAB
Used primarily for TEAL interface.
|
entailment
|
def addIVMInputs(imageObjectList,ivmlist):
""" Add IVM filenames provided by user to outputNames dictionary for each input imageObject.
"""
if ivmlist is None:
return
for img,ivmname in zip(imageObjectList,ivmlist):
img.updateIVMName(ivmname)
|
Add IVM filenames provided by user to outputNames dictionary for each input imageObject.
|
entailment
|
def checkMultipleFiles(input):
""" Evaluates the input to determine whether there is 1 or more than 1 valid input file.
"""
    filelist, ivmlist, output, asndict = buildFileList(input)
    return len(filelist) > 1
|
Evaluates the input to determine whether there is 1 or more than 1 valid input file.
|
entailment
|
def createImageObjectList(files,instrpars,group=None,
undistort=True, inmemory=False):
""" Returns a list of imageObject instances, 1 for each input image in the list of input filenames.
"""
imageObjList = []
mtflag = False
mt_refimg = None
for img in files:
image = _getInputImage(img,group=group)
image.setInstrumentParameters(instrpars)
image.compute_wcslin(undistort=undistort)
if 'MTFLAG' in image._image['PRIMARY'].header:
# check to see whether we are dealing with moving target observations...
_keyval = image._image['PRIMARY'].header['MTFLAG']
if not util.is_blank(_keyval):
if isinstance(_keyval,bool):
mtflag = _keyval
else:
if 'T' in _keyval:
mtflag = True
else:
mtflag = False
else:
mtflag = False
if mtflag:
print("#####\nProcessing Moving Target Observations using reference image as WCS for all inputs!\n#####\n")
if mt_refimg is None:
mt_refimg = image
else:
image.set_mt_wcs(mt_refimg)
image.inmemory = inmemory # set flag for inmemory processing
# Now add (possibly updated) image object to list
imageObjList.append(image)
return imageObjList
|
Returns a list of imageObject instances, 1 for each input image in the list of input filenames.
|
entailment
|
def _getInputImage (input,group=None):
""" Factory function to return appropriate imageObject class instance"""
# extract primary header and SCI,1 header from input image
    sci_ext = 'SCI'
    # define exten up front so the waivered-FITS fallback below cannot hit
    # an undefined name when a specific group was requested
    exten = '[sci,1]'
    if group in [None, '']:
        phdu = fits.getheader(input, memmap=False)
else:
# change to use fits more directly here?
if group.find(',') > 0:
grp = group.split(',')
if grp[0].isalpha():
grp = (grp[0],int(grp[1]))
else:
grp = int(grp[0])
else:
grp = int(group)
phdu = fits.getheader(input, memmap=False)
phdu.extend(fits.getheader(input, ext=grp, memmap=False))
# Extract the instrument name for the data that is being processed by Multidrizzle
_instrument = phdu['INSTRUME']
    # Determine the instrument detector in use. NICMOS is a special case because it does
    # not use the 'DETECTOR' keyword. It instead uses 'CAMERA' to identify which of its
    # 3 cameras is in use. All other instruments support the 'DETECTOR' keyword.
if _instrument == 'NICMOS':
_detector = phdu['CAMERA']
else:
try:
_detector = phdu['DETECTOR']
except KeyError:
# using the phdu as set above (fits.getheader) is MUCH faster and
# works for the majority of data; but fileutil handles waivered fits
phdu = fileutil.getHeader(input+exten)
_detector = phdu['DETECTOR'] # if this fails, let it throw
del phdu # just to keep clean
# Match up the instrument and detector with the right class
# only importing the instrument modules as needed.
try:
if _instrument == 'ACS':
from . import acsData
if _detector == 'HRC': return acsData.HRCInputImage(input,group=group)
if _detector == 'WFC': return acsData.WFCInputImage(input,group=group)
if _detector == 'SBC': return acsData.SBCInputImage(input,group=group)
if _instrument == 'NICMOS':
from . import nicmosData
if _detector == 1: return nicmosData.NIC1InputImage(input)
if _detector == 2: return nicmosData.NIC2InputImage(input)
if _detector == 3: return nicmosData.NIC3InputImage(input)
if _instrument == 'WFPC2':
from . import wfpc2Data
return wfpc2Data.WFPC2InputImage(input,group=group)
"""
if _detector == 1: return wfpc2Data.PCInputImage(input)
if _detector == 2: return wfpc2Data.WF2InputImage(input)
if _detector == 3: return wfpc2Data.WF3InputImage(input)
if _detector == 4: return wfpc2Data.WF4InputImage(input)
"""
if _instrument == 'STIS':
from . import stisData
if _detector == 'CCD': return stisData.CCDInputImage(input,group=group)
if _detector == 'FUV-MAMA': return stisData.FUVInputImage(input,group=group)
if _detector == 'NUV-MAMA': return stisData.NUVInputImage(input,group=group)
if _instrument == 'WFC3':
from . import wfc3Data
if _detector == 'UVIS': return wfc3Data.WFC3UVISInputImage(input,group=group)
if _detector == 'IR': return wfc3Data.WFC3IRInputImage(input,group=group)
except ImportError:
msg = 'No module implemented for '+str(_instrument)+'!'
raise ValueError(msg)
# If a supported instrument is not detected, print the following error message
# and raise an exception.
msg = 'Instrument: ' + str(_instrument) + '/' + str(_detector) + ' not yet supported!'
raise ValueError(msg)
|
Factory function to return appropriate imageObject class instance
|
entailment
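The instrument/detector if-chains above amount to a lookup table; below is a
sketch of that equivalent design. String stand-ins replace the real classes,
so this is illustrative only.
def get_input_image(instrument, detector):
    # Map (instrument, detector) pairs to the handler that would be built.
    dispatch = {
        ('ACS', 'WFC'): 'acsData.WFCInputImage',
        ('WFC3', 'UVIS'): 'wfc3Data.WFC3UVISInputImage',
        ('NICMOS', 1): 'nicmosData.NIC1InputImage',
    }
    try:
        return dispatch[(instrument, detector)]
    except KeyError:
        raise ValueError('Instrument: %s/%s not yet supported!'
                         % (instrument, detector))

print(get_input_image('ACS', 'WFC'))   # acsData.WFCInputImage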
|
def processFilenames(input=None,output=None,infilesOnly=False):
"""Process the input string which contains the input file information and
    return a filelist, output
"""
ivmlist = None
oldasndict = None
if input is None:
print("No input files provided to processInput")
raise ValueError
if not isinstance(input, list) and ('_asn' in input or '_asc' in input):
# Input is an association table
# Get the input files, and run makewcs on them
oldasndict = asnutil.readASNTable(input, prodonly=infilesOnly)
if not infilesOnly:
if output in ["",None,"None"]:
                output = oldasndict['output'].lower() # ensure output name is lower case
asnhdr = fits.getheader(input, memmap=False)
# Only perform duplication check if not already completed...
dupcheck = asnhdr.get('DUPCHECK',default="PERFORM") == "PERFORM"
#filelist = [fileutil.buildRootname(fname) for fname in oldasndict['order']]
filelist = buildASNList(oldasndict['order'],input,check_for_duplicates=dupcheck)
elif (not isinstance(input, list)) and \
(input[0] == '@') :
# input is an @ file
f = open(input[1:])
# Read the first line in order to determine whether
# IVM files have been specified in a second column...
line = f.readline()
f.close()
# Parse the @-file with irafglob to extract the input filename
filelist = irafglob.irafglob(input, atfile=util.atfile_sci)
# If there is a second column...
if len(line.split()) == 2:
# ...parse out the names of the IVM files as well
ivmlist = irafglob.irafglob(input, atfile=util.atfile_ivm)
if output in ['',None,"None"]:
if len(filelist) == 1:
output = fileutil.buildNewRootname(filelist[0])
else:
output = 'final'
else:
#input is a string or a python list
try:
filelist, output = parseinput.parseinput(input, outputname=output)
if output in ['',None,"None"]:
if len(filelist) == 1:
output = fileutil.buildNewRootname(filelist[0])
else:
output = 'final'
if not isinstance(input, list):
filelist.sort()
except IOError: raise
# sort the list of input files
# this ensures the list of input files has the same order on all platforms
    # it can have different order because listdir() uses inode order, not unix type order
#filelist.sort()
return filelist, output, ivmlist, oldasndict
|
Process the input string which contains the input file information and
return a filelist, output
|
entailment
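A runnable illustration of the two-column "@-file" form handled above; the
file names are made up. The first column holds the science images and the
optional second column holds the matching IVM files.
with open('inputs.txt', 'w') as f:
    f.write('j1234567q_flt.fits j1234567q_ivm.fits\n')
    f.write('j1234568q_flt.fits j1234568q_ivm.fits\n')
# Passing input='@inputs.txt' would then yield the first column as
# filelist and the second column as ivmlist.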
|
def process_input(input, output=None, ivmlist=None, updatewcs=True,
prodonly=False, wcskey=None, **workinplace):
"""
Create the full input list of filenames after verifying and converting
files as needed.
"""
newfilelist, ivmlist, output, oldasndict, origflist = buildFileListOrig(
input, output=output, ivmlist=ivmlist, wcskey=wcskey,
updatewcs=updatewcs, **workinplace)
if not newfilelist:
buildEmptyDRZ(input, output)
return None, None, output
# run all WCS updating -- Now done in buildFileList
#pydr_input = _process_input_wcs(newfilelist, wcskey, updatewcs)
pydr_input = newfilelist
# AsnTable will handle the case when output==None
if not oldasndict:# and output is not None:
oldasndict = asnutil.ASNTable(pydr_input, output=output)
oldasndict.create()
asndict = update_member_names(oldasndict, pydr_input)
asndict['original_file_names'] = origflist
# Build output filename
drz_extn = '_drz.fits'
for img in newfilelist:
# special case logic to automatically recognize when _flc.fits files
# are provided as input and produce a _drc.fits file instead
if '_flc.fits' in img:
drz_extn = '_drc.fits'
break
if output in [None,'']:
output = fileutil.buildNewRootname(asndict['output'],
extn=drz_extn)
else:
if '.fits' in output.lower():
pass
elif drz_extn[:4] not in output.lower():
output = fileutil.buildNewRootname(output, extn=drz_extn)
log.info('Setting up output name: %s' % output)
return asndict, ivmlist, output
|
Create the full input list of filenames after verifying and converting
files as needed.
|
entailment
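A standalone sketch of the output-suffix logic above: the presence of any
'_flc.fits' (CTE-corrected) input switches the product suffix from '_drz'
to '_drc'.
def pick_drz_extn(filenames):
    # Any CTE-corrected input forces a _drc product name.
    for name in filenames:
        if '_flc.fits' in name:
            return '_drc.fits'
    return '_drz.fits'

print(pick_drz_extn(['j12_flt.fits']))                  # _drz.fits
print(pick_drz_extn(['j12_flt.fits', 'j13_flc.fits']))  # _drc.fits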
|
def _process_input_wcs(infiles, wcskey, updatewcs):
"""
This is a subset of process_input(), for internal use only. This is the
portion of input handling which sets/updates WCS data, and is a performance
hit - a target for parallelization. Returns the expanded list of filenames.
"""
# Run parseinput though it's likely already been done in processFilenames
outfiles = parseinput.parseinput(infiles)[0]
# Disable parallel processing here for now until hardware I/O gets "wider".
# Since this part is IO bound, parallelizing doesn't help more than a little
# in most cases, and may actually slow this down on some desktop nodes.
# cfgval_num_cores = None # get this from paramDict
# pool_size = util.get_pool_size(cfgval_num_cores, len(outfiles))
pool_size = 1
# do the WCS updating
if wcskey in ['', ' ', 'INDEF', None]:
if updatewcs:
log.info('Updating input WCS using "updatewcs"')
else:
log.info('Resetting input WCS to be based on WCS key = %s' % wcskey)
if pool_size > 1:
log.info('Executing %d parallel workers' % pool_size)
subprocs = []
for fname in outfiles:
p = multiprocessing.Process(target=_process_input_wcs_single,
name='processInput._process_input_wcs()', # for err msgs
args=(fname, wcskey, updatewcs) )
subprocs.append(p)
mputil.launch_and_wait(subprocs, pool_size) # blocks till all done
else:
log.info('Executing serially')
for fname in outfiles:
_process_input_wcs_single(fname, wcskey, updatewcs)
return outfiles
|
This is a subset of process_input(), for internal use only. This is the
portion of input handling which sets/updates WCS data, and is a performance
hit - a target for parallelization. Returns the expanded list of filenames.
|
entailment
|
def _process_input_wcs_single(fname, wcskey, updatewcs):
"""
See docs for _process_input_wcs.
This is separated to be spawned in parallel.
"""
if wcskey in ['', ' ', 'INDEF', None]:
if updatewcs:
uw.updatewcs(fname, checkfiles=False)
else:
numext = fileutil.countExtn(fname)
extlist = []
for extn in range(1, numext + 1):
extlist.append(('SCI', extn))
if wcskey in string.ascii_uppercase:
wkey = wcskey
wname = ' '
else:
wname = wcskey
wkey = ' '
altwcs.restoreWCS(fname, extlist, wcskey=wkey, wcsname=wname)
# make an asn table at the end
# Make sure there is a WCSCORR table for each input image
if wcskey not in ['', ' ', 'INDEF', None] or updatewcs:
wcscorr.init_wcscorr(fname)
|
See docs for _process_input_wcs.
This is separated to be spawned in parallel.
|
entailment
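A self-contained sketch of the wcskey interpretation above: a single
uppercase letter is treated as a WCS key, anything else as a WCS name.
import string

def split_wcs_spec(wcskey):
    if wcskey in string.ascii_uppercase:
        return wcskey, ' '      # (wkey, wname)
    return ' ', wcskey

print(split_wcs_spec('A'))      # ('A', ' ')
print(split_wcs_spec('OPUS'))   # (' ', 'OPUS')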
|
def buildFileList(input, output=None, ivmlist=None,
wcskey=None, updatewcs=True, **workinplace):
"""
Builds a file list which has undergone various instrument-specific
checks for input to MultiDrizzle, including splitting STIS associations.
"""
newfilelist, ivmlist, output, oldasndict, filelist = \
buildFileListOrig(input=input, output=output, ivmlist=ivmlist,
wcskey=wcskey, updatewcs=updatewcs, **workinplace)
return newfilelist, ivmlist, output, oldasndict
|
Builds a file list which has undergone various instrument-specific
checks for input to MultiDrizzle, including splitting STIS associations.
|
entailment
|
def buildFileListOrig(input, output=None, ivmlist=None,
wcskey=None, updatewcs=True, **workinplace):
"""
Builds a file list which has undergone various instrument-specific
checks for input to MultiDrizzle, including splitting STIS associations.
Compared to buildFileList, this version returns the list of the
original file names as specified by the user (e.g., before GEIS->MEF, or
WAIVER FITS->MEF conversion).
"""
# NOTE: original file name is required in order to correctly associate
# user catalog files (e.g., user masks to be used with 'skymatch') with
# corresponding imageObjects.
filelist, output, ivmlist, oldasndict = processFilenames(input,output)
# verify that all input images specified can be updated as needed
filelist = util.verifyFilePermissions(filelist)
if filelist is None or len(filelist) == 0:
return None, None, None, None, None
manageInputCopies(filelist,**workinplace)
# to keep track of the original file names we do the following trick:
# pack filelist with the ivmlist using zip and later unpack the zipped list.
#
# NOTE: this required a small modification of the checkStisFiles function
# in stsci.tools.check_files to be able to handle ivmlists that are tuples.
if ivmlist is None:
ivmlist = len(filelist)*[None]
else:
assert(len(filelist) == len(ivmlist)) #TODO: remove after debugging
ivmlist = list(zip(ivmlist,filelist))
# Check format of FITS files - convert Waiver/GEIS to MEF if necessary
filelist, ivmlist = check_files.checkFITSFormat(filelist, ivmlist)
# check for non-polynomial distortion correction
if not updatewcs:
# with updatewcs turned on, any problems will get resolved
# so we do not need to be concerned about the state of the DGEOFILEs
filelist = checkDGEOFile(filelist)
# run all WCS updating
updated_input = _process_input_wcs(filelist, wcskey, updatewcs)
newfilelist, ivmlist = check_files.checkFiles(updated_input, ivmlist)
if updatewcs:
uw.updatewcs(','.join(set(newfilelist) - set(filelist)))
if len(ivmlist) > 0:
ivmlist, filelist = list(zip(*ivmlist))
else:
        filelist = [] # ensure that both filelist and ivmlist are defined as empty lists
return newfilelist, ivmlist, output, oldasndict, filelist
|
Builds a file list which has undergone various instrument-specific
checks for input to MultiDrizzle, including splitting STIS associations.
Compared to buildFileList, this version returns the list of the
original file names as specified by the user (e.g., before GEIS->MEF, or
WAIVER FITS->MEF conversion).
|
entailment
|
def buildASNList(rootnames, asnname, check_for_duplicates=True):
"""
Return the list of filenames for a given set of rootnames
"""
# Recognize when multiple valid inputs with the same rootname are present
# this would happen when both CTE-corrected (_flc) and non-CTE-corrected (_flt)
# products are in the same directory as an ASN table
filelist, duplicates = checkForDuplicateInputs(rootnames)
if check_for_duplicates and duplicates:
# Build new ASN tables for each set of input files
origasn = changeSuffixinASN(asnname, 'flt')
dupasn = changeSuffixinASN(asnname, 'flc')
errstr = 'ERROR:\nMultiple valid input files found:\n'
for fname, dname in zip(filelist, duplicates):
errstr += ' %s %s\n' % (fname, dname)
        errstr += ('\nNew association files have been generated for each '
                   'version of these files.\n %s\n %s\n\nPlease '
                   're-start astrodrizzle using one of these new ASN files or '
                   'use wildcards for the input to only select one type of '
                   'input file.' % (dupasn, origasn))
print(textutil.textbox(errstr), file=sys.stderr)
# generate new ASN files for each case,
# report this case of duplicate inputs to the user then quit
raise ValueError
return filelist
|
Return the list of filenames for a given set of rootnames
|
entailment
|
def changeSuffixinASN(asnfile, suffix):
"""
Create a copy of the original asn file and change the name of all members
to include the suffix.
"""
# Start by creating a new name for the ASN table
_new_asn = asnfile.replace('_asn.fits','_'+suffix+'_asn.fits')
if os.path.exists(_new_asn):
os.remove(_new_asn)
# copy original ASN table to new table
shutil.copy(asnfile,_new_asn)
# Open up the new copy and convert all MEMNAME's to include suffix
fasn = fits.open(_new_asn, mode='update', memmap=False)
fasn[0].header['DUPCHECK'] = "COMPLETE"
newdata = fasn[1].data.tolist()
for i in range(len(newdata)):
val = newdata[i][0].decode(encoding='UTF-8').strip()
if 'prod' not in newdata[i][1].decode(encoding='UTF-8').lower():
val += '_'+suffix
newdata[i] = (val,newdata[i][1].strip(),newdata[i][2])
# Redefine dtype to support longer strings for MEMNAME
new_dtype = []
d = fasn[1].data.dtype
msize = d.descr[0][1][1:]
new_size = int(msize[1:])+8
mtype = msize[0]
new_dtype.append((d.descr[0][0],d.descr[0][1].replace(msize,'{}{}'.format(mtype,new_size))))
new_dtype.append(d.descr[1])
new_dtype.append(d.descr[2])
# Assign newly created, reformatted array to extension
newasn = np.array(newdata,dtype=new_dtype)
fasn[1].data = newasn
fasn.close()
return _new_asn
|
Create a copy of the original asn file and change the name of all members
to include the suffix.
|
entailment
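A self-contained sketch of the MEMNAME-widening step above, on a made-up
member table: the string column grows by 8 characters so that suffixed
names such as 'u9600201m_flc' still fit.
import numpy as np

old = np.array([('u9600201m', 'EXP-DTH', 'yes')],
               dtype=[('MEMNAME', 'S10'), ('MEMTYPE', 'S14'), ('MEMPRSNT', 'S3')])
d = old.dtype
msize = d.descr[0][1][1:]                    # e.g. 'S10'
new_size = int(msize[1:]) + 8                # widen by 8 characters -> 18
new_dtype = [(d.descr[0][0], '{}{}'.format(msize[0], new_size))]
new_dtype.extend(d.descr[1:])
rows = [(old['MEMNAME'][0].decode().strip() + '_flc',
         old['MEMTYPE'][0], old['MEMPRSNT'][0])]
print(np.array(rows, dtype=new_dtype)['MEMNAME'])   # [b'u9600201m_flc']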
|
def checkForDuplicateInputs(rootnames):
"""
Check input files specified in ASN table for duplicate versions with
multiple valid suffixes (_flt and _flc, for example).
"""
flist = []
duplist = []
for fname in rootnames:
# Look for any recognized CTE-corrected products
f1 = fileutil.buildRootname(fname,ext=['_flc.fits'])
f2 = fileutil.buildRootname(fname)
flist.append(f2)
if os.path.exists(f1) and f1 != f2:
# More than 1 valid input found for this rootname
duplist.append(f1)
return flist,duplist
|
Check input files specified in ASN table for duplicate versions with
multiple valid suffixes (_flt and _flc, for example).
|
entailment
|
def resetDQBits(imageObjectList, cr_bits_value=4096):
"""Reset the CR bit in each input image's DQ array"""
if cr_bits_value > 0:
for img in imageObjectList:
for chip in range(1,img._numchips+1,1):
sci_chip = img._image[img.scienceExt,chip]
resetbits.reset_dq_bits(sci_chip.dqfile, cr_bits_value,
extver=chip, extname=sci_chip.dq_extn)
|
Reset the CR bit in each input image's DQ array
|
entailment
|
def update_member_names(oldasndict, pydr_input):
"""
Update names in a member dictionary.
Given an association dictionary with rootnames and a list of full
file names, it will update the names in the member dictionary to
    contain the '_*' extension. For example, a rootname of 'u9600201m' will
    be replaced by 'u9600201m_c0h', making sure that a MEF file is passed
as an input and not the corresponding GEIS file.
"""
omembers = oldasndict['members'].copy()
nmembers = {}
translated_names = [f.split('.fits')[0] for f in pydr_input]
newkeys = [fileutil.buildNewRootname(file) for file in pydr_input]
keys_map = list(zip(newkeys, pydr_input))
for okey, oval in list(omembers.items()):
if okey in newkeys:
nkey = pydr_input[newkeys.index(okey)]
nmembers[nkey.split('.fits')[0]] = oval
oldasndict.pop('members')
# replace should be always True to cover the case when flt files were removed
# and the case when names were translated
oldasndict.update(members=nmembers, replace=True)
oldasndict['order'] = translated_names
return oldasndict
|
Update names in a member dictionary.
Given an association dictionary with rootnames and a list of full
file names, it will update the names in the member dictionary to
contain the '_*' extension. For example, a rootname of 'u9600201m' will
be replaced by 'u9600201m_c0h', making sure that a MEF file is passed
as an input and not the corresponding GEIS file.
|
entailment
|
def manageInputCopies(filelist, **workinplace):
"""
Creates copies of all input images in a sub-directory.
The copies are made prior to any processing being done to the images at all,
including updating the WCS keywords. If there are already copies present,
they will NOT be overwritten, but instead will be used to over-write the
current working copies.
"""
# Find out what directory is being used for processing
workingdir = os.getcwd()
# Only create sub-directory for copies of inputs, if copies are requested
# Create name of sub-directory for copies
origdir = os.path.join(workingdir,'OrIg_files')
if workinplace['overwrite'] or workinplace['preserve']:
# if sub-directory does not exist yet, create it
if not os.path.exists(origdir):
os.mkdir(origdir)
printMsg = True
# check to see if copies already exist for each file
for fname in filelist:
copymade = False # If a copy is made, no need to restore
copyname = os.path.join(origdir,fname)
short_copyname = os.path.join('OrIg_files',fname)
if workinplace['overwrite']:
print('Forcibly archiving original of: ',fname, 'as ',short_copyname)
# make a copy of the file in the sub-directory
if os.path.exists(copyname): os.chmod(copyname, 438) # octal 666
shutil.copy(fname,copyname)
os.chmod(copyname,292) # octal 444 makes files read-only
if printMsg:
print('\nTurning OFF "preserve" and "restore" actions...\n')
printMsg = False # We only need to print this one time...
copymade = True
if (workinplace['preserve'] and not os.path.exists(copyname)) \
and not workinplace['overwrite']:
# Preserving a copy of the input, but only if not already archived
print('Preserving original of: ',fname, 'as ',short_copyname)
# make a copy of the file in the sub-directory
shutil.copy(fname,copyname)
os.chmod(copyname,292) # octal 444 makes files read-only
copymade = True
if 'restore' in workinplace and not copymade:
if (os.path.exists(copyname) and workinplace['restore']) and not workinplace['overwrite']:
print('Restoring original input for ',fname,' from ',short_copyname)
# replace current files with original version
os.chmod(fname, 438) # octal 666
shutil.copy(copyname, fname)
os.chmod(fname, 438)
|
Creates copies of all input images in a sub-directory.
The copies are made prior to any processing being done to the images at all,
including updating the WCS keywords. If there are already copies present,
they will NOT be overwritten, but instead will be used to over-write the
current working copies.
|
entailment
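A runnable sketch of the archiving step above, on a made-up file name: the
archived copy is made read-only (0o444, decimal 292) and write bits (0o666,
decimal 438) are only restored when a file must be refreshed or overwritten.
import os
import shutil

os.makedirs('OrIg_files', exist_ok=True)
open('example_flt.fits', 'wb').close()       # stand-in for a real input
copyname = os.path.join('OrIg_files', 'example_flt.fits')
if not os.path.exists(copyname):
    shutil.copy('example_flt.fits', copyname)
    os.chmod(copyname, 0o444)                # archived copy is read-only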
|
def buildEmptyDRZ(input, output):
"""
Create an empty DRZ file.
    This function creates an empty DRZ file in a valid FITS format so that the HST
    pipeline can handle the Multidrizzle zero exposure time exception
where all data has been excluded from processing.
Parameters
----------
input : str
filename of the initial input to process_input
output : str
filename of the default empty _drz.fits file to be generated
"""
# Identify the first input image
inputfile = parseinput.parseinput(input)[0]
if not inputfile:
print('\n******* ERROR *******', file=sys.stderr)
print(
'No input file found! Check specification of parameter '
'"input". ', file=sys.stderr)
print('Quitting...', file=sys.stderr)
print('******* ***** *******\n', file=sys.stderr)
return # raise IOError, "No input file found!"
# Set up output file here...
if output is None:
if len(input) == 1:
oname = fileutil.buildNewRootname(input[0])
else:
oname = 'final'
output = fileutil.buildNewRootname(oname, extn='_drz.fits')
else:
if 'drz' not in output:
output = fileutil.buildNewRootname(output, extn='_drz.fits')
log.info('Setting up output name: %s' % output)
# Open the first image (of the excludedFileList?) to use as a template to build
# the DRZ file.
    try:
        log.info('Building empty DRZ file from %s' % inputfile[0])
        img = fits.open(inputfile[0], memmap=False)
    except Exception:
        raise IOError('Unable to open file %s \n' % inputfile)
# Create the fitsobject
fitsobj = fits.HDUList()
# Copy the primary header
hdu = img[0].copy()
fitsobj.append(hdu)
# Modify the 'NEXTEND' keyword of the primary header to 3 for the
#'sci, wht, and ctx' extensions of the newly created file.
fitsobj[0].header['NEXTEND'] = 3
# Create the 'SCI' extension
hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
hdu.header['EXTNAME'] = 'SCI'
fitsobj.append(hdu)
# Create the 'WHT' extension
hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
hdu.header['EXTNAME'] = 'WHT'
fitsobj.append(hdu)
# Create the 'CTX' extension
hdu = fits.ImageHDU(header=img['sci', 1].header.copy())
hdu.header['EXTNAME'] = 'CTX'
fitsobj.append(hdu)
# Add HISTORY comments explaining the creation of this file.
fitsobj[0].header.add_history("** AstroDrizzle has created this empty "
"DRZ product because**")
fitsobj[0].header.add_history("** all input images were excluded from "
"processing.**")
# Change the filename in the primary header to reflect the name of the output
# filename.
fitsobj[0].header['FILENAME'] = str(output) # +"_drz.fits"
# Change the ROOTNAME keyword to the ROOTNAME of the output PRODUCT
fitsobj[0].header['ROOTNAME'] = str(output.split('_drz.fits')[0])
# Modify the ASN_MTYP keyword to contain "PROD-DTH" so it can be properly
# ingested into the archive catalog.
    # stis has this keyword in the [1] header, so I am directing the code
    # to first look in the primary, then in extension 1
    try:
        fitsobj[0].header['ASN_MTYP'] = 'PROD-DTH'
    except Exception:
        fitsobj[1].header['ASN_MTYP'] = 'PROD-DTH'
# If the file is already on disk delete it and replace it with the
# new file
dirfiles = os.listdir(os.curdir)
if dirfiles.count(output) > 0:
os.remove(output)
log.info(" Replacing %s..." % output)
# Write out the empty DRZ file
fitsobj.writeto(output)
print(textutil.textbox(
'ERROR:\nAstroDrizzle has created an empty DRZ product because all '
'input images were excluded from processing or a user requested the '
'program to stop.') + '\n', file=sys.stderr)
return
|
Create an empty DRZ file.
This function creates an empty DRZ file in a valid FITS format so that the HST
pipeline can handle the Multidrizzle zero exposure time exception
where all data has been excluded from processing.
Parameters
----------
input : str
filename of the initial input to process_input
output : str
filename of the default empty _drz.fits file to be generated
|
entailment
|
def checkDGEOFile(filenames):
"""
Verify that input file has been updated with NPOLFILE
This function checks for the presence of 'NPOLFILE' kw in the primary header
when 'DGEOFILE' kw is present and valid (i.e. 'DGEOFILE' is not blank or 'N/A').
It handles the case of science files downloaded from the archive before the new
software was installed there.
If 'DGEOFILE' is present and 'NPOLFILE' is missing, print a message and let the user
choose whether to (q)uit and update the headers or (c)ontinue and run astrodrizzle
without the non-polynomial correction.
'NPOLFILE' will be populated in the pipeline before astrodrizzle is run.
In the case of WFPC2 the old style dgeo files are used to create detector to image
correction at runtime.
Parameters
----------
filenames : list of str
file names of all images to be checked
"""
msg = """
A 'DGEOFILE' keyword is present in the primary header but 'NPOLFILE' keyword was not found.
This version of the software uses a new format for the residual distortion DGEO files.
Please consult the instrument web pages for which reference files to download.
A small (new style) dgeofile is needed ('_npl.fits' extension) and possibly a
detector to image correction file ('_d2i.fits' extension).
The names of these files must be added to the primary header either using the task XXXX
or manually, for example:
hedit {0:s}[0] npolfile fname_npl.fits add+
hedit {0:s}[0] d2imfile fname_d2i.fits add+
where fname_npl.fits is the name of the new style dgeo file and fname_d2i.fits is
the name of the detector to image correction. After adding these keywords to the
primary header, updatewcs must be run to update the science files:
from stwcs import updatewcs
updatewcs.updatewcs("{0:s}")
Alternatively you may choose to run astrodrizzle without DGEO and detector to image correction.
To stop astrodrizzle and update the dgeo files, type 'q'.
To continue running astrodrizzle without the non-polynomial distortion correction, type 'c':
"""
short_msg = """
To stop astrodrizzle and update the dgeo files, type 'q'.
To continue running astrodrizzle without the non-polynomial distortion correction, type 'c':
"""
for inputfile in filenames:
try:
dgeofile = fits.getval(inputfile, 'DGEOFILE', memmap=False)
except KeyError:
continue
if dgeofile not in ["N/A", "n/a", ""]:
message = msg.format(inputfile)
try:
npolfile = fits.getval(inputfile, 'NPOLFILE', memmap=False)
except KeyError:
ustop = userStop(message)
while ustop is None:
ustop = userStop(short_msg)
if ustop:
return None
return filenames
|
Verify that input file has been updated with NPOLFILE
This function checks for the presence of 'NPOLFILE' kw in the primary header
when 'DGEOFILE' kw is present and valid (i.e. 'DGEOFILE' is not blank or 'N/A').
It handles the case of science files downloaded from the archive before the new
software was installed there.
If 'DGEOFILE' is present and 'NPOLFILE' is missing, print a message and let the user
choose whether to (q)uit and update the headers or (c)ontinue and run astrodrizzle
without the non-polynomial correction.
'NPOLFILE' will be populated in the pipeline before astrodrizzle is run.
In the case of WFPC2 the old style dgeo files are used to create detector to image
correction at runtime.
Parameters
----------
filenames : list of str
file names of all images to be checked
|
entailment
|
def _setDefaults(input_dict={}):
""" Define full set of default values for unit-testing this module.[OBSOLETE]"""
paramDict = {
'input':'*flt.fits',
'output':None,
'mdriztab':None,
'refimage':None,
'runfile':None,
'workinplace':False,
'updatewcs':True,
'proc_unit':'native',
'coeffs':True,
'context':False,
'clean':True,
'group':None,
'ra':None,
'dec':None,
'build':True,
'gain':None,
'gnkeyword':None,
'readnoise':None,
'rnkeyword':None,
'exptime':None,
'expkeyword':None,
'crbitval':4096,
'static':True,
'static_sig':4.0,
'skysub':True,
'skymethod':"globalmin+match",
'skystat':"median",
'skywidth':0.1,
'skylower':None,
'skyupper':None,
'skyclip':5,
'skylsigma':4.0,
'skyusigma':4.0,
"skymask_cat":"",
"use_static":True,
"sky_bits":0,
"skyuser":"",
"skyfile":"",
'driz_separate':True,
'driz_sep_outnx':None,
'driz_sep_outny':None,
'driz_sep_crpix1':None,
'driz_sep_crpix2':None,
'driz_sep_kernel':'turbo',
'driz_sep_scale':None,
'driz_sep_pixfrac':1.0,
'driz_sep_rot':None,
'driz_sep_fillval':None,
'driz_sep_bits':0,
'median':True,
'median_newmasks':True,
'combine_type':"minmed",
'combine_nsigma':"4 3",
'combine_nlow':0,
'combine_nhigh':1,
'combine_lthresh':None,
'combine_hthresh':None,
'combine_grow':1,
'blot':True,
'blot_interp':'poly5',
'blot_sinscl':1.0,
'driz_cr':True,
'driz_cr_corr':False,
'driz_cr_snr':"3.5 3.0",
'driz_cr_scale':"1.2 0.7",
'driz_cr_cteg':0,
'driz_cr_grow':1,
'driz_combine':True,
'final_wht_type':"EXP",
'final_outnx':None,
'final_outny':None,
'final_crpix1':None,
'final_crpix2':None,
'final_kernel':'square',
'final_scale':None,
'final_pixfrac':1.0,
'final_rot':None,
'final_fillval':None,
'final_bits':0}
paramDict.update(input_dict)
print('\nUser Input Parameters for Init Step:')
util.printParams(paramDict)
return paramDict
|
Define full set of default values for unit-testing this module.[OBSOLETE]
|
entailment
|
def getdarkimg(self,chip):
"""
Return an array representing the dark image for the detector.
Returns
-------
dark : array
The dark array in the same shape as the image with **units of cps**.
"""
    # Read the temperature-dependent dark file. The name for the file is taken from
    # the TEMPFILE keyword in the primary header.
tddobj = readTDD.fromcalfile(self.name)
if tddobj is None:
return np.ones(self.full_shape, dtype=self.image_dtype) * self.getdarkcurrent()
else:
        # Create Dark Object from AMPGLOW and Linear Dark components
darkobj = tddobj.getampglow() + tddobj.getlindark()
        # Return the dark image, taking into account any subarray information available
return darkobj[self.ltv2:self.size2,self.ltv1:self.size1]
|
Return an array representing the dark image for the detector.
Returns
-------
dark : array
The dark array in the same shape as the image with **units of cps**.
|
entailment
|
def isCountRate(self):
"""
    isCountRate: Method of IRInputObject used to indicate if the
science data is in units of counts or count rate. This method
assumes that the keyword 'BUNIT' is in the header of the input
FITS file.
"""
has_bunit = False
if 'BUNIT' in self._image['sci',1].header :
has_bunit = True
countrate = False
if (self._image[0].header['UNITCORR'].strip() == 'PERFORM') or \
(has_bunit and self._image['sci',1].header['bunit'].find('/') != -1) :
countrate = True
return countrate
|
isCountRate: Method of IRInputObject used to indicate if the
science data is in units of counts or count rate. This method
assumes that the keyword 'BUNIT' is in the header of the input
FITS file.
|
entailment
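A small self-contained illustration of the BUNIT test above: a '/' in the
unit string (e.g. 'COUNTS/S') marks the data as a count rate.
from astropy.io import fits

hdr = fits.Header()
hdr['BUNIT'] = 'COUNTS/S'
print(hdr['BUNIT'].find('/') != -1)   # True -> count-rate data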
|
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ADCGAIN' #gain has been hardcoded below
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain= 5.4 #measured gain
chip._rdnoise = self.getInstrParameter(
instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
)
chip._exptime = self.getInstrParameter(
instrpars['exptime'], pri_header, instrpars['expkeyword']
)
        if chip._gain is None or chip._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
# We need to treat Read Noise as a special case since it is
# not populated in the NICMOS primary header
if chip._rdnoise is None:
chip._rdnoise = self._getDefaultReadnoise()
chip._darkrate=self._getDarkRate()
chip.darkcurrent = self.getdarkcurrent()
chip._effGain = chip._gain
# this is used in the static mask, static mask name also defined
# here, must be done after outputNames
self._assignSignature(chip._chip)
# Convert the science data to electrons if specified by the user.
self.doUnitConversions()
|
This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
|
entailment
|
def photeq(files='*_flt.fits', sciext='SCI', errext='ERR',
ref_phot=None, ref_phot_ext=None,
phot_kwd='PHOTFLAM', aux_phot_kwd='PHOTFNU',
search_primary=True,
readonly=True, clobber=False, logfile='photeq.log'):
"""
Adjust data values of images by equalizing each chip's PHOTFLAM value
to a single common value so that all chips can be treated equally
by ``AstroDrizzle``.
Parameters
----------
files : str (Default = ``'*_flt.fits'``)
A string containing one of the following:
* a comma-separated list of valid science image file names,
e.g.: ``'j1234567q_flt.fits, j1234568q_flt.fits'``;
* an @-file name, e.g., ``'@files_to_match.txt'``. See notes
section for details on the format of the @-files.
.. note::
**Valid science image file names** are:
* file names of existing FITS, GEIS, or WAIVER FITS files;
* partial file names containing wildcard characters, e.g.,
``'*_flt.fits'``;
* Association (ASN) tables (must have ``_asn``, or ``_asc``
suffix), e.g., ``'j12345670_asn.fits'``.
sciext : str (Default = 'SCI')
Extension *name* of extensions whose data and/or headers should
be corrected.
errext : str (Default = 'ERR')
Extension *name* of the extensions containing corresponding error
arrays. Error arrays are corrected in the same way as science data.
ref_phot : float, None (Default = None)
A number indicating the new value of PHOTFLAM or PHOTFNU
(set by 'phot_kwd') to which the data should be adjusted.
ref_phot_ext : int, str, tuple, None (Default = None)
Extension from which the `photeq` should get the reference photometric
value specified by the `phot_kwd` parameter. This parameter is ignored
if `ref_phot` **is not** `None`. When `ref_phot_ext` is `None`, then
the reference inverse sensitivity value will be picked from the
first `sciext` of the first input image containing `phot_kwd`.
phot_kwd : str (Default = 'PHOTFLAM')
Specifies the primary keyword which contains inverse sensitivity
(e.g., PHOTFLAM). It is used to compute conversion factors by
which data should be rescaled.
aux_phot_kwd : str, None, list of str (Default = 'PHOTFNU')
Same as `phot_kwd` but describes *other* photometric keyword(s)
that should be corrected by inverse of the scale factor used to correct
data. These keywords are *not* used to compute conversion factors.
Multiple keywords can be specified as a Python list of strings:
``['PHOTFNU', 'PHOTOHMY']``.
.. note::
If specifying multiple secondary photometric keywords in the TEAL
interface, use a comma-separated list of keywords.
search_primary : bool (Default = True)
Specifies whether to first search the primary header for the
presence of `phot_kwd` keyword and compute conversion factor based on
that value. This is (partially) ignored when `ref_phot` is not `None` in
the sense that the value specified by `ref_phot` will be used as the
reference *but* in all images primary will be searched for `phot_kwd`
and `aux_phot_kwd` and those values will be corrected
(if ``search_primary=True``).
readonly : bool (Default = True)
If `True`, `photeq` will not modify input files (nevertheless, it will
        convert input GEIS or WAIVERED FITS files to MEF and could overwrite
existing MEF files if `clobber` is set to `True`).
The (console or log file) output however will be identical to the case
when ``readonly=False`` and it can be examined before applying these
changes to input files.
clobber : bool (Default = False)
        Overwrite existing MEF files when converting input WAIVERED FITS or GEIS
to MEF.
logfile : str, None (Default = 'photeq.log')
File name of the log file.
Notes
-----
By default, `photeq` will search for the first inverse sensitivity
value (given by the header keyword specified by the `phot_kwd` parameter,
e.g., PHOTFLAM or PHOTFNU) found in the input images and it will equalize
all other images to this reference value.
It is possible to tell `photeq` to look for the reference inverse
sensitivity value only in a specific extension of input images, e.g.: 3,
('sci',3), etc. This can be done by setting `ref_phot_ext` to a specific
extension. This may be useful, for example, for WFPC2 images: WF3 chip was
one of the better calibrated chips, and so, if one prefers to have
inverse sensitivities equalized to the inverse sensitivity of the WF3 chip,
one can set ``ref_phot_ext=3``.
Alternatively, one can provide their own reference inverse sensitivity
value to which all other images should be "equalized" through the
parameter `ref_phot`.
.. note::
Default parameter values (except for `files`, `readonly`, and `clobber`)
should be acceptable for most HST images.
.. warning::
If images are intended to be used with ``AstroDrizzle``, it is
recommended that sky background measurement be performed on "equalized"
images as the `photeq` is not aware of sky user keyword in the image
headers and thus it cannot correct sky values already recorded in the
headers.
Examples
--------
#. In most cases the default parameters should suffice:
>>> from drizzlepac import photeq
>>> photeq.photeq(files='*_flt.fits', readonly=False)
#. If the re-calibration needs to be done on PHOTFNU rather than
PHOTFLAM, then:
>>> photeq.photeq(files='*_flt.fits', phot_kwd='PHOTFNU',
... aux_phot_kwd='PHOTFLAM')
#. If for WFPC2 data one desires that PHOTFLAM from WF3 be used as the
reference in WFPC2 images, then:
>>> photeq.photeq(files='*_flt.fits', ref_phot_ext=3) # or ('sci',3)
"""
# Time it
runtime_begin = datetime.now()
# check that input file name is a string:
if not isinstance(files, str):
raise TypeError("Argument 'files' must be a comma-separated list of "
" file names")
# Set-up log files:
if isinstance(logfile, str):
# first, in case there are any "leftover" file handlers,
# close and remove them:
for h in _log.handlers:
if h is not _sh_log and isinstance(h, logging.FileHandler):
h.close()
_log.removeHandler(h)
# create file handler:
log_formatter = logging.Formatter('[%(levelname)s:] %(message)s')
log_file_handler = logging.FileHandler(logfile)
log_file_handler.setFormatter(log_formatter)
# add log_file_handler to logger
_log.addHandler(log_file_handler)
elif logfile is not None:
raise TypeError("Unsupported 'logfile' type")
# BEGIN:
_mlinfo("***** {0} started on {1}".format(__taskname__, runtime_begin))
_mlinfo(" Version {0} ({1})".format(__version__, __version_date__))
# check that extension names are strings (or None for error ext):
if sciext is None:
sci_ext4parse = '*'
ext2get = None
else:
if not isinstance(sciext, str):
raise TypeError("Argument 'sciext' must be a string or None")
sciext = sciext.strip()
if sciext.upper() == 'PRIMARY':
sciext = sciext.upper()
ext2get = (sciext, 1)
else:
ext2get = (sciext, '*')
sci_ext4parse = ext2get
if errext is not None and not isinstance(errext, str):
raise TypeError("Argument 'errext' must be a string or None")
# check that phot_kwd is supported:
if not isinstance(phot_kwd, str):
raise TypeError("Argument 'phot_kwd' must be a string")
phot_kwd = phot_kwd.strip().upper()
# check that ref_phot_ext has correct type:
if ref_phot_ext is not None and not \
(isinstance(ref_phot_ext, int) or isinstance(ref_phot_ext, str) \
or (isinstance(ref_phot_ext, tuple) and len(ref_phot_ext) == 2 \
and isinstance(ref_phot_ext[0], str) and \
isinstance(ref_phot_ext[1], int))):
raise TypeError("Unsupported 'ref_phot_ext' type")
if isinstance(ref_phot_ext, str):
ref_phot_ext = (ref_phot_ext, 1)
if aux_phot_kwd is None:
aux_phot_kwd = []
elif isinstance(aux_phot_kwd, str):
aux_phot_kwd = [aux_phot_kwd.strip().upper()]
if phot_kwd in aux_phot_kwd:
raise ValueError("Auxiliary photometric keyword must be different "
"from the main photometric keyword 'phot_kwd'.")
elif hasattr(aux_phot_kwd, '__iter__'):
if not all([isinstance(phot, str) for phot in aux_phot_kwd]):
raise TypeError("Argument 'aux_phot_kwd' must be a string, list of "
"strings, or None")
aux_phot_kwd = [phot.strip().upper() for phot in aux_phot_kwd]
if phot_kwd in aux_phot_kwd:
raise ValueError("Auxiliary photometric keyword(s) must be "
"different from the main photometric keyword "
"'phot_kwd'.")
else:
raise TypeError("Argument 'aux_phot_kwd' must be a string, list of "
"strings, or None")
# read input file list:
fl = parseat.parse_cs_line(csline=files, default_ext=sci_ext4parse,
im_fmode='readonly' if readonly else 'update',
clobber=clobber, fnamesOnly=True,
doNotOpenDQ=True)
# check if user supplied file extensions, set them to the sciext,
# and warn that they will be ignored:
for f in fl:
if f.count > 1 or f.fext[0] != sci_ext4parse:
_mlwarn("WARNING: Extension specifications for file {:s} "
"will be ignored. Using all {:s} extensions instead."
.format(f.image, 'image-like' if sciext is None else \
"{:s}".format(utils.ext2str(sciext,
default_extver=None))))
# find the reference PHOTFLAM/PHOTFNU:
flc = fl[:]
ref_hdu = None
ref_ext = None
ref_user = True
if ref_phot is None:
ref_user = False
for f in flc:
f.convert2ImageRef()
# get primary hdu:
pri_hdu = f.image.hdu[0]
# find all valid extensions:
if ref_phot_ext is None:
if sciext == 'PRIMARY':
extnum = [0]
else:
extnum = utils.get_ext_list(f.image, sciext)
is_pri_hdu = [f.image.hdu[ext] is pri_hdu for ext in extnum]
# if necessary, add primary header to the hdu list:
if search_primary:
try:
pri_index = is_pri_hdu.index(True)
extnum.insert(0, extnum.pop(pri_index))
except ValueError:
extnum.insert(0, 0)
else:
extnum = [ref_phot_ext]
for ext in extnum:
hdu = f.image.hdu[ext]
if phot_kwd in hdu.header:
ref_phot = hdu.header[phot_kwd]
ref_ext = ext
ref_hdu = hdu
break
if ref_phot is None:
_mlwarn("WARNING: Could not find specified inverse "
" sensitivity keyword '{:s}'\n"
" in any of the {} extensions of file '{}'.\n"
" This input file will be ignored."
.format(phot_kwd, 'image-like' if sciext is None else \
"{:s}".format(utils.ext2str(sciext,
default_extver=None)),
os.path.basename(f.image.original_fname)))
f.release_all_images()
fl.remove(f)
else:
break
if ref_phot is None:
raise RuntimeError("Could not find the inverse sensitivity keyword "
"'{:s}' in the specified headers of "
"the input image(s).\nCannot continue."
.format(phot_kwd))
aux_phot_kwd_list = ','.join(aux_phot_kwd)
_mlinfo("\nPRIMARY PHOTOMETRIC KEYWORD: {:s}".format(phot_kwd))
_mlinfo("SECONDARY PHOTOMETRIC KEYWORD(S): {:s}"
.format(aux_phot_kwd_list if aux_phot_kwd_list else 'None'))
if ref_user:
_mlinfo("REFERENCE VALUE PROVIDED BY USER: '{:s}'={}\n"
.format(phot_kwd, ref_phot))
else:
_mlinfo("REFERENCE VALUE FROM FILE: '{:s}[{:s}]'\n"
.format(os.path.basename(f.image.original_fname),
utils.ext2str(ref_ext)))
_mlinfo("REFERENCE '{:s}' VALUE IS: {}".format(phot_kwd, ref_phot))
# equalize PHOTFLAM/PHOTFNU
for f in fl:
# open the file if necessary:
if f.fnamesOnly:
_mlinfo("\nProcessing file '{:s}'".format(f.image))
f.convert2ImageRef()
else:
_mlinfo("\nProcessing file '{:s}'".format(f.image.original_fname))
# first, see if photflam is in the primary header and save this value:
pri_conv = None
if search_primary:
whdu = f.image.hdu[0]
if phot_kwd in whdu.header:
_mlinfo(" * Primary header:")
if whdu is ref_hdu:
pri_conv = 1.0
_mlinfo(" - '{}' = {} found in the primary header."
.format(phot_kwd, whdu.header[phot_kwd]))
_mlinfo(" - Data conversion factor based on primary "
"header: {}".format(pri_conv))
else:
_mlinfo(" - '{}' found in the primary header."
.format(phot_kwd))
pri_conv = whdu.header[phot_kwd] / ref_phot
_mlinfo(" - Setting {:s} in the primary header to {} "
"(old value was {})"
.format(phot_kwd, ref_phot, whdu.header[phot_kwd]))
_mlinfo(" - Data conversion factor based on primary "
"header: {}".format(pri_conv))
whdu.header[phot_kwd] = ref_phot
# correct the "other" photometric keyword, if present:
if pri_conv is not None and whdu is not ref_hdu:
for aux_kwd in aux_phot_kwd:
if aux_kwd in whdu.header:
old_aux_phot = whdu.header[aux_kwd]
new_aux_phot = old_aux_phot / pri_conv
whdu.header[aux_kwd] = new_aux_phot
_mlinfo(" - Setting {:s} in the primary header "
"to {} (old value was {})"
.format(aux_kwd, new_aux_phot, old_aux_phot))
# process data and error arrays when 'sciext' was specifically set to
# 'PRIMARY':
if sciext == 'PRIMARY' and pri_conv is not None:
has_data = (hasattr(whdu, 'data') and
whdu.data is not None)
# correct data:
if has_data:
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= pri_conv
_mlinfo(" - Data have been multiplied by {}"
.format(pri_conv))
else:
_mlwarn("WARNING: Data not converted because it is of "
"non-floating point type.")
# correct error array:
if errext is not None:
eext = (errext, 1)
try:
whdu = f.image.hdu[eext]
except KeyError:
_mlwarn(" - WARNING: Error extension {:s} not found."
.format(utils.ext2str(eext)))
f.release_all_images()
continue
if hasattr(whdu, 'data') and whdu.data is not None:
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= pri_conv
_mlinfo(" - Error array (ext={}) has been "
"multiplied by {}".format(eext, pri_conv))
else:
_mlinfo(" - Error array in extension {:s} "
"contains non-floating point data.\n"
" Skipping this extension"
.format(utils.ext2str(eext)))
f.release_all_images()
continue
# find all valid extensions:
extnum = utils.get_ext_list(f.image, sciext)
for ext in extnum:
whdu = f.image.hdu[ext]
conv = None
if whdu is ref_hdu:
_mlinfo(" * EXT: {} - This is the \"reference\" extension.\n"
" Nothing to do. Skipping this extension..."
.format(ext))
continue
has_data = (hasattr(whdu, 'data') and
whdu.data is not None)
if has_data and not np.issubdtype(whdu.data.dtype, np.floating):
_mlinfo(" * EXT: {} contains non-floating point data. "
"Skipping this extension".format(ext))
# find all auxiliary photometric keywords present in the header:
paux = [aux_kwd for aux_kwd in aux_phot_kwd if aux_kwd \
in whdu.header]
if phot_kwd in whdu.header:
_mlinfo(" * EXT: {}".format(ext))
old_phot = whdu.header[phot_kwd]
conv = old_phot / ref_phot
_mlinfo(" - Setting {:s} to {} (old value was {})"
.format(phot_kwd, ref_phot, old_phot))
whdu.header[phot_kwd] = ref_phot
_mlinfo(" - Computed conversion factor for data: {}"
.format(conv))
elif pri_conv is None:
_mlinfo(" * EXT: {}".format(ext))
_mlinfo(" - '{:s} not found. Skipping this extension..."
.format(phot_kwd))
continue
else:
_mlinfo(" * EXT: {}".format(ext))
# if paux:
# print("ERROR: Primary photometric keyword ('{:s}') is "
# "missing but\n the secondary keywords ('{:s}') "
# "are present. This extension cannot be processed."
# .format(phot_kwd, ','.join(paux)))
# continue
_mlinfo(" - '{:s} not found. Using conversion factor "
"based\n on the primary header: {}"
.format(phot_kwd, pri_conv))
conv = pri_conv
# correct the "other" photometric keyword, if present:
if conv is not None:
for aux_kwd in paux:
old_aux_phot = whdu.header[aux_kwd]
new_aux_phot = old_aux_phot / conv
whdu.header[aux_kwd] = new_aux_phot
_mlinfo(" - Setting {:s} to {} (old value was {})"
.format(aux_kwd, new_aux_phot, old_aux_phot))
# correct data:
if has_data:
if conv is None:
_mlinfo(" * EXT: {}".format(ext))
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= conv
_mlinfo(" - Data have been multiplied by {}"
.format(conv))
else:
_mlinfo("WARNING: Non-floating point data. Data cannot "
"be re-scaled.")
# correct error array:
if errext is not None and isinstance(ext, tuple) and len(ext) == 2:
eext = (errext, ext[1])
try:
whdu = f.image.hdu[eext]
except KeyError:
continue
if hasattr(whdu, 'data') and whdu.data is not None:
if np.issubdtype(whdu.data.dtype, np.floating):
whdu.data *= conv
_mlinfo(" - Error array (ext={}) has been "
"multiplied by {}".format(eext, conv))
else:
_mlinfo(" - Error array in extension {:s} "
"contains non-floating point data.\n"
" Skipping this extension"
.format(utils.ext2str(eext)))
f.release_all_images()
_mlinfo("\nDone.")
if readonly:
_mlinfo("\nNOTE: '{:s}' was run in READONLY mode\n"
" and input image(s)' content WAS NOT MODIFIED."
.format(__taskname__))
# close all log file handlers:
for h in _log.handlers:
if h is not _sh_log and isinstance(h, logging.FileHandler):
h.close()
_log.removeHandler(h)
|
Adjust data values of images by equalizing each chip's PHOTFLAM value
to a single common value so that all chips can be treated equally
by ``AstroDrizzle``.
Parameters
----------
files : str (Default = ``'*_flt.fits'``)
A string containing one of the following:
* a comma-separated list of valid science image file names,
e.g.: ``'j1234567q_flt.fits, j1234568q_flt.fits'``;
* an @-file name, e.g., ``'@files_to_match.txt'``. See notes
section for details on the format of the @-files.
.. note::
**Valid science image file names** are:
* file names of existing FITS, GEIS, or WAIVER FITS files;
* partial file names containing wildcard characters, e.g.,
``'*_flt.fits'``;
* Association (ASN) tables (must have ``_asn``, or ``_asc``
suffix), e.g., ``'j12345670_asn.fits'``.
sciext : str (Default = 'SCI')
Extension *name* of extensions whose data and/or headers should
be corrected.
errext : str (Default = 'ERR')
Extension *name* of the extensions containing corresponding error
arrays. Error arrays are corrected in the same way as science data.
ref_phot : float, None (Default = None)
A number indicating the new value of PHOTFLAM or PHOTFNU
(set by 'phot_kwd') to which the data should be adjusted.
ref_phot_ext : int, str, tuple, None (Default = None)
Extension from which the `photeq` should get the reference photometric
value specified by the `phot_kwd` parameter. This parameter is ignored
if `ref_phot` **is not** `None`. When `ref_phot_ext` is `None`, then
the reference inverse sensitivity value will be picked from the
first `sciext` of the first input image containing `phot_kwd`.
phot_kwd : str (Default = 'PHOTFLAM')
Specifies the primary keyword which contains inverse sensitivity
(e.g., PHOTFLAM). It is used to compute conversion factors by
which data should be rescaled.
aux_phot_kwd : str, None, list of str (Default = 'PHOTFNU')
Same as `phot_kwd` but describes *other* photometric keyword(s)
that should be corrected by inverse of the scale factor used to correct
data. These keywords are *not* used to compute conversion factors.
Multiple keywords can be specified as a Python list of strings:
``['PHOTFNU', 'PHOTOHMY']``.
.. note::
If specifying multiple secondary photometric keywords in the TEAL
interface, use a comma-separated list of keywords.
search_primary : bool (Default = True)
Specifies whether to first search the primary header for the
presence of `phot_kwd` keyword and compute conversion factor based on
that value. This is (partially) ignored when `ref_phot` is not `None` in
the sense that the value specified by `ref_phot` will be used as the
reference, *but* the primary header of each image will still be searched
for `phot_kwd` and `aux_phot_kwd`, and those values will be corrected
(if ``search_primary=True``).
readonly : bool (Default = True)
If `True`, `photeq` will not modify input files (nevertheless, it will
convert input GEIS or WAIVERED FITS files to MEF and could overwrite
existing MEF files if `clobber` is set to `True`).
The (console or log file) output however will be identical to the case
when ``readonly=False`` and it can be examined before applying these
changes to input files.
clobber : bool (Default = False)
Overwrite existing MEF files when converting input WAIVERED FITS or GEIS
to MEF.
logfile : str, None (Default = 'photeq.log')
File name of the log file.
Notes
-----
By default, `photeq` will search for the first inverse sensitivity
value (given by the header keyword specified by the `phot_kwd` parameter,
e.g., PHOTFLAM or PHOTFNU) found in the input images and it will equalize
all other images to this reference value.
It is possible to tell `photeq` to look for the reference inverse
sensitivity value only in a specific extension of input images, e.g.: 3,
('sci',3), etc. This can be done by setting `ref_phot_ext` to a specific
extension. This may be useful, for example, for WFPC2 images: WF3 chip was
one of the better calibrated chips, and so, if one prefers to have
inverse sensitivities equalized to the inverse sensitivity of the WF3 chip,
one can set ``ref_phot_ext=3``.
Alternatively, one can provide their own reference inverse sensitivity
value to which all other images should be "equalized" through the
parameter `ref_phot`.
.. note::
Default parameter values (except for `files`, `readonly`, and `clobber`)
should be acceptable for most HST images.
.. warning::
If images are intended to be used with ``AstroDrizzle``, it is
recommended that sky background measurement be performed on "equalized"
images, as `photeq` is not aware of the user sky keyword in the image
headers and thus cannot correct sky values already recorded in the
headers.
Examples
--------
#. In most cases the default parameters should suffice:
>>> from drizzlepac import photeq
>>> photeq.photeq(files='*_flt.fits', readonly=False)
#. If the re-calibration needs to be done on PHOTFNU rather than
PHOTFLAM, then:
>>> photeq.photeq(files='*_flt.fits', phot_kwd='PHOTFNU',
... aux_phot_kwd='PHOTFLAM')
#. If for WFPC2 data one desires that PHOTFLAM from WF3 be used as the
reference in WFPC2 images, then:
>>> photeq.photeq(files='*_flt.fits', ref_phot_ext=3) # or ('sci',3)
|
entailment
|
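A note on the arithmetic: for every extension, photeq computes a single conversion factor conv = old_phot / ref_phot, multiplies the data (and error arrays) by it, and divides secondary keywords such as PHOTFNU by it. Below is a minimal sketch of that bookkeeping; equalize_chip is a hypothetical helper used for illustration, not drizzlepac's in-place HDU update.
import numpy as np

def equalize_chip(data, photflam, ref_photflam, photfnu=None):
    """Rescale `data` so that its effective PHOTFLAM becomes `ref_photflam`."""
    conv = photflam / ref_photflam               # conversion factor for the data
    scaled = data * conv                         # data now match the reference
    new_photfnu = None if photfnu is None else photfnu / conv  # secondary keyword
    return scaled, ref_photflam, new_photfnu

chip = np.ones((4, 4), dtype=np.float32)
scaled, flam, fnu = equalize_chip(chip, 2.0e-19, 1.0e-19, photfnu=3.0e-7)
print(scaled[0, 0], flam, fnu)                   # 2.0 1e-19 1.5e-07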
def _managePsets(configobj, section_name, task_name, iparsobj=None, input_dict=None):
""" Read in parameter values from PSET-like configobj tasks defined for
source-finding algorithms, and any other PSET-like tasks under this task,
and merge those values into the input configobj dictionary.
"""
# Merge all configobj instances into a single object
configobj[section_name] = {}
# Load the default full set of configuration parameters for the PSET:
iparsobj_cfg = teal.load(task_name)
# Identify optional parameters in input_dicts that are from this
# PSET and add it to iparsobj:
if input_dict is not None:
for key in list(input_dict.keys()):
if key in iparsobj_cfg:
if iparsobj is not None and key in iparsobj:
raise DuplicateKeyError("Duplicate parameter '{:s}' "
"provided for task {:s}".format(key, task_name))
iparsobj_cfg[key] = input_dict[key]
del input_dict[key]
if iparsobj is not None:
iparsobj_cfg.update(iparsobj)
del iparsobj_cfg['_task_name_']
# merge these parameters into full set
configobj[section_name].merge(iparsobj_cfg)
|
Read in parameter values from PSET-like configobj tasks defined for
source-finding algorithms, and any other PSET-like tasks under this task,
and merge those values into the input configobj dictionary.
|
entailment
|
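The merge order in _managePsets matters: defaults loaded via teal.load() are overridden by recognized keys consumed out of input_dict, while an explicitly supplied iparsobj wins last, and a key supplied through both routes raises an error. A sketch of the same discipline with plain dicts standing in for configobj/teal objects (all names here are hypothetical):
class DuplicateKeyError(Exception):
    pass

def merge_pset(defaults, explicit=None, overrides=None):
    merged = dict(defaults)                      # full default PSET parameters
    if overrides:
        for key in list(overrides):
            if key in merged:
                if explicit and key in explicit:
                    raise DuplicateKeyError("Duplicate parameter %r" % key)
                merged[key] = overrides.pop(key)  # consume recognized keys
    if explicit:
        merged.update(explicit)                  # explicit PSET wins last
    return merged

print(merge_pset({'threshold': 4.0, 'conv_width': 3.5},
                 overrides={'threshold': 3.0}))
# {'threshold': 3.0, 'conv_width': 3.5}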
def edit_imagefindpars():
""" Allows the user to edit the imagefindpars configObj in a TEAL GUI
"""
teal.teal(imagefindpars.__taskname__, returnAs=None,
autoClose=True, loadOnly=False, canExecute=False)
|
Allows the user to edit the imagefindpars configObj in a TEAL GUI
|
entailment
|
def edit_refimagefindpars():
""" Allows the user to edit the refimagefindpars configObj in a TEAL GUI
"""
teal.teal(refimagefindpars.__taskname__, returnAs=None,
autoClose=True, loadOnly=False, canExecute=False)
|
Allows the user to edit the refimagefindpars configObj in a TEAL GUI
|
entailment
|
def run(configobj):
""" Primary Python interface for image registration code
This task replaces 'tweakshifts'
"""
print('TweakReg Version %s(%s) started at: %s \n'%(
__version__,__version_date__,util._ptime()[0]))
util.print_pkg_versions()
# make sure 'updatewcs' is set to False when running from GUI or if missing
# from configObj:
if 'updatewcs' not in configobj:
configobj['updatewcs'] = False
# Check to see whether or not the imagefindpars parameters have
# already been loaded, as done through the python interface.
# Repeat for refimagefindpars
if PSET_SECTION not in configobj:
# Manage PSETs for source finding algorithms
_managePsets(configobj, PSET_SECTION, imagefindpars.__taskname__)
#print configobj[PSET_SECTION]
if PSET_SECTION_REFIMG not in configobj:
# Manage PSETs for source finding algorithms in reference image
_managePsets(configobj, PSET_SECTION_REFIMG,
refimagefindpars.__taskname__)
log.debug('')
log.debug("==== TweakReg was invoked with the following parameters: ====")
log.debug('')
util.print_cfg(configobj, log.debug)
# print out user set input parameter values for running this task
log.info('')
log.info("USER INPUT PARAMETERS common to all Processing Steps:")
util.printParams(configobj, log=log)
# start interpretation of input parameters
input_files = configobj['input']
# Start by interpreting the inputs
use_catfile = True
expand_refcat = configobj['expand_refcat']
enforce_user_order = configobj['enforce_user_order']
filenames, catnames = tweakutils.parse_input(
input_files, sort_wildcards=not enforce_user_order
)
catdict = {}
for indx,f in enumerate(filenames):
if catnames is not None and len(catnames) > 0:
catdict[f] = catnames[indx]
else:
catdict[f] = None
if not filenames:
print('No filenames matching input %r were found.' % input_files)
raise IOError
# Verify that files are writable (based on file permissions) so that
# they can be updated if either 'updatewcs' or 'updatehdr' have
# been turned on (2 cases which require updating the input files)
if configobj['updatewcs'] or configobj['UPDATE HEADER']['updatehdr']:
filenames = util.verifyFilePermissions(filenames)
if filenames is None or len(filenames) == 0:
raise IOError
if configobj['UPDATE HEADER']['updatehdr']:
wname = configobj['UPDATE HEADER']['wcsname']
# verify that a unique WCSNAME has been specified by the user
if not configobj['UPDATE HEADER']['reusename']:
for fname in filenames:
uniq = util.verifyUniqueWcsname(fname,wname)
if not uniq:
errstr = 'WCSNAME "%s" already present in "%s". '%(wname,fname)+\
'A unique value for the "wcsname" parameter needs to be ' + \
'specified. \n\nQuitting!'
print(textutil.textbox(errstr,width=60))
raise IOError
if configobj['updatewcs']:
print('\nRestoring WCS solutions to original state using updatewcs...\n')
updatewcs.updatewcs(filenames)
if catnames in [None,'',' ','INDEF'] or len(catnames) == 0:
catfile_par = configobj['COORDINATE FILE DESCRIPTION']['catfile']
# check to see whether the user specified input catalogs through other parameters
if catfile_par not in [None,'',' ','INDEF']:
# read in catalog file list provided by user
catnames,catdict = tweakutils.parse_atfile_cat('@'+catfile_par)
else:
use_catfile = False
if 'exclusions' in configobj and \
configobj['exclusions'] not in [None,'',' ','INDEF']:
if os.path.exists(configobj['exclusions']):
excl_files, excl_dict = tweakutils.parse_atfile_cat(
'@'+configobj['exclusions'])
# make sure the dictionary is well formed and that keys are base
# file names and that exclusion files have been expanded:
exclusion_files = []
exclusion_dict = {}
rootpath = os.path.abspath(
os.path.split(configobj['exclusions'])[0]
)
for f in excl_dict.keys():
print(f)
bf = os.path.basename(f)
exclusion_files.append(bf)
reglist = excl_dict[f]
if reglist is None:
exclusion_dict[bf] = None
continue
new_reglist = []
for regfile in reglist:
if regfile in [ None, 'None', '', ' ', 'INDEF' ]:
new_reglist.append(None)
else:
abs_regfile = os.path.normpath(
os.path.join(rootpath, regfile)
)
new_reglist.append(abs_regfile)
exclusion_dict[bf] = new_reglist
else:
raise IOError('Could not find specified exclusions file "{:s}"'
.format(configobj['exclusions']))
else:
exclusion_files = [None]*len(filenames)
exclusion_dict = {}
for f in filenames:
exclusion_dict[os.path.basename(f)] = None
# Verify that we have the same number of catalog files as input images
if catnames is not None and (len(catnames) > 0):
missed_files = []
for f in filenames:
if f not in catdict:
missed_files.append(f)
if len(missed_files) > 0:
print('The input catalogs do not contain entries for the following images:')
print(missed_files)
raise IOError
else:
# setup array of None values as input to catalog parameter for Image class
catnames = [None]*len(filenames)
use_catfile = False
# convert input images and any provided catalog file names into Image objects
input_images = []
# copy out only those parameters needed for Image class
catfile_kwargs = tweakutils.get_configobj_root(configobj)
# define default value for 'xyunits' assuming sources to be derived from image directly
catfile_kwargs['xyunits'] = 'pixels' # initialized here, required by Image class
del catfile_kwargs['exclusions']
if use_catfile:
# reset parameters based on parameter settings in this section
catfile_kwargs.update(configobj['COORDINATE FILE DESCRIPTION'])
for sort_par in imgclasses.sortKeys:
catfile_kwargs['sort_'+sort_par] = catfile_kwargs[sort_par]
# Update parameter set with 'SOURCE FINDING PARS' now
catfile_kwargs.update(configobj[PSET_SECTION])
uphdr_par = configobj['UPDATE HEADER']
hdrlet_par = configobj['HEADERLET CREATION']
objmatch_par = configobj['OBJECT MATCHING PARAMETERS']
catfit_pars = configobj['CATALOG FITTING PARAMETERS']
catfit_pars['minobj'] = objmatch_par['minobj']
objmatch_par['residplot'] = catfit_pars['residplot']
hdrlet_par.update(uphdr_par) # default hdrlet name
catfile_kwargs['updatehdr'] = uphdr_par['updatehdr']
shiftpars = configobj['OPTIONAL SHIFTFILE OUTPUT']
# verify a valid hdrname was provided, if headerlet was set to True
imgclasses.verify_hdrname(**hdrlet_par)
print('')
print('Finding shifts for: ')
for f in filenames:
print(' {}'.format(f))
print('')
log.info("USER INPUT PARAMETERS for finding sources for each input image:")
util.printParams(catfile_kwargs, log=log)
log.info('')
try:
minsources = max(1, catfit_pars['minobj'])
omitted_images = []
all_input_images = []
for imgnum in range(len(filenames)):
# Create Image instances for all input images
try:
regexcl = exclusion_dict[os.path.basename(filenames[imgnum])]
except KeyError:
regexcl = None
img = imgclasses.Image(filenames[imgnum],
input_catalogs=catdict[filenames[imgnum]],
exclusions=regexcl,
**catfile_kwargs)
all_input_images.append(img)
if img.num_sources < minsources:
warn_str = "Image '{}' will not be aligned " \
"since it contains fewer than {} sources." \
.format(img.name, minsources)
print('\nWARNING: {}\n'.format(warn_str))
log.warning(warn_str)
omitted_images.append(img)
continue
input_images.append(img)
except KeyboardInterrupt:
for img in input_images:
img.close()
print('Quitting as a result of user request (Ctrl-C)...')
return
# create set of parameters to pass to RefImage class
kwargs = tweakutils.get_configobj_root(configobj)
# Determine a reference image or catalog and
# return the full list of RA/Dec positions
# Determine what WCS needs to be used for reference tangent plane
refcat_par = configobj['REFERENCE CATALOG DESCRIPTION']
if refcat_par['refcat'] not in [None,'',' ','INDEF']: # User specified a catalog to use
# Update kwargs with reference catalog parameters
kwargs.update(refcat_par)
# input_images list can be modified below.
# So, make a copy of the original:
input_images_orig_copy = copy(input_images)
do_match_refimg = False
# otherwise, extract the catalog from the first input image source list
if configobj['refimage'] not in [None, '',' ','INDEF']: # User specified an image to use
# A hack to allow different source finding parameters for
# the reference image:
ref_sourcefind_pars = \
tweakutils.get_configobj_root(configobj[PSET_SECTION_REFIMG])
ref_catfile_kwargs = catfile_kwargs.copy()
ref_catfile_kwargs.update(ref_sourcefind_pars)
ref_catfile_kwargs['updatehdr'] = False
log.info('')
log.info("USER INPUT PARAMETERS for finding sources for "
"the reference image:")
util.printParams(ref_catfile_kwargs, log=log)
#refimg = imgclasses.Image(configobj['refimage'],**catfile_kwargs)
# Check to see whether the user specified a separate catalog
# of reference source positions and replace default source list with it
if refcat_par['refcat'] not in [None,'',' ','INDEF']: # User specified a catalog to use
ref_source = refcat_par['refcat']
cat_src = ref_source
xycat = None
cat_src_type = 'catalog'
else:
try:
regexcl = exclusion_dict[configobj['refimage']]
except KeyError:
regexcl = None
refimg = imgclasses.Image(configobj['refimage'],
exclusions=regexcl,
**ref_catfile_kwargs)
ref_source = refimg.all_radec
cat_src = None
xycat = refimg.xy_catalog
cat_src_type = 'image'
try:
if 'use_sharp_round' in ref_catfile_kwargs:
kwargs['use_sharp_round'] = ref_catfile_kwargs['use_sharp_round']
refimage = imgclasses.RefImage(configobj['refimage'],
ref_source, xycatalog=xycat,
cat_origin=cat_src, **kwargs)
refwcs_fname = refimage.wcs.filename
if cat_src is not None:
refimage.name = cat_src
except KeyboardInterrupt:
refimage.close()
for img in input_images:
img.close()
print('Quitting as a result of user request (Ctrl-C)...')
return
if len(input_images) < 1:
warn_str = "Fewer than two images are available for alignment. " \
"Quitting..."
print('\nWARNING: {}\n'.format(warn_str))
log.warning(warn_str)
for img in input_images:
img.close()
return
image = _max_overlap_image(refimage, input_images, expand_refcat,
enforce_user_order)
elif refcat_par['refcat'] not in [None,'',' ','INDEF']:
# a reference catalog is provided but not the reference image/wcs
if len(input_images) < 1:
warn_str = "No images available for alignment. Quitting..."
print('\nWARNING: {}\n'.format(warn_str))
log.warning(warn_str)
for img in input_images:
img.close()
return
if len(input_images) == 1:
image = input_images.pop(0)
else:
image, image2 = _max_overlap_pair(input_images, expand_refcat,
enforce_user_order)
input_images.insert(0, image2)
# Workaround the defect described in ticket:
# http://redink.stsci.edu/trac/ssb/stsci_python/ticket/1151
refwcs = []
for i in all_input_images:
refwcs.extend(i.get_wcs())
kwargs['ref_wcs_name'] = image.get_wcs()[0].filename
# A hack to allow different source finding parameters for
# the reference image:
ref_sourcefind_pars = \
tweakutils.get_configobj_root(configobj[PSET_SECTION_REFIMG])
ref_catfile_kwargs = catfile_kwargs.copy()
ref_catfile_kwargs.update(ref_sourcefind_pars)
ref_catfile_kwargs['updatehdr'] = False
log.info('')
log.info("USER INPUT PARAMETERS for finding sources for "
"the reference image (not used):")
util.printParams(ref_catfile_kwargs, log=log)
ref_source = refcat_par['refcat']
cat_src = ref_source
xycat = None
try:
if 'use_sharp_round' in ref_catfile_kwargs:
kwargs['use_sharp_round'] = ref_catfile_kwargs['use_sharp_round']
kwargs['find_bounding_polygon'] = True
refimage = imgclasses.RefImage(refwcs,
ref_source, xycatalog=xycat,
cat_origin=cat_src, **kwargs)
refwcs_fname = refimage.wcs.filename
refimage.name = cat_src
cat_src_type = 'catalog'
except KeyboardInterrupt:
refimage.close()
for img in input_images:
img.close()
print('Quitting as a result of user request (Ctrl-C)...')
return
else:
if len(input_images) < 2:
warn_str = "Fewer than two images available for alignment. " \
"Quitting..."
print('\nWARNING: {}\n'.format(warn_str))
log.warning(warn_str)
for img in input_images:
img.close()
return
kwargs['use_sharp_round'] = catfile_kwargs['use_sharp_round']
cat_src = None
refimg, image = _max_overlap_pair(input_images, expand_refcat,
enforce_user_order)
refwcs = []
#refwcs.extend(refimg.get_wcs())
#refwcs.extend(image.get_wcs())
#for i in input_images:
#refwcs.extend(i.get_wcs())
# Workaround the defect described in ticket:
# http://redink.stsci.edu/trac/ssb/stsci_python/ticket/1151
for i in all_input_images:
refwcs.extend(i.get_wcs())
kwargs['ref_wcs_name'] = refimg.get_wcs()[0].filename
try:
ref_source = refimg.all_radec
refimage = imgclasses.RefImage(refwcs, ref_source,
xycatalog=refimg.xy_catalog, **kwargs)
refwcs_fname = refimg.name
cat_src_type = 'image'
except KeyboardInterrupt:
refimage.close()
for img in input_images:
img.close()
print('Quitting as a result of user request (Ctrl-C)...')
return
omitted_images.insert(0, refimg) # refimage *must* be first
do_match_refimg = True
print("\n{0}\nPerforming alignment in the projection plane defined by the "
"WCS\nderived from '{1}'\n{0}\n".format('='*63, refwcs_fname))
if refimage.outxy is not None:
if cat_src is None:
cat_src = refimage.name
try:
log.info("USER INPUT PARAMETERS for matching sources:")
util.printParams(objmatch_par, log=log)
log.info('')
log.info("USER INPUT PARAMETERS for fitting source lists:")
util.printParams(configobj['CATALOG FITTING PARAMETERS'], log=log)
if hdrlet_par['headerlet']:
log.info('')
log.info("USER INPUT PARAMETERS for creating headerlets:")
util.printParams(hdrlet_par, log=log)
if shiftpars['shiftfile']:
log.info('')
log.info("USER INPUT PARAMETERS for creating a shiftfile:")
util.printParams(shiftpars, log=log)
# Now, apply reference WCS to each image's sky positions as well as the
# reference catalog sky positions,
# then perform the fit between the reference catalog positions and
# each image's positions
quit_immediately = False
xycat_lines = ''
xycat_filename = None
for img in input_images_orig_copy:
if xycat_filename is None:
xycat_filename = img.rootname+'_xy_catfile.list'
# Keep a record of all the generated input_xy catalogs
xycat_lines += img.get_xy_catnames()
retry_flags = len(input_images)*[0]
objmatch_par['cat_src_type'] = cat_src_type
while image is not None:
print('\n' + '=' * 20)
print('Performing fit for: {}\n'.format(image.name))
image.match(refimage, quiet_identity=False, **objmatch_par)
assert(len(retry_flags) == len(input_images))
if not image.goodmatch:
# we will try to match it again once reference catalog
# has expanded with new sources:
#if expand_refcat:
input_images.append(image)
retry_flags.append(1)
if len(retry_flags) > 0 and retry_flags[0] == 0:
retry_flags.pop(0)
image = input_images.pop(0)
# try to match next image in the list
continue
else:
# no new sources have been added to the reference
# catalog and we have already tried to match
# images to the existing reference catalog
#input_images.append(image) # <- add it back for later reporting
#retry_flags.append(1)
break
image.performFit(**catfit_pars)
if image.quit_immediately:
quit_immediately = True
image.close()
break
# add unmatched sources to the reference catalog
# (to expand it):
if expand_refcat:
refimage.append_not_matched_sources(image)
image.updateHeader(wcsname=uphdr_par['wcsname'],
reusename=uphdr_par['reusename'])
if hdrlet_par['headerlet']:
image.writeHeaderlet(**hdrlet_par)
if configobj['clean']:
image.clean()
image.close()
if refimage.dirty and len(input_images) > 0:
# The reference catalog has been updated with new sources.
# Clear retry flags and get next image:
image = _max_overlap_image(
refimage, input_images, expand_refcat,
enforce_user_order
)
retry_flags = len(input_images)*[0]
refimage.clear_dirty_flag()
elif len(input_images) > 0 and retry_flags[0] == 0:
retry_flags.pop(0)
image = input_images.pop(0)
else:
break
assert(len(retry_flags) == len(input_images))
if not quit_immediately:
# process images that have not been matched in order to
# update their headers:
si = 0
if do_match_refimg:
image = omitted_images[0]
image.match(refimage, quiet_identity=True, **objmatch_par)
si = 1
# process omitted (from start) images separately:
for image in omitted_images[si:]:
image.match(refimage, quiet_identity=False, **objmatch_par)
# add to the list of omitted images, images that could not
# be matched:
omitted_images.extend(input_images)
if len(input_images) > 0:
print("\nUnable to match the following images:")
print("-------------------------------------")
for image in input_images:
print(image.name)
print("")
# update headers:
for image in omitted_images:
image.performFit(**catfit_pars)
if image.quit_immediately:
quit_immediately = True
image.close()
break
image.updateHeader(wcsname=uphdr_par['wcsname'],
reusename=uphdr_par['reusename'])
if hdrlet_par['headerlet']:
image.writeHeaderlet(**hdrlet_par)
if configobj['clean']:
image.clean()
image.close()
if configobj['writecat'] and not configobj['clean']:
# Write out catalog file recording input XY catalogs used
# This file will be suitable for use as input to 'tweakreg'
# as the 'catfile' parameter
if os.path.exists(xycat_filename):
os.remove(xycat_filename)
f = open(xycat_filename, mode='w')
f.writelines(xycat_lines)
f.close()
if expand_refcat:
base_reg_name = os.path.splitext(
os.path.basename(cat_src))[0]
refimage.write_skycatalog(
'cumulative_sky_refcat_{:s}.coo' \
.format(base_reg_name),
show_flux=True, show_id=True
)
# write out shiftfile (if specified)
if shiftpars['shiftfile']:
tweakutils.write_shiftfile(input_images_orig_copy,
shiftpars['outshifts'],
outwcs=shiftpars['outwcs'])
except KeyboardInterrupt:
refimage.close()
for img in input_images_orig_copy:
img.close()
del img
print('Quitting as a result of user request (Ctrl-C)...')
return
else:
print('No valid sources in reference frame. Quitting...')
return
|
Primary Python interface for image registration code
This task replaces 'tweakshifts'
|
entailment
|
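The main fitting loop in run() uses a retry queue: an image whose match fails is re-queued and retried only after new sources have been appended to the expanding reference catalog. A stripped-down sketch of that discipline, with hypothetical match and expand callables standing in for Image.match() and RefImage.append_not_matched_sources():
from collections import deque

def align_all(images, refcat, match, expand):
    queue = deque(images)
    retried = {id(im): False for im in images}   # one retry per catalog growth
    unmatched = []
    while queue:
        im = queue.popleft()
        if match(im, refcat):                    # fit succeeded
            if expand(refcat, im):               # catalog grew with new sources
                for other in queue:
                    retried[id(other)] = False   # allow failed images a retry
        elif not retried[id(im)]:
            retried[id(im)] = True
            queue.append(im)                     # try again later
        else:
            unmatched.append(im)                 # give up on this image
    return unmatched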
def print_rev_id(localRepoPath):
"""prints information about the specified local repository to STDOUT. Expected method of execution: command-line or
shell script call
Parameters
----------
localRepoPath: string
Local repository path.
Returns
=======
Nothing as such. The subroutine will exit with a status of 0 if everything ran OK, and a value of 111 if
something went wrong.
"""
start_path = os.getcwd()
try:
log.info("Local repository path: {}".format(localRepoPath))
os.chdir(localRepoPath)
log.info("\n== Remote URL")
os.system('git remote -v')
# log.info("\n== Remote Branches")
# os.system("git branch -r")
log.info("\n== Local Branches")
os.system("git branch")
log.info("\n== Most Recent Commit")
os.system("git log |head -1")
rv = 0
except Exception:
rv = 111
log.info("WARNING! get_git_rev_info.print_rev_id() encountered a problem and cannot continue.")
finally:
os.chdir(start_path)
if rv != 0:
sys.exit(rv)
|
prints information about the specified local repository to STDOUT. Expected method of execution: command-line or
shell script call
Parameters
----------
localRepoPath: string
Local repository path.
Returns
=======
Nothing as such. The subroutine will exit with a status of 0 if everything ran OK, and a value of 111 if
something went wrong.
|
entailment
|
def get_rev_id(localRepoPath):
"""returns the current full git revision id of the specified local repository. Expected method of execution: python
subroutine call
Parameters
----------
localRepoPath: string
Local repository path.
Returns
=======
full git revision ID of the specified repository if everything ran OK, and "FAILURE" if something went
wrong.
"""
start_path = os.getcwd()
rv = "FAILURE: git revision info not found"  # default in case 'git log' produces no output
try:
os.chdir(localRepoPath)
instream = os.popen("git --no-pager log --max-count=1 | head -1")
for streamline in instream.readlines():
streamline = streamline.strip()
if streamline.startswith("commit "):
rv = streamline.replace("commit ","")
else:
raise ValueError("unexpected 'git log' output")
except Exception:
rv = "FAILURE: git revision info not found"
finally:
os.chdir(start_path)
return rv
|
returns the current full git revision id of the specified local repository. Expected method of execution: python
subroutine call
Parameters
----------
localRepoPath: string
Local repository path.
Returns
=======
full git revision ID of the specified repository if everything ran OK, and "FAILURE" if something went
wrong.
|
entailment
|
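Both git helpers above shell out through os.system/os.popen and scrape the output of 'git log'. A sketch of a more robust alternative (not part of this module) that asks 'git rev-parse' for the revision directly via subprocess; note that capture_output requires Python 3.7+:
import subprocess

def git_rev_id(repo_path):
    """Return the full HEAD revision id of `repo_path`, or a failure string."""
    try:
        out = subprocess.run(["git", "rev-parse", "HEAD"],
                             cwd=repo_path,          # no os.chdir() bookkeeping
                             capture_output=True, text=True, check=True)
        return out.stdout.strip()
    except (OSError, subprocess.CalledProcessError):
        return "FAILURE: git revision info not found"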
def update(input,refdir="jref$",local=None,interactive=False,wcsupdate=True):
"""
Updates headers of files given as input to point to the new reference files
NPOLFILE and D2IMFILE required with the new C version of MultiDrizzle.
Parameters
-----------
input : string or list
Name of input file or files acceptable forms:
- single filename with or without directory
- @-file
- association table
- python list of filenames
- wildcard specification of filenames
refdir : string
Path to directory containing new reference files, either
environment variable or full path.
local : boolean
Specifies whether or not to copy new reference files to local
directory for use with the input files.
interactive : boolean
Specifies whether or not to interactively ask the user for the
exact names of the new reference files instead of automatically
searching a directory for them.
wcsupdate : boolean
Specifies whether or not to update the WCS information in this
file to use the new reference files.
Examples
--------
1. A set of associated images specified by an ASN file can be updated to use
the NPOLFILEs and D2IMFILE found in the local directory defined using
the `myjref$` environment variable under PyRAF using::
>>> import updatenpol
>>> updatenpol.update('j8bt06010_asn.fits', 'myref$')
2. Another use under Python would be to feed it a specific list of files
to be updated using::
>>> updatenpol.update(['file1_flt.fits','file2_flt.fits'],'myjref$')
3. Files in another directory can also be processed using::
>>> updatenpol.update('data$*flt.fits','../new/ref/')
Notes
-----
.. warning::
This program requires access to the `jref$` directory in order
to evaluate the DGEOFILE specified in the input image header.
This evaluation allows the program to get the information it
needs to identify the correct NPOLFILE.
The use of this program now requires that a directory be set up with
all the new NPOLFILE and D2IMFILE reference files for ACS (a single
directory for all files for all ACS detectors will be fine, much like
jref). Currently, all the files generated by the ACS team have initially
been made available at::
/grp/hst/acs/lucas/new-npl/
The one known limitation to how this program works comes from
confusion if more than 1 file could possibly be used as the new
reference file. This would only happen when NPOLFILE reference files
have been checked into CDBS multiple times, and there are several
versions that apply to the same detector/filter combination. However,
that can be sorted out later if we get into that situation at all.
"""
print('UPDATENPOL Version',__version__+'('+__version_date__+')')
# expand (as needed) the list of input files
files,fcol = parseinput.parseinput(input)
if not interactive:
# expand reference directory name (if necessary) to
# interpret IRAF or environment variable names
rdir = fu.osfn(refdir)
ngeofiles,ngcol = parseinput.parseinput(os.path.join(rdir,'*npl.fits'))
# Find D2IMFILE in refdir for updating input file header as well
d2ifiles,d2col = parseinput.parseinput(os.path.join(rdir,"*d2i.fits"))
# Now, build a matched list of input files and DGEOFILE reference files
# to use for selecting the appropriate new reference file from the
# refdir directory.
for f in files:
print('Updating: ',f)
fdir = os.path.split(f)[0]
# Open each file...
fimg = fits.open(f, mode='update', memmap=False)
phdr = fimg['PRIMARY'].header
fdet = phdr['detector']
# get header of DGEOFILE
dfile = phdr.get('DGEOFILE','')
if dfile in ['N/A','',' ',None]:
npolname = ''
else:
dhdr = fits.getheader(fu.osfn(dfile), memmap=False)
if not interactive:
# search all new NPOLFILEs for one that matches current DGEOFILE config
npol = find_npolfile(ngeofiles,fdet,[phdr['filter1'],phdr['filter2']])
else:
if sys.version_info[0] >= 3:
npol = input("Enter name of NPOLFILE for %s:"%f)
else:
npol = raw_input("Enter name of NPOLFILE for %s:"%f)
if npol == "": npol = None
if npol is None:
errstr = "No valid NPOLFILE found in "+rdir+" for detector="+fdet+"\n"
errstr += " filters = "+phdr['filter1']+","+phdr['filter2']
raise ValueError(errstr)
npolname = os.path.split(npol)[1]
if local:
npolname = os.path.join(fdir,npolname)
# clobber any previous copies of this reference file
if os.path.exists(npolname): os.remove(npolname)
shutil.copy(npol,npolname)
else:
if '$' in refdir:
npolname = refdir+npolname
else:
npolname = os.path.join(refdir,npolname)
phdr.set('NPOLFILE', value=npolname,
comment="Non-polynomial corrections in Paper IV LUT",
after='DGEOFILE')
# Now find correct D2IFILE
if not interactive:
d2i = find_d2ifile(d2ifiles,fdet)
else:
if sys.version_info[0] >= 3:
d2i = input("Enter name of D2IMFILE for %s:"%f)
else:
d2i = raw_input("Enter name of D2IMFILE for %s:"%f)
if d2i == "": d2i = None
if d2i is None:
print('=============\nWARNING:')
print(" No valid D2IMFILE found in "+rdir+" for detector ="+fdet)
print(" D2IMFILE correction will not be applied.")
print('=============\n')
d2iname = ""
else:
d2iname = os.path.split(d2i)[1]
if local:
# Copy D2IMFILE to local data directory alongside input file as well
d2iname = os.path.join(fdir,d2iname)
# clobber any previous copies of this reference file
if os.path.exists(d2iname): os.remove(d2iname)
shutil.copy(d2i,d2iname)
else:
if '$' in refdir:
d2iname = refdir+d2iname
else:
d2iname = os.path.join(refdir,d2iname)
phdr.set('D2IMFILE', value=d2iname,
comment="Column correction table",
after='DGEOFILE')
# Close this input file header and go on to the next
fimg.close()
if wcsupdate:
updatewcs.updatewcs(f)
|
Updates headers of files given as input to point to the new reference files
NPOLFILE and D2IMFILE required with the new C version of MultiDrizzle.
Parameters
-----------
input : string or list
Name of input file or files acceptable forms:
- single filename with or without directory
- @-file
- association table
- python list of filenames
- wildcard specification of filenames
refdir : string
Path to directory containing new reference files, either
environment variable or full path.
local : boolean
Specifies whether or not to copy new reference files to local
directory for use with the input files.
interactive : boolean
Specifies whether or not to interactively ask the user for the
exact names of the new reference files instead of automatically
searching a directory for them.
wcsupdate : boolean
Specifies whether or not to update the WCS information in this
file to use the new reference files.
Examples
--------
1. A set of associated images specified by an ASN file can be updated to use
the NPOLFILEs and D2IMFILE found in the local directory defined using
the `myjref$` environment variable under PyRAF using::
>>> import updatenpol
>>> updatenpol.update('j8bt06010_asn.fits', 'myref$')
2. Another use under Python would be to feed it a specific list of files
to be updated using::
>>> updatenpol.update(['file1_flt.fits','file2_flt.fits'],'myjref$')
3. Files in another directory can also be processed using::
>>> updatenpol.update('data$*flt.fits','../new/ref/')
Notes
-----
.. warning::
This program requires access to the `jref$` directory in order
to evaluate the DGEOFILE specified in the input image header.
This evaluation allows the program to get the information it
needs to identify the correct NPOLFILE.
The use of this program now requires that a directory be set up with
all the new NPOLFILE and D2IMFILE reference files for ACS (a single
directory for all files for all ACS detectors will be fine, much like
jref). Currently, all the files generated by the ACS team have initially
been made available at::
/grp/hst/acs/lucas/new-npl/
The one known limitation to how this program works comes from
confusion if more than 1 file could possibly be used as the new
reference file. This would only happen when NPOLFILE reference files
have been checked into CDBS multiple times, and there are several
versions that apply to the same detector/filter combination. However,
that can be sorted out later if we get into that situation at all.
|
entailment
|
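The reference-file selection in update() reduces to comparing DETECTOR and FILTER1/FILTER2 header keywords, as the find_npolfile()/find_d2ifile() helpers below implement against real FITS headers. The same matching rule sketched over plain dicts (the file names and header values here are made up):
def pick_reference(candidates, detector, filters):
    """Return the first candidate whose header matches detector and filters."""
    for fname, hdr in candidates.items():
        if hdr['DETECTOR'] != detector:
            continue
        if hdr['FILTER1'] == 'ANY' or \
           (hdr['FILTER1'], hdr['FILTER2']) == tuple(filters):
            return fname
    return None

refs = {
    'wfc_f606w_npl.fits': {'DETECTOR': 'WFC', 'FILTER1': 'F606W', 'FILTER2': 'CLEAR2L'},
    'wfc_any_npl.fits':   {'DETECTOR': 'WFC', 'FILTER1': 'ANY',   'FILTER2': 'ANY'},
}
print(pick_reference(refs, 'WFC', ['F606W', 'CLEAR2L']))  # wfc_f606w_npl.fits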
def find_d2ifile(flist,detector):
""" Search a list of files for one that matches the detector specified.
"""
d2ifile = None
for f in flist:
fdet = fits.getval(f, 'detector', memmap=False)
if fdet == detector:
d2ifile = f
return d2ifile
|
Search a list of files for one that matches the detector specified.
|
entailment
|
def find_npolfile(flist,detector,filters):
""" Search a list of files for one that matches the configuration
of detector and filters used.
"""
npolfile = None
for f in flist:
fdet = fits.getval(f, 'detector', memmap=False)
if fdet == detector:
filt1 = fits.getval(f, 'filter1', memmap=False)
filt2 = fits.getval(f, 'filter2', memmap=False)
fdate = fits.getval(f, 'date', memmap=False)
if filt1 == 'ANY' or \
(filt1 == filters[0] and filt2 == filters[1]):
npolfile = f
return npolfile
|
Search a list of files for one that matches the configuration
of detector and filters used.
|
entailment
|
def run(configobj=None,editpars=False):
""" Teal interface for running this code.
"""
if configobj is None:
configobj =teal.teal(__taskname__,loadOnly=(not editpars))
update(configobj['input'],configobj['refdir'],
local=configobj['local'],interactive=configobj['interactive'],
wcsupdate=configobj['wcsupdate'])
|
Teal interface for running this code.
|
entailment
|
def retrieve_observation(obsid, suffix=['FLC'], archive=False,clobber=False):
"""Simple interface for retrieving an observation from the MAST archive
If the input obsid is for an association, it will request all members with
the specified suffixes.
Parameters
-----------
obsid : string
ID for observation to be retrieved from the MAST archive. Only the
IPPSSOOT (rootname) of exposure or ASN needs to be provided; e.g., ib6v06060.
suffix : list
List containing suffixes of files which should be requested from MAST.
archive : Boolean
Retain copies of the downloaded files in the astroquery created sub-directories? Default is 'False'.
clobber : Boolean
Download and Overwrite existing files? Default is 'False'.
Returns
-------
local_files : list
List of filenames
"""
local_files = []
# Query MAST for the data with an observation type of either "science" or "calibration"
obsTable = Observations.query_criteria(obs_id=obsid, obstype='all')
# Catch the case where no files are found for download
if len(obsTable) == 0:
log.info("WARNING: Query for {} returned NO RESULTS!".format(obsid))
return local_files
dpobs = Observations.get_product_list(obsTable)
dataProductsByID = Observations.filter_products(dpobs,
productSubGroupDescription=suffix,
extension='fits',
mrp_only=False)
# After the filtering has been done, ensure there is still data in the table for download.
# If the table is empty, look for FLT images in lieu of FLC images. Only want one
# or the other (not both!), so just do the filtering again.
if len(dataProductsByID) == 0:
log.info("WARNING: No FLC files found for {} - will look for FLT files instead.".format(obsid))
suffix = ['FLT']
dataProductsByID = Observations.filter_products(dpobs,
productSubGroupDescription=suffix,
extension='fits',
mrp_only=False)
# If still no data, then return. An exception will eventually be thrown in
# the higher level code.
if len(dataProductsByID) == 0:
log.info("WARNING: No FLC or FLT files found for {}.".format(obsid))
return local_files
allImages = []
for tableLine in dataProductsByID:
allImages.append(tableLine['productFilename'])
log.info(allImages)
if not clobber:
rowsToRemove = []
for rowCtr in range(0,len(dataProductsByID)):
if os.path.exists(dataProductsByID[rowCtr]['productFilename']):
log.info("{} already exists. File download skipped.".format(dataProductsByID[rowCtr]['productFilename']))
rowsToRemove.append(rowCtr)
if rowsToRemove:
rowsToRemove.reverse()
for rowNum in rowsToRemove:
dataProductsByID.remove_row(rowNum)
manifest = Observations.download_products(dataProductsByID, mrp_only=False)
if not clobber:
rowsToRemove.reverse()
for rownum in rowsToRemove:
if not manifest:
local_files = allImages
return local_files
else:
manifest.insert_row(rownum,vals=[allImages[rownum],"LOCAL","None","None"])
download_dir = None
for file,fileStatus in zip(manifest['Local Path'],manifest['Status']):
if fileStatus != "LOCAL":
# Identify what sub-directory was created by astroquery for the download
if download_dir is None:
file_path = file.split(os.sep)
file_path.remove('.')
download_dir = file_path[0]
# Move or copy downloaded file to current directory
local_file = os.path.abspath(os.path.basename(file))
if archive:
shutil.copy(file, local_file)
else:
shutil.move(file, local_file)
# Record what files were downloaded and their current location
local_files.append(os.path.basename(local_file))
else:
local_files.append(file)
if not archive:
# Remove astroquery created sub-directories
shutil.rmtree(download_dir)
return local_files
|
Simple interface for retrieving an observation from the MAST archive
If the input obsid is for an association, it will request all members with
the specified suffixes.
Parameters
-----------
obsid : string
ID for observation to be retrieved from the MAST archive. Only the
IPPSSOOT (rootname) of exposure or ASN needs to be provided; e.g., ib6v06060.
suffix : list
List containing suffixes of files which should be requested from MAST.
archive : Boolean
Retain copies of the downloaded files in the astroquery created sub-directories? Default is 'False'.
clobber : Boolean
Download and Overwrite existing files? Default is 'False'.
Returns
-------
local_files : list
List of filenames
|
entailment
|
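For reference, the astroquery call sequence wrapped by retrieve_observation() can be exercised directly. A minimal sketch (requires network access; the obs_id is only an example):
from astroquery.mast import Observations

# Query MAST, list the data products, keep only FLC FITS files, and download.
obs = Observations.query_criteria(obs_id='ib6v06060', obstype='all')
products = Observations.get_product_list(obs)
flc = Observations.filter_products(products,
                                   productSubGroupDescription=['FLC'],
                                   extension='fits', mrp_only=False)
manifest = Observations.download_products(flc, mrp_only=False)
print(manifest['Local Path'])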
def reset_dq_bits(input,bits,extver=None,extname='dq'):
""" This function resets bits in the integer array(s) of a FITS file.
Parameters
----------
input : str
full filename with path; wildcards and comma-separated lists are also accepted
bits : str
sum or list of integers corresponding to all the bits to be reset
extver : int, optional
List of version numbers of the DQ arrays
to be corrected [Default Value: None, will do all]
extname : str, optional
EXTNAME of the DQ arrays in the FITS file
[Default Value: 'dq']
Notes
-----
The default value of None for the 'extver' parameter specifies that all
extensions with EXTNAME matching 'dq' (as specified by the 'extname'
parameter) will have their bits reset.
Examples
--------
1. The following command will reset the 4096 bits in all
the DQ arrays of the file input_file_flt.fits::
reset_dq_bits("input_file_flt.fits", 4096)
2. To reset the 2,32,64 and 4096 bits in the second DQ array,
specified as 'dq,2', in the file input_file_flt.fits::
reset_dq_bits("input_file_flt.fits", "2,32,64,4096", extver=2)
"""
# Interpret bits value
bits = interpret_bit_flags(bits)
flist, fcol = parseinput.parseinput(input)
for filename in flist:
# open input file in write mode to allow updating the DQ array in-place
p = fits.open(filename, mode='update', memmap=False)
# Identify the DQ array to be updated
# If no extver is specified, build a list of all DQ arrays in the file
if extver is None:
extver = []
for hdu in p:
# find only those extensions which match the input extname
# using case-insensitive name comparisons for 'extname'
if 'extver' in hdu.header and \
hdu.header['extname'].lower() == extname.lower():
extver.append(int(hdu.header['extver']))
else:
# Otherwise, insure that input extver values are a list
if not isinstance(extver, list): extver = [extver]
# for each DQ array identified in the file...
for extn in extver:
dqarr = p[extname,extn].data
dqdtype = dqarr.dtype
# reset the desired bits
p[extname,extn].data = (dqarr & ~bits).astype(dqdtype) # preserve original dtype
log.info('Reset bit values of %s to a value of 0 in %s[%s,%s]' %
(bits, filename, extname, extn))
# close the file with the updated DQ array(s)
p.close()
|
This function resets bits in the integer array(s) of a FITS file.
Parameters
----------
input : str
full filename with path; wildcards and comma-separated lists are also accepted
bits : str
sum or list of integers corresponding to all the bits to be reset
extver : int, optional
List of version numbers of the DQ arrays
to be corrected [Default Value: None, will do all]
extname : str, optional
EXTNAME of the DQ arrays in the FITS file
[Default Value: 'dq']
Notes
-----
The default value of None for the 'extver' parameter specifies that all
extensions with EXTNAME matching 'dq' (as specified by the 'extname'
parameter) will have their bits reset.
Examples
--------
1. The following command will reset the 4096 bits in all
the DQ arrays of the file input_file_flt.fits::
reset_dq_bits("input_file_flt.fits", 4096)
2. To reset the 2,32,64 and 4096 bits in the second DQ array,
specified as 'dq,2', in the file input_file_flt.fits::
reset_dq_bits("input_file_flt.fits", "2,32,64,4096", extver=2)
|
entailment
|
def replace(input, **pars):
""" Replace pixels in `input` that have a value of `pixvalue`
with a value given by `newvalue`.
"""
pixvalue = pars.get('pixvalue', np.nan)
if pixvalue is None: pixvalue = np.nan # treat None as np.nan
newvalue = pars.get('newvalue', 0.0)
ext = pars.get('ext',None)
if ext in ['',' ','None',None]:
ext = None
files = parseinput.parseinput(input)[0]
for f in files:
fimg = fits.open(f, mode='update', memmap=False)
if ext is None:
# replace pixels in ALL extensions
extn = [i for i in fimg]
else:
if isinstance(ext, list):
extn = [fimg[e] for e in ext]
else:
extn = [fimg[ext]]
for e in extn:
if e.data is not None and e.is_image: # ignore empty Primary HDUs
print("Converting {}[{},{}] value of {} to {}".format(
f,e.name,e.ver,pixvalue,newvalue))
if np.isnan(pixvalue):
e.data[np.isnan(e.data)] = newvalue
else:
e.data[np.where(e.data == pixvalue)] = newvalue
fimg.close()
|
Replace pixels in `input` that have a value of `pixvalue`
with a value given by `newvalue`.
|
entailment
|
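One subtlety replace() handles is that NaN never compares equal to itself, so NaN pixels must be selected with np.isnan() rather than an equality test. A short standalone demonstration of the same masking:
import numpy as np

data = np.array([1.0, np.nan, -999.0, np.nan])
pixvalue, newvalue = np.nan, 0.0
if np.isnan(pixvalue):
    data[np.isnan(data)] = newvalue   # equality would never match NaN
else:
    data[data == pixvalue] = newvalue
print(data)                           # [   1.    0. -999.    0.]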
def tweakback(drzfile, input=None, origwcs = None,
newname = None, wcsname = None,
extname='SCI', force=False, verbose=False):
"""
Apply WCS solution recorded in drizzled file to distorted input images
(``_flt.fits`` files) used to create the drizzled file. This task relies on
the original WCS and updated WCS to be recorded in the drizzled image's
header as the last 2 alternate WCSs.
Parameters
----------
drzfile : str (Default = '')
filename of undistorted image which contains the new WCS
and WCS prior to being updated
newname : str (Default = None)
Value of ``WCSNAME`` to be used to label the updated solution in the
output (e.g., ``_flt.fits``) files. If left blank or None, it will
default to using the current ``WCSNAME`` value from the input drzfile.
input : str (Default = '')
filenames of distorted images to be updated using new WCS
from 'drzfile'. These can be provided either as an ``@-file``,
a comma-separated list of filenames or using wildcards.
.. note:: A blank value will indicate that the task should derive the
filenames from the 'drzfile' itself, if possible. The filenames will be
derived from the ``D*DATA`` keywords written out by
``AstroDrizzle``. If they cannot be found, the task will quit.
origwcs : str (Default = None)
Value of ``WCSNAME`` keyword prior to the drzfile image being updated
by ``TweakReg``. If left blank or None, it will default to using the
second to last ``WCSNAME*`` keyword value found in the header.
wcsname : str (Default = None)
Value of WCSNAME for updated solution written out by ``TweakReg`` as
specified by the `wcsname` parameter from ``TweakReg``. If this is
left blank or `None`, it will default to the current ``WCSNAME``
value from the input drzfile.
extname : str (Default = 'SCI')
Name of extension in `input` files to be updated with new WCS
force : bool (Default = False)
        This parameter specifies whether or not to force an update of the WCS
        even though a WCS with this solution or `wcsname` already exists.
verbose : bool (Default = False)
This parameter specifies whether or not to print out additional
messages during processing.
Notes
-----
The algorithm used by this function is based on linearization of
the exact compound operator that converts input image coordinates
to the coordinates (in the input image) that would result in
alignment with the new drizzled image WCS.
If no input distorted files are specified as input, this task will attempt
to generate the list of filenames from the drizzled input file's own
header.
EXAMPLES
--------
An image named ``acswfc_mos2_drz.fits`` was created from 4 images using
astrodrizzle. This drizzled image was then aligned to another image using
tweakreg and the header was updated using the ``WCSNAME`` = ``TWEAK_DRZ``.
The new WCS can then be used to update each of the 4 images that were
combined to make up this drizzled image using:
>>> from drizzlepac import tweakback
>>> tweakback.tweakback('acswfc_mos2_drz.fits')
If the same WCS should be applied to a specific set of images, those images
can be updated using:
>>> tweakback.tweakback('acswfc_mos2_drz.fits',
... input='img_mos2a_flt.fits,img_mos2e_flt.fits')
See Also
--------
stwcs.wcsutil.altwcs: Alternate WCS implementation
"""
print("TweakBack Version {:s}({:s}) started at: {:s}\n"
.format(__version__,__version_date__,util._ptime()[0]))
# Interpret input list/string into list of filename(s)
fltfiles = parseinput.parseinput(input)[0]
if fltfiles is None or len(fltfiles) == 0:
# try to extract the filenames from the drizzled file's header
fltfiles = extract_input_filenames(drzfile)
if fltfiles is None:
print('*'*60)
print('*')
print('* ERROR:')
print('* No input filenames found! ')
        print('* Please specify "fltfiles" or ensure that input drizzled')
print('* image contains D*DATA keywords. ')
print('*')
print('*'*60)
        raise ValueError("No input filenames found.")
if not isinstance(fltfiles,list):
fltfiles = [fltfiles]
sciext = determine_extnum(drzfile, extname='SCI')
scihdr = fits.getheader(drzfile, ext=sciext, memmap=False)
### Step 1: Read in updated and original WCS solutions
# determine keys for all alternate WCS solutions in drizzled image header
wkeys = wcsutil.altwcs.wcskeys(drzfile, ext=sciext)
wnames = wcsutil.altwcs.wcsnames(drzfile, ext=sciext)
if not util.is_blank(newname):
final_name = newname
else:
final_name = wnames[wkeys[-1]]
# Read in HSTWCS objects for final,updated WCS and previous WCS from
# from drizzled image header
# The final solution also serves as reference WCS when using updatehdr
if not util.is_blank(wcsname):
for k in wnames:
if wnames[k] == wcsname:
wcskey = k
break
else:
wcskey = wkeys[-1]
    final_wcs = wcsutil.HSTWCS(drzfile, ext=sciext, wcskey=wcskey)
if not util.is_blank(origwcs):
for k in wnames:
if wnames[k] == origwcs:
orig_wcskey = k
orig_wcsname = origwcs
break
else:
orig_wcsname,orig_wcskey = determine_orig_wcsname(scihdr,wnames,wkeys)
orig_wcs = wcsutil.HSTWCS(drzfile,ext=sciext,wcskey=orig_wcskey)
# read in RMS values reported for new solution
crderr1kw = 'CRDER1'+wkeys[-1]
crderr2kw = 'CRDER2'+wkeys[-1]
if crderr1kw in scihdr:
crderr1 = fits.getval(drzfile, crderr1kw, ext=sciext, memmap=False)
else:
crderr1 = 0.0
if crderr2kw in scihdr:
crderr2 = fits.getval(drzfile, crderr2kw, ext=sciext, memmap=False)
else:
crderr2 = 0.0
del scihdr
### Step 2: Apply solution to input file headers
for fname in fltfiles:
logstr = "....Updating header for {:s}...".format(fname)
if verbose:
print("\n{:s}\n".format(logstr))
else:
log.info(logstr)
# reset header WCS keywords to original (OPUS generated) values
imhdulist = fits.open(fname, mode='update', memmap=False)
extlist = get_ext_list(imhdulist, extname='SCI')
if not extlist:
extlist = [0]
        # ensure that input PRIMARY WCS has been archived before overwriting
# with new solution
wcsutil.altwcs.archiveWCS(imhdulist, extlist, reusekey=True)
# Process MEF images...
for ext in extlist:
logstr = "Processing {:s}[{:s}]".format(imhdulist.filename(),
ext2str(ext))
if verbose:
print("\n{:s}\n".format(logstr))
else:
log.info(logstr)
chip_wcs = wcsutil.HSTWCS(imhdulist, ext=ext)
update_chip_wcs(chip_wcs, orig_wcs, final_wcs,
xrms=crderr1, yrms = crderr2)
# Update FITS file with newly updated WCS for this chip
extnum = imhdulist.index(imhdulist[ext])
updatehdr.update_wcs(imhdulist, extnum, chip_wcs,
wcsname=final_name, reusename=False,
verbose=verbose)
imhdulist.close()
|
Apply WCS solution recorded in drizzled file to distorted input images
(``_flt.fits`` files) used to create the drizzled file. This task relies on
the original WCS and updated WCS to be recorded in the drizzled image's
header as the last 2 alternate WCSs.
Parameters
----------
drzfile : str (Default = '')
filename of undistorted image which contains the new WCS
and WCS prior to being updated
newname : str (Default = None)
Value of ``WCSNAME`` to be used to label the updated solution in the
        output (e.g., ``_flt.fits``) files. If left blank or None, it will
default to using the current ``WCSNAME`` value from the input drzfile.
input : str (Default = '')
filenames of distorted images to be updated using new WCS
from 'drzfile'. These can be provided either as an ``@-file``,
a comma-separated list of filenames or using wildcards.
.. note:: A blank value will indicate that the task should derive the
filenames from the 'drzfile' itself, if possible. The filenames will be
derived from the ``D*DATA`` keywords written out by
        ``AstroDrizzle``. If they cannot be found, the task will quit.
origwcs : str (Default = None)
Value of ``WCSNAME`` keyword prior to the drzfile image being updated
by ``TweakReg``. If left blank or None, it will default to using the
second to last ``WCSNAME*`` keyword value found in the header.
wcsname : str (Default = None)
Value of WCSNAME for updated solution written out by ``TweakReg`` as
specified by the `wcsname` parameter from ``TweakReg``. If this is
left blank or `None`, it will default to the current ``WCSNAME``
value from the input drzfile.
extname : str (Default = 'SCI')
Name of extension in `input` files to be updated with new WCS
force : bool (Default = False)
        This parameter specifies whether or not to force an update of the WCS
        even though a WCS with this solution or `wcsname` already exists.
verbose : bool (Default = False)
This parameter specifies whether or not to print out additional
messages during processing.
Notes
-----
The algorithm used by this function is based on linearization of
the exact compound operator that converts input image coordinates
to the coordinates (in the input image) that would result in
alignment with the new drizzled image WCS.
If no input distorted files are specified as input, this task will attempt
to generate the list of filenames from the drizzled input file's own
header.
EXAMPLES
--------
An image named ``acswfc_mos2_drz.fits`` was created from 4 images using
astrodrizzle. This drizzled image was then aligned to another image using
tweakreg and the header was updated using the ``WCSNAME`` = ``TWEAK_DRZ``.
The new WCS can then be used to update each of the 4 images that were
combined to make up this drizzled image using:
>>> from drizzlepac import tweakback
>>> tweakback.tweakback('acswfc_mos2_drz.fits')
If the same WCS should be applied to a specific set of images, those images
can be updated using:
>>> tweakback.tweakback('acswfc_mos2_drz.fits',
... input='img_mos2a_flt.fits,img_mos2e_flt.fits')
See Also
--------
stwcs.wcsutil.altwcs: Alternate WCS implementation
|
entailment
|
def extract_input_filenames(drzfile):
"""
Generate a list of filenames from a drizzled image's header
"""
data_kws = fits.getval(drzfile, 'd*data', ext=0, memmap=False)
if len(data_kws) == 0:
return None
fnames = []
for kw in data_kws.cards:
f = kw.value.split('[')[0]
if f not in fnames:
fnames.append(f)
return fnames
|
Generate a list of filenames from a drizzled image's header
|
entailment
|
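A minimal sketch of the D*DATA parsing above, run against an in-memory header rather than a real drizzled product; the keyword values are made up, and astropy's wildcard header lookup does the matching.

from astropy.io import fits

hdr = fits.Header()
hdr['D001DATA'] = 'img_a_flt.fits[sci,1]'
hdr['D002DATA'] = 'img_a_flt.fits[sci,2]'
hdr['D003DATA'] = 'img_b_flt.fits[sci,1]'

fnames = []
for card in hdr['D*DATA'].cards:   # wildcard lookup returns the matching cards
    f = card.value.split('[')[0]   # strip the '[sci,n]' specifier
    if f not in fnames:
        fnames.append(f)
print(fnames)                      # ['img_a_flt.fits', 'img_b_flt.fits']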
def determine_orig_wcsname(header, wnames, wkeys):
"""
Determine the name of the original, unmodified WCS solution
"""
    orig_wcsname = None
    orig_key = None
    for k, w in wnames.items():
        if w[:4] == 'IDC_':
            orig_wcsname = w
            orig_key = k
            break
if orig_wcsname is None:
# No IDC_ wcsname found... revert to second to last if available
if len(wnames) > 1:
orig_key = wkeys[-2]
orig_wcsname = wnames[orig_key]
return orig_wcsname,orig_key
|
Determine the name of the original, unmodified WCS solution
|
entailment
|
def parse_atfile_cat(input):
"""
Return the list of catalog filenames specified as part of the input @-file
"""
with open(input[1:]) as f:
catlist = []
catdict = {}
for line in f.readlines():
if line[0] == '#' or not line.strip():
continue
lspl = line.split()
if len(lspl) > 1:
catdict[lspl[0]] = lspl[1:]
catlist.append(lspl[1:])
else:
catdict[lspl[0]] = None
catlist.append(None)
return catlist, catdict
|
Return the list of catalog filenames specified as part of the input @-file
|
entailment
|
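The @-file layout expected here is one image per line followed by optional catalog names. A usage sketch with invented file names, assuming parse_atfile_cat() above is in scope:

with open('input.at', 'w') as f:
    f.write('# image        catalog(s)\n'
            'img1_flt.fits  img1_sci1.coo img1_sci2.coo\n'
            'img2_flt.fits  img2_sci1.coo\n'
            'img3_flt.fits\n')

catlist, catdict = parse_atfile_cat('@input.at')
print(catdict['img1_flt.fits'])   # ['img1_sci1.coo', 'img1_sci2.coo']
print(catdict['img3_flt.fits'])   # None (no catalog listed)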
def parse_skypos(ra, dec):
"""
Function to parse RA and Dec input values and turn them into decimal
degrees
Input formats could be:
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
nn.nnnnnnnn
"nn.nnnnnnn"
"""
rval = make_val_float(ra)
dval = make_val_float(dec)
if rval is None:
rval, dval = radec_hmstodd(ra, dec)
return rval, dval
|
Function to parse RA and Dec input values and turn them into decimal
degrees
Input formats could be:
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
nn.nnnnnnnn
"nn.nnnnnnn"
|
entailment
|
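A quick demonstration of the two input paths, assuming parse_skypos() and its make_val_float()/radec_hmstodd() helpers are importable; the coordinate values are arbitrary:

# Floats (or float-like strings) pass straight through...
print(parse_skypos(153.18875, -45.297222))
# ...while sexagesimal strings fall through to radec_hmstodd().
print(parse_skypos('10:12:45.3', '-45:17:50'))   # ~(153.18875, -45.29722)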
def radec_hmstodd(ra, dec):
""" Function to convert HMS values into decimal degrees.
This function relies on the astropy.coordinates package to perform the
conversion to decimal degrees.
Parameters
----------
ra : list or array
List or array of input RA positions
dec : list or array
List or array of input Dec positions
Returns
-------
pos : arr
Array of RA,Dec positions in decimal degrees
Notes
-----
This function supports any specification of RA and Dec as HMS or DMS;
specifically, the formats::
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
See Also
--------
astropy.coordinates
"""
    hmstrans = str.maketrans(string.ascii_letters,
                             ' ' * len(string.ascii_letters))
if isinstance(ra, list):
rastr = ':'.join(ra)
elif isinstance(ra, float):
rastr = None
pos_ra = ra
elif ra.find(':') < 0:
# convert any non-numeric characters to spaces
# (we already know the units)
rastr = ra.translate(hmstrans).strip()
        rastr = rastr.replace('  ', ' ')  # collapse double spaces
# convert 'nn nn nn.nn' to final 'nn:nn:nn.nn' string
rastr = rastr.replace(' ', ':')
else:
rastr = ra
if isinstance(dec, list):
decstr = ':'.join(dec)
elif isinstance(dec, float):
decstr = None
pos_dec = dec
elif dec.find(':') < 0:
decstr = dec.translate(hmstrans).strip()
        decstr = decstr.replace('  ', ' ')  # collapse double spaces
decstr = decstr.replace(' ', ':')
else:
decstr = dec
if rastr is None:
pos = (pos_ra, pos_dec)
else:
pos_coord = coords.SkyCoord(rastr + ' ' + decstr,
unit=(u.hourangle, u.deg))
pos = (pos_coord.ra.deg, pos_coord.dec.deg)
return pos
|
Function to convert HMS values into decimal degrees.
This function relies on the astropy.coordinates package to perform the
conversion to decimal degrees.
Parameters
----------
ra : list or array
List or array of input RA positions
dec : list or array
List or array of input Dec positions
Returns
-------
pos : arr
Array of RA,Dec positions in decimal degrees
Notes
-----
This function supports any specification of RA and Dec as HMS or DMS;
specifically, the formats::
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
See Also
--------
astropy.coordinates
|
entailment
|
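The conversion performed here is equivalent to this astropy-only sketch; the coordinate values are arbitrary:

from astropy import units as u
from astropy.coordinates import SkyCoord

pos = SkyCoord('10:12:45.3 -45:17:50', unit=(u.hourangle, u.deg))
print(pos.ra.deg, pos.dec.deg)   # 153.18875 -45.29722...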
def parse_exclusions(exclusions):
""" Read in exclusion definitions from file named by 'exclusions'
and return a list of positions and distances
"""
fname = fileutil.osfn(exclusions)
if os.path.exists(fname):
with open(fname) as f:
flines = f.readlines()
else:
        print('No valid exclusions file "{}" could be found!'.format(fname))
print('Skipping application of exclusions files to source catalogs.')
return None
# Parse out lines which can be interpreted as positions and distances
exclusion_list = []
units = None
for line in flines:
if line[0] == '#' or 'global' in line[:6]:
continue
# Only interpret the part of the line prior to the comment
# if a comment has been attached to the line
if '#' in line:
line = line.split('#')[0].rstrip()
if units is None:
units = 'pixels'
if line[:3] in ['fk4', 'fk5', 'sky']:
units = 'sky'
if line[:5] in ['image', 'physi', 'pixel']:
units = 'pixels'
continue
if 'circle(' in line:
nline = line.replace('circle(', '')
nline = nline.replace(')', '')
nline = nline.replace('"', '')
vals = nline.split(',')
if ':' in vals[0]:
posval = vals[0] + ' ' + vals[1]
else:
posval = (float(vals[0]), float(vals[1]))
else:
# Try to interpret unformatted line
if ',' in line:
split_tok = ','
else:
split_tok = ' '
vals = line.split(split_tok)
if len(vals) == 3:
if ':' in vals[0]:
posval = vals[0] + ' ' + vals[1]
else:
posval = (float(vals[0]), float(vals[1]))
else:
continue
exclusion_list.append(
{'pos': posval, 'distance': float(vals[2]), 'units': units}
)
return exclusion_list
|
Read in exclusion definitions from file named by 'exclusions'
and return a list of positions and distances
|
entailment
|
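A sketch of an exclusions file this parser can digest; the positions and radii are invented, and parse_exclusions() plus its stsci.tools fileutil dependency are assumed to be importable:

with open('exclude.reg', 'w') as f:
    f.write('# reject sources near these positions\n'
            'image\n'
            'circle(1024.0, 2048.0, 15.0)\n'
            '512.5 300.0 10.0\n')

for entry in parse_exclusions('exclude.reg'):
    print(entry)
# {'pos': (1024.0, 2048.0), 'distance': 15.0, 'units': 'pixels'}
# {'pos': (512.5, 300.0), 'distance': 10.0, 'units': 'pixels'}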
def parse_colname(colname):
""" Common function to interpret input column names provided by the user.
This function translates column specification provided by the user
into a column number.
Notes
-----
This function will understand the following inputs::
'1,2,3' or 'c1,c2,c3' or ['c1','c2','c3']
'1-3' or 'c1-c3'
'1:3' or 'c1:c3'
'1 2 3' or 'c1 c2 c3'
'1' or 'c1'
1
Parameters
----------
colname :
Column name or names to be interpreted
Returns
-------
cols : list
The return value will be a list of strings.
"""
if isinstance(colname, list):
        cname = ','.join(str(c) for c in colname)
elif isinstance(colname, int) or colname.isdigit():
cname = str(colname)
else:
cname = colname
    if cname[0] == 'c':
cname = cname.replace('c', '')
ctok = None
cols = None
if '-' in cname:
ctok = '-'
if ':' in cname:
ctok = ':'
if ctok is not None:
cnums = cname.split(ctok)
c = list(range(int(cnums[0]), int(cnums[1]) + 1))
cols = [str(i) for i in c]
if cols is None:
ctok = ',' if ',' in cname else ' '
cols = cname.split(ctok)
return cols
|
Common function to interpret input column names provided by the user.
This function translates column specification provided by the user
into a column number.
Notes
-----
This function will understand the following inputs::
'1,2,3' or 'c1,c2,c3' or ['c1','c2','c3']
'1-3' or 'c1-c3'
'1:3' or 'c1:c3'
'1 2 3' or 'c1 c2 c3'
'1' or 'c1'
1
Parameters
----------
colname :
Column name or names to be interpreted
Returns
-------
cols : list
The return value will be a list of strings.
|
entailment
|
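For concreteness, the accepted forms all normalize to the same list; a quick demo assuming parse_colname() above is in scope:

for spec in ['1,2,3', 'c1-c3', '1:3', 'c1 c2 c3']:
    print(spec, '->', parse_colname(spec))   # each -> ['1', '2', '3']
print(parse_colname(3))                      # a bare int -> ['3']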
def readcols(infile, cols=None):
""" Function which reads specified columns from either FITS tables or
ASCII files
This function reads in the columns specified by the user into numpy
arrays regardless of the format of the input table (ASCII or FITS
table).
Parameters
----------
infile : string
Filename of the input file
cols : string or list of strings
Columns to be read into arrays
Returns
-------
outarr : array
Numpy array or arrays of columns from the table
"""
if _is_str_none(infile) is None:
return None
if infile.endswith('.fits'):
outarr = read_FITS_cols(infile, cols=cols)
else:
outarr = read_ASCII_cols(infile, cols=cols)
return outarr
|
Function which reads specified columns from either FITS tables or
ASCII files
This function reads in the columns specified by the user into numpy
arrays regardless of the format of the input table (ASCII or FITS
table).
Parameters
----------
infile : string
Filename of the input file
cols : string or list of strings
Columns to be read into arrays
Returns
-------
outarr : array
Numpy array or arrays of columns from the table
|
entailment
|
def read_FITS_cols(infile, cols=None): # noqa: N802
""" Read columns from FITS table """
with fits.open(infile, memmap=False) as ftab:
extnum = 0
extfound = False
for extn in ftab:
if 'tfields' in extn.header:
extfound = True
break
extnum += 1
if not extfound:
            raise ValueError('No catalog table found in {}'.format(infile))
# Now, read columns from the table in this extension if no column names
# were provided by user, simply read in all columns from table
        if cols is None or _is_str_none(cols[0]) is None:
cols = ftab[extnum].data.names
# Define the output
outarr = [ftab[extnum].data.field(c) for c in cols]
return outarr
|
Read columns from FITS table
|
entailment
|
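A round-trip sketch, assuming read_FITS_cols() above is in scope; the table contents are arbitrary:

import numpy as np
from astropy.io import fits

c1 = fits.Column(name='X', format='E', array=np.array([10.5, 20.25]))
c2 = fits.Column(name='Y', format='E', array=np.array([5.0, 7.75]))
fits.BinTableHDU.from_columns([c1, c2]).writeto('cat.fits', overwrite=True)

x, y = read_FITS_cols('cat.fits', cols=['X', 'Y'])
print(x, y)   # [10.5 20.25] [5. 7.75]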
def read_ASCII_cols(infile, cols=[1, 2, 3]): # noqa: N802
""" Interpret input ASCII file to return arrays for specified columns.
Notes
-----
The specification of the columns should be expected to have lists for
each 'column', with all columns in each list combined into a single
entry.
For example::
cols = ['1,2,3','4,5,6',7]
where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec
values and 7 represents the flux value for a total of 3 requested
columns of data to be returned.
Returns
-------
outarr : list of arrays
The return value will be a list of numpy arrays, one for each
'column'.
"""
# build dictionary representing format of each row
# Format of dictionary: {'colname':col_number,...}
# This provides the mapping between column name and column number
coldict = {}
with open(infile, 'r') as f:
flines = f.readlines()
for l in flines: # interpret each line from catalog file
        if not l.strip() or l.lstrip()[0] == '#':
continue
else:
# convert first row of data into column definitions using indices
coldict = {str(i + 1): i for i, _ in enumerate(l.split())}
break
numcols = len(cols)
outarr = [[] for _ in range(numcols)]
convert_radec = False
# Now, map specified columns to columns in file and populate output arrays
for l in flines: # interpret each line from catalog file
l = l.strip()
lspl = l.split()
# skip blank lines, comment lines, or lines with
# fewer columns than requested by user
if not l or len(lspl) < numcols or l[0] == '#' or "INDEF" in l:
continue
# For each 'column' requested by user, pull data from row
for c, i in zip(cols, list(range(numcols))):
cnames = parse_colname(c)
if len(cnames) > 1:
# interpret multi-column specification as one value
outval = ''
for cn in cnames:
cnum = coldict[cn]
cval = lspl[cnum]
outval += cval + ' '
outarr[i].append(outval)
convert_radec = True
else:
# pull single value from row for this column
cnum = coldict[cnames[0]]
if isfloat(lspl[cnum]):
cval = float(lspl[cnum])
else:
cval = lspl[cnum]
# Check for multi-column values given as "nn:nn:nn.s"
if ':' in cval:
cval = cval.replace(':', ' ')
convert_radec = True
outarr[i].append(cval)
# convert multi-column RA/Dec specifications
if convert_radec:
outra = []
outdec = []
for ra, dec in zip(outarr[0], outarr[1]):
radd, decdd = radec_hmstodd(ra, dec)
outra.append(radd)
outdec.append(decdd)
outarr[0] = outra
outarr[1] = outdec
# convert all lists to numpy arrays
for c in range(len(outarr)):
outarr[c] = np.array(outarr[c])
return outarr
|
Interpret input ASCII file to return arrays for specified columns.
Notes
-----
The specification of the columns should be expected to have lists for
each 'column', with all columns in each list combined into a single
entry.
For example::
cols = ['1,2,3','4,5,6',7]
where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec
values and 7 represents the flux value for a total of 3 requested
columns of data to be returned.
Returns
-------
outarr : list of arrays
The return value will be a list of numpy arrays, one for each
'column'.
|
entailment
|
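An end-to-end sketch of the grouped-column path; the catalog values are invented, and read_ASCII_cols() with its helpers above is assumed importable:

with open('cat.txt', 'w') as f:
    f.write('# RA (h m s)   Dec (d m s)    flux\n'
            '10 12 45.3   -45 17 50.0   123.4\n'
            '10 13 02.8   -45 18 11.5    98.7\n')

# '1,2,3' and '4,5,6' are each read as one grouped HMS/DMS value,
# which triggers the radec_hmstodd() conversion to decimal degrees.
ra, dec, flux = read_ASCII_cols('cat.txt', cols=['1,2,3', '4,5,6', '7'])
print(ra[0], dec[0], flux[0])   # 153.18875 -45.29722... 123.4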
def write_shiftfile(image_list, filename, outwcs='tweak_wcs.fits'):
""" Write out a shiftfile for a given list of input Image class objects
"""
rows = ''
nrows = 0
for img in image_list:
row = img.get_shiftfile_row()
if row is not None:
rows += row
nrows += 1
if nrows == 0: # If there are no fits to report, do not write out a file
return
# write out reference WCS now
if os.path.exists(outwcs):
os.remove(outwcs)
p = fits.HDUList()
p.append(fits.PrimaryHDU())
p.append(createWcsHDU(image_list[0].refWCS))
p.writeto(outwcs)
# Write out shiftfile to go with reference WCS
with open(filename, 'w') as f:
f.write('# frame: output\n')
f.write('# refimage: %s[wcs]\n' % outwcs)
f.write('# form: delta\n')
f.write('# units: pixels\n')
f.write(rows)
print('Writing out shiftfile :', filename)
|
Write out a shiftfile for a given list of input Image class objects
|
entailment
|
def createWcsHDU(wcs): # noqa: N802
""" Generate a WCS header object that can be used to populate a reference
WCS HDU.
For most applications, stwcs.wcsutil.HSTWCS.wcs2header()
will work just as well.
"""
header = wcs.to_header()
header['EXTNAME'] = 'WCS'
header['EXTVER'] = 1
# Now, update original image size information
header['NPIX1'] = (wcs.pixel_shape[0], "Length of array axis 1")
header['NPIX2'] = (wcs.pixel_shape[1], "Length of array axis 2")
header['PIXVALUE'] = (0.0, "values of pixels in array")
if hasattr(wcs, 'orientat'):
orientat = wcs.orientat
else:
# find orientat from CD or PC matrix
if wcs.wcs.has_cd():
cd12 = wcs.wcs.cd[0][1]
cd22 = wcs.wcs.cd[1][1]
elif wcs.wcs.has_pc():
cd12 = wcs.wcs.cdelt[0] * wcs.wcs.pc[0][1]
cd22 = wcs.wcs.cdelt[1] * wcs.wcs.pc[1][1]
else:
raise ValueError("Invalid WCS: WCS does not contain neither "
"a CD nor a PC matrix.")
orientat = np.rad2deg(np.arctan2(cd12, cd22))
header['ORIENTAT'] = (orientat, "position angle of "
"image y axis (deg. e of n)")
return fits.ImageHDU(None, header)
|
Generate a WCS header object that can be used to populate a reference
WCS HDU.
For most applications, stwcs.wcsutil.HSTWCS.wcs2header()
will work just as well.
|
entailment
|
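A sketch exercising this with a bare astropy WCS rather than an HSTWCS; the pixel scale and reference values are arbitrary, and createWcsHDU() above is assumed to be in scope:

import numpy as np
from astropy import wcs

w = wcs.WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crpix = [512.0, 512.0]
w.wcs.crval = [150.0, 2.0]
w.wcs.cd = np.array([[-1.4e-5, 0.0], [0.0, 1.4e-5]])   # roughly 0.05"/pix, north up
w.pixel_shape = (1024, 1024)

hdu = createWcsHDU(w)
print(hdu.header['EXTNAME'], hdu.header['ORIENTAT'])   # WCS 0.0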
def gauss_array(nx, ny=None, fwhm=1.0, sigma_x=None, sigma_y=None,
zero_norm=False):
""" Computes the 2D Gaussian with size nx*ny.
Parameters
----------
nx : int
ny : int [Default: None]
Size of output array for the generated Gaussian. If ny == None,
output will be an array nx X nx pixels.
fwhm : float [Default: 1.0]
Full-width, half-maximum of the Gaussian to be generated
sigma_x : float [Default: None]
sigma_y : float [Default: None]
Sigma_x and sigma_y are the stddev of the Gaussian functions.
zero_norm : bool [Default: False]
The kernel will be normalized to a sum of 1 when True.
Returns
-------
gauss_arr : array
A numpy array with the generated gaussian function
"""
if ny is None:
ny = nx
if sigma_x is None:
if fwhm is None:
            raise ValueError('A value for either "fwhm" or "sigma_x" needs '
                             'to be specified!')
else:
# Convert input FWHM into sigma
sigma_x = fwhm / (2 * np.sqrt(2 * np.log(2)))
if sigma_y is None:
sigma_y = sigma_x
xradius = nx // 2
yradius = ny // 2
# Create grids of distance from center in X and Y
xarr = np.abs(np.arange(-xradius, xradius + 1))
yarr = np.abs(np.arange(-yradius, yradius + 1))
hnx = gauss(xarr, sigma_x)
hny = gauss(yarr, sigma_y)
hny = hny.reshape((ny, 1))
h = hnx * hny
# Normalize gaussian kernel to a sum of 1
h = h / np.abs(h).sum()
if zero_norm:
h -= h.mean()
return h
|
Computes the 2D Gaussian with size nx*ny.
Parameters
----------
nx : int
ny : int [Default: None]
Size of output array for the generated Gaussian. If ny == None,
output will be an array nx X nx pixels.
fwhm : float [Default: 1.0]
Full-width, half-maximum of the Gaussian to be generated
sigma_x : float [Default: None]
sigma_y : float [Default: None]
Sigma_x and sigma_y are the stddev of the Gaussian functions.
zero_norm : bool [Default: False]
The kernel will be normalized to a sum of 1 when True.
Returns
-------
gauss_arr : array
A numpy array with the generated gaussian function
|
entailment
|