def sortSkyCatalog(self):
""" Sort and clip the source catalog based on the flux range specified
by the user. It keeps a copy of the original full list in order to
support iteration.
"""
if len(self.all_radec_orig[2].nonzero()[0]) == 0:
warn_str = "Source catalog NOT trimmed by flux/mag. No fluxes read in for sources!"
print('\nWARNING: ',warn_str,'\n')
log.warning(warn_str)
return
clip_catalog = False
clip_prefix = ''
for k in sortKeys:
for p in self.pars.keys():
pindx = p.find(k)
if pindx >= 0 and self.pars[p] is not None:
log.info('found a match for %s to %s'%(
str(p),str(self.pars[p])))
# find prefix (if any)
clip_prefix = p[:pindx].strip()
#Only clip the catalog if one of the keys is specified
# in the catalog parameters, not the source finding pars
if clip_prefix and 'units' not in p:
clip_catalog = True
break
if clip_catalog:
break
all_radec = None
if clip_catalog:
# Start by clipping by any specified flux range
if self.pars[clip_prefix+'maxflux'] is not None or \
self.pars[clip_prefix+'minflux'] is not None:
clip_catalog = True
if self.pars[clip_prefix+'minflux'] is not None:
fluxmin = self.pars[clip_prefix+'minflux']
else:
fluxmin = self.all_radec[2].min()
if self.pars[clip_prefix+'maxflux'] is not None:
fluxmax = self.pars[clip_prefix+'maxflux']
else:
fluxmax = self.all_radec[2].max()
# apply flux limit clipping
minindx = self.all_radec_orig[2] >= fluxmin
maxindx = self.all_radec_orig[2] <= fluxmax
flux_indx = np.bitwise_and(minindx,maxindx)
all_radec = []
all_radec.append(self.all_radec_orig[0][flux_indx])
all_radec.append(self.all_radec_orig[1][flux_indx])
all_radec.append(self.all_radec_orig[2][flux_indx])
all_radec.append(np.arange(len(self.all_radec_orig[0][flux_indx])))
if clip_prefix+'nbright' in self.pars and \
self.pars[clip_prefix+'nbright'] is not None:
clip_catalog = True
nbright = self.pars[clip_prefix+'nbright']
# pick out only the brightest 'nbright' sources
if self.pars[clip_prefix+'fluxunits'] == 'mag':
nbslice = slice(None,nbright)
else:
nbslice = slice(nbright,None)
if all_radec is None:
# work on copy of all original data
all_radec = copy.deepcopy(self.all_radec_orig)
# find indices of brightest
nbright_indx = np.argsort(all_radec[2])[nbslice]
self.all_radec[0] = all_radec[0][nbright_indx]
self.all_radec[1] = all_radec[1][nbright_indx]
self.all_radec[2] = all_radec[2][nbright_indx]
self.all_radec[3] = np.arange(len(all_radec[0][nbright_indx]))
else:
if all_radec is not None:
self.all_radec = copy.deepcopy(all_radec)
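# --- Editor's note: a minimal, self-contained sketch (not part of the
# original module) of the flux-range clipping performed above. Array values
# are illustrative only: sources are kept when fluxmin <= flux <= fluxmax.
def _example_flux_clip():
    import numpy as np
    flux = np.array([1.0, 5.0, 12.0, 7.0])
    keep = np.bitwise_and(flux >= 2.0, flux <= 10.0)  # [False, True, False, True]
    return flux[keep]  # array([5., 7.])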
def match(self,refimage, quiet_identity, **kwargs):
""" Uses xyxymatch to cross-match sources between this catalog and
a reference catalog (refCatalog).
"""
ref_outxy = refimage.outxy
refWCS = refimage.wcs
refname = refimage.name
ref_inxy = refimage.xy_catalog
cat_src_type = kwargs['cat_src_type']
del kwargs['cat_src_type']
if not quiet_identity:
print("Matching sources from \'{}\' with sources from "
"reference {} \'{}\'"
.format(self.name, cat_src_type, refname))
#self.sortSkyCatalog() # apply any catalog sorting specified by the user
self.transformToRef(refWCS)
self.refWCS = refWCS
# extract xyxymatch parameters from input parameters
matchpars = kwargs.copy()
self.match_pars = matchpars
minobj = matchpars['minobj'] # needed for later
del matchpars['minobj'] # not needed in xyxymatch
self.goodmatch = True
# Check to see whether or not it is being matched to itself
if refname.strip() == self.name.strip():
self.identityfit = True
if not quiet_identity:
log.info('NO fit performed for reference image: %s\n'%self.name)
else:
# convert tolerance from units of arcseconds to pixels, as needed
radius = matchpars['searchrad']
if matchpars['searchunits'] == 'arcseconds':
radius /= refWCS.pscale
# Determine xyoff (X,Y offset) and tolerance to be used with xyxymatch
if matchpars['use2dhist']:
xsh, ysh, maxval, flux, zpmat, qual = _estimate_2dhist_shift(
self.outxy,
ref_outxy,
searchrad=radius
)
xyoff = (xsh, ysh)
if matchpars['see2dplot']:
zpstd = max(10, flux // 5) if qual else 10
title_str = ("Histogram of offsets: Peak has {:d} matches "
"at ({:0.4g}, {:0.4g})"
.format(maxval, xsh, ysh))
hist_name = None
if not self.interactive:
hist_name = 'hist2d_{0}.png'.format(self.rootname)
plot_pars = {
'data': zpmat,
'figure_id': self.figure_id,
'vmax': zpstd,
'xp': xsh,
'yp': ysh,
'searchrad': radius,
'title_str': title_str,
'plotname': hist_name,
'interactive': self.interactive
}
tweakutils.plot_zeropoint(plot_pars)
if matchpars['see2dplot'] and ('residplot' in matchpars and
'No' in matchpars['residplot']):
if self.interactive:
prompt = ("Press ENTER for next image, \n" +
" 'n' to continue without updating header or \n" +
" 'q' to quit immediately...\n")
if sys.version_info[0] >= 3:
a = input(prompt)
else:
a = raw_input(prompt)
else:
a = ' '
if 'n' in a.lower():
self.perform_update = False
if 'q' in a.lower():
self.quit_immediately = True
if matchpars['see2dplot']:
self.figure_id += 1
else:
xoff = 0.
yoff = 0.
if not util.is_blank(matchpars['xoffset']):
xoff = matchpars['xoffset']
if not util.is_blank(matchpars['yoffset']):
yoff = matchpars['yoffset']
xyoff = (xoff, yoff)
matches = xyxymatch(self.outxy, ref_outxy, origin=xyoff,
tolerance=matchpars['tolerance'],
separation=matchpars['separation'])
if len(matches) > minobj:
            self.matches['image'] = np.column_stack(
                [matches['input_x'][:, np.newaxis],
                 matches['input_y'][:, np.newaxis]])
            self.matches['ref'] = np.column_stack(
                [matches['ref_x'][:, np.newaxis],
                 matches['ref_y'][:, np.newaxis]])
self.matches['ref_idx'] = matches['ref_idx']
self.matches['img_idx'] = self.all_radec[3][matches['input_idx']]
self.matches['input_idx'] = matches['input_idx']
self.matches['img_RA'] = self.all_radec[0][matches['input_idx']]
self.matches['img_DEC'] = self.all_radec[1][matches['input_idx']]
self.matches['ref_orig_xy'] = np.column_stack([
np.array(ref_inxy[0])[matches['ref_idx']][:,np.newaxis],
np.array(ref_inxy[1])[matches['ref_idx']][:,np.newaxis]])
self.matches['img_orig_xy'] = np.column_stack([
np.array(self.xy_catalog[0])[matches['input_idx']][:,np.newaxis],
np.array(self.xy_catalog[1])[matches['input_idx']][:,np.newaxis]])
self.matches['src_origin'] = ref_inxy[-1][matches['ref_idx']]
print('Found %d matches for %s...'%(len(matches),self.name))
if self.pars['writecat']:
matchfile = open(self.catalog_names['match'],mode='w+')
matchfile.write('#Reference: %s\n'%refname)
matchfile.write('#Input: %s\n'%self.name)
title = '#Ref_X Ref_Y '
title += 'Input_X Input_Y '
title += 'Ref_X0 Ref_Y0 '
title += 'Input_X0 Input_Y0 '
title += 'Ref_ID Input_ID '
title += 'Ref_Source\n'
fmtstr = '%0.6f %0.6f '*4
fmtstr += '%d %d %s\n'
matchfile.write(title)
for i in range(len(matches['input_x'])):
linestr = fmtstr%\
(matches['ref_x'][i],matches['ref_y'][i],\
matches['input_x'][i],matches['input_y'][i],
self.matches['ref_orig_xy'][:,0][i],
self.matches['ref_orig_xy'][:,1][i],
self.matches['img_orig_xy'][:,0][i],
self.matches['img_orig_xy'][:,1][i],
matches['ref_idx'][i],matches['input_idx'][i],
self.matches['src_origin'][i])
matchfile.write(linestr)
matchfile.close()
else:
warnstr = textutil.textbox('WARNING: \n'+
'Not enough matches (< %d) found for input image: %s'%(minobj,self.name))
for line in warnstr.split('\n'):
log.warning(line)
print(warnstr)
self.goodmatch = False
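# --- Editor's note: _estimate_2dhist_shift is defined elsewhere in the
# package; the idea is to histogram all pairwise offsets between the two
# source lists and take the histogram peak as the (x, y) shift. A toy
# sketch of that technique (hypothetical names, not the actual routine):
def _example_2dhist_shift(xy, ref_xy, searchrad=5.0):
    import numpy as np
    dx = (xy[:, 0][:, None] - ref_xy[None, :, 0]).ravel()
    dy = (xy[:, 1][:, None] - ref_xy[None, :, 1]).ravel()
    nbins = int(2 * searchrad) + 1
    hist, xedges, yedges = np.histogram2d(
        dx, dy, bins=nbins,
        range=[[-searchrad, searchrad], [-searchrad, searchrad]])
    ix, iy = np.unravel_index(np.argmax(hist), hist.shape)
    # centers of the peak bin give the estimated shift
    return (0.5 * (xedges[ix] + xedges[ix + 1]),
            0.5 * (yedges[iy] + yedges[iy + 1]))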
def performFit(self,**kwargs):
""" Perform a fit between the matched sources.
Parameters
----------
kwargs : dict
Parameter necessary to perform the fit; namely, *fitgeometry*.
Notes
-----
This task still needs to implement (eventually) interactive iteration of
the fit to remove outliers.
"""
assert(self.refWCS is not None)
pars = kwargs.copy()
self.fit_pars = pars
self.fit = {'offset':[0.0,0.0],'rot':0.0,'scale':[1.0],'rms':[0.0,0.0],
'rms_keys':{'RMS_RA':0.0,'RMS_DEC':0.0,'NMATCH':0},
'fit_matrix':[[1.0,0.0],[0.0,1.0]], 'src_origin':[None]}
if not self.identityfit:
if self.matches is not None and self.goodmatch:
self.fit = linearfit.iter_fit_all(
self.matches['image'],self.matches['ref'],
self.matches['img_idx'],self.matches['ref_idx'],
xyorig=self.matches['img_orig_xy'],
uvorig=self.matches['ref_orig_xy'],
mode=pars['fitgeometry'],nclip=pars['nclip'],
sigma=pars['sigma'],minobj=pars['minobj'],
center=self.refWCS.wcs.crpix,
verbose=self.verbose)
self.fit['rms_keys'] = self.compute_fit_rms()
radec_fit = self.refWCS.all_pix2world(self.fit['fit_xy'],1)
self.fit['fit_RA'] = radec_fit[:,0]
self.fit['fit_DEC'] = radec_fit[:,1]
self.fit['src_origin'] = self.matches['src_origin']
print('Computed ',pars['fitgeometry'],' fit for ',self.name,': ')
if pars['fitgeometry'] == 'shift':
print("XSH: {:.4f} YSH: {:.4f}"
.format(self.fit['offset'][0],
self.fit['offset'][1]))
elif pars['fitgeometry'] == 'rscale' and self.fit['proper']:
print("XSH: {:.4f} YSH: {:.4f} ROT: {:.10g} "
"SCALE: {:.6f}".format(
self.fit['offset'][0],
self.fit['offset'][1],
self.fit['rot'],
self.fit['scale'][0]))
elif pars['fitgeometry'] == 'general' or \
(pars['fitgeometry'] == 'rscale' and not self.fit['proper']):
print("XSH: {:.4f} YSH: {:.4f} PROPER ROT: {:.10g} "
"".format(
self.fit['offset'][0],
self.fit['offset'][1],
self.fit['rot']))
print("<ROT>: {:.10g} SKEW: {:.10g} ROT_X: {:.10g} "
"ROT_Y: {:.10g}".format(
self.fit['rotxy'][2],
self.fit['skew'],
self.fit['rotxy'][0],
self.fit['rotxy'][1]))
print("<SCALE>: {:.10g} SCALE_X: {:.10g} "
"SCALE_Y: {:.10g}".format(
self.fit['scale'][0],
self.fit['scale'][1],
self.fit['scale'][2]))
else:
assert(False)
print('FIT XRMS: {:<7.2g} FIT YRMS: {:<7.2g}'
.format(*self.fit['rms']))
print('FIT RMSE: {:<7.2g} FIT MAE: {:<7.2g}\n'
.format(self.fit['rmse'], self.fit['mae']))
print('RMS_RA: %.2g (deg) RMS_DEC: %.2g (deg)\n'%(
self.fit['rms_keys']['RMS_RA'],
self.fit['rms_keys']['RMS_DEC']))
print('Final solution based on ',self.fit['rms_keys']['NMATCH'],' objects.')
self.write_fit_catalog()
# Plot residuals, if requested by the user
if 'residplot' in pars and "No" not in pars['residplot']:
xy = self.fit['img_coords']
resids = self.fit['resids']
xy_fit = xy + resids
                title_str = r'Residuals\ for\ {0}\ using\ {1:6d}\ sources'.format(
                    self.name.replace('_', r'\_'), self.fit['rms_keys']['NMATCH'])
if not self.interactive:
resid_name = 'residuals_{0}.png'.format(self.rootname)
vector_name = resid_name.replace('residuals','vector')
else:
resid_name = None
vector_name = None
if pars['residplot'] == 'both':
tweakutils.make_vector_plot(None,
data=[xy[:,0],xy[:,1],xy_fit[:,0],xy_fit[:,1]],
figure_id=self.figure_id, vector=True,
labelsize=pars['labelsize'],
plotname=vector_name, title=title_str)
ptype=False # Setup
self.figure_id += 1
elif pars['residplot'] == 'vector':
ptype = True
else:
ptype = False
# Generate new plot
tweakutils.make_vector_plot(None,
data=[xy[:,0],xy[:,1],xy_fit[:,0],xy_fit[:,1]],
figure_id=self.figure_id, vector=ptype,
ylimit=pars['ylimit'], labelsize=pars['labelsize'],
plotname=resid_name, title=title_str)
if self.interactive:
prompt = ("Press ENTER for next image, \n" +
" 'n' to continue without updating header or \n" +
" 'q' to quit immediately...\n")
if sys.version_info[0] >= 3:
a = input(prompt)
else:
a = raw_input(prompt)
else:
a = ' '
if 'n' in a.lower():
self.perform_update = False
if 'q' in a.lower():
self.quit_immediately = True
else:
self.fit['offset'] = [np.nan,np.nan]
self.fit['rot'] = np.nan
self.fit['scale'] = [np.nan]
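# --- Editor's note: compute_fit_rms is not shown in this excerpt. A sketch
# of one plausible reading of the RMS/MAE quantities printed above, given
# the N x 2 residuals array from the fit (hypothetical helper):
def _example_fit_stats(resids):
    import numpy as np
    xrms = np.sqrt(np.mean(resids[:, 0] ** 2))            # per-axis RMS
    yrms = np.sqrt(np.mean(resids[:, 1] ** 2))
    rmse = np.sqrt(np.mean(np.sum(resids ** 2, axis=1)))  # radial RMS error
    mae = np.mean(np.linalg.norm(resids, axis=1))         # mean absolute radial error
    return xrms, yrms, rmse, mae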
def updateHeader(self, wcsname=None, reusename=False):
""" Update header of image with shifts computed by *perform_fit()*.
"""
        # Ensure the filehandle is open and available...
self.openFile()
verbose_level = 1
if not self.perform_update:
verbose_level = 0
# Create WCSCORR table to keep track of WCS revisions anyway
if self.perform_update:
wcscorr.init_wcscorr(self._im.hdu)
extlist = []
wcscorr_extname = self.ext_name
if self.ext_name == "PRIMARY":
extlist = [0]
else:
for ext in range(1,self.nvers+1):
extlist.append((self.ext_name,ext))
# add WCSNAME to SCI headers, if not provided (such as for
# drizzled images directly obtained from the archive pre-AD)
if ('wcsname' not in self._im.hdu[self.ext_name,ext].header and
self._im.hdu.fileinfo(0)['filemode'] == 'update'):
self._im.hdu[self.ext_name,ext].header['wcsname'] = 'Default'
    if not self.identityfit and self.goodmatch and \
            not np.isnan(self.fit['offset'][0]):
updatehdr.updatewcs_with_shift(self._im.hdu, self.refWCS,
wcsname=wcsname, reusename=reusename,
fitgeom=self.fit_pars['fitgeometry'],
xsh=self.fit['offset'][0],ysh=self.fit['offset'][1],
rot=self.fit['rot'],scale=self.fit['scale'][0],
fit=self.fit['fit_matrix'], verbose=verbose_level,
xrms=self.fit['rms_keys']['RMS_RA'],
yrms=self.fit['rms_keys']['RMS_DEC'])
wnames = altwcs.wcsnames(self._im.hdu,ext=extlist[0])
altkeys = []
for k in wnames:
if wnames[k] == wcsname:
altkeys.append(k)
if len(altkeys) > 1 and ' ' in altkeys:
altkeys.remove(' ')
if len(altkeys) == 0:
next_key = ' '
else:
next_key = altkeys[-1]
if self.perform_update:
log.info(' Writing out new WCS to alternate WCS: "%s"'%next_key)
self.next_key = next_key
else: #if self.identityfit or not self.goodmatch:
if reusename:
# Look for key of WCS with this name
next_key = altwcs.getKeyFromName(self._im.hdu[extlist[0]].header,wcsname)
# This wcsname is new, so start fresh
if next_key is None:
next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)
else:
# Find key for next WCS and save again to replicate an updated solution
next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)
if self.perform_update:
# archive current WCS as alternate WCS with specified WCSNAME
# Start by archiving original PRIMARY WCS
wnames = altwcs.wcsnames(self._im.hdu,ext=extlist[0])
# Define a default WCSNAME in the case that the file to be
# updated did not have the WCSNAME keyword defined already
# (as will happen when updating images that have not been
# updated using updatewcs).
if len(wnames) == 0:
pri_wcsname = None
else:
# Safeguard against headers not having WCSNAME defined
# This would occur if they were written out by something
                # other than stwcs.updatewcs
                if ' ' not in wnames:
                    self._im.hdu[extlist[0]].header['wcsname'] = ''
wnames[' '] = ''
pri_wcsname = wnames[' ']
next_pkey = altwcs.getKeyFromName(fits.getheader(self.name, extlist[0], memmap=False),pri_wcsname)
log.info(' Saving Primary WCS to alternate WCS: "%s"'%next_pkey)
altwcs.archiveWCS(self._im.hdu, extlist,
wcskey=next_pkey, wcsname=pri_wcsname,
reusekey=True)
if reusename:
# Look for key of WCS with this name
next_key = altwcs.getKeyFromName(self._im.hdu[extlist[0]].header,wcsname)
# This wcsname is new, so start fresh
if next_key is None:
next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)
else:
# Find key for next WCS and save again to replicate an updated solution
next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)
# update WCSNAME to be the new name
for ext in extlist:
self._im.hdu[ext].header['WCSNAME'] = wcsname
# save again using new WCSNAME
altwcs.archiveWCS(self._im.hdu, extlist,
wcskey=next_key,wcsname=wcsname, reusekey=reusename)
self.next_key = ' '
# add FIT values to image's PRIMARY header
fimg = self._im.hdu
if wcsname in ['',' ',None,"INDEF"]:
wcsname = 'TWEAK'
# Record values for the fit with both the PRIMARY WCS being updated
# and the alternate WCS which will be created.
assert(not self._im.closed)
for ext in extlist:
self._im.hdu[ext].header['FITNAME'+next_key] = wcsname
for kw in self.fit['rms_keys']:
self._im.hdu[ext].header.set(kw+next_key,
self.fit['rms_keys'][kw],
after='FITNAME'+next_key)
if self.perform_update:
log.info('Updating WCSCORR table with new WCS solution "%s"'%wcsname)
wcscorr.update_wcscorr(self._im.hdu, wcs_id=wcsname,
extname=self.ext_name)
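# --- Editor's note (hypothetical filename): updateHeader stores alternate
# WCS solutions in the same header under keywords suffixed with a key letter
# ('A'-'Z'), e.g. WCSNAMEA/CRVAL1A, plus FITNAME<key> and the RMS keywords.
# A minimal way to inspect what was written:
def _example_show_alt_wcs(filename='j8bt06nyq_flt.fits'):
    from astropy.io import fits
    hdr = fits.getheader(filename, ('SCI', 1))
    return {key: hdr[key] for key in hdr if key.startswith('WCSNAME')}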
def writeHeaderlet(self,**kwargs):
""" Write and/or attach a headerlet based on update to PRIMARY WCS
"""
        # Ensure the filehandle is open and available...
self.openFile()
pars = kwargs.copy()
rms_pars = self.fit['rms_keys']
str_kw = ['descrip','history','author','hdrfile']
for kw in str_kw:
if pars[kw] == '': pars[kw] = None
# Call function with properly interpreted input parameters
# Syntax: write_headerlet(filename, hdrname, output, sciext='SCI',
# wcsname=None, wcskey=None, destim=None,
# sipname=None, npolfile=None, d2imfile=None,
# author=None, descrip=None, history=None,
# rms_ra=None, rms_dec=None, nmatch=None, catalog=None,
# attach=True, clobber=False):
headerlet.write_headerlet(self._im.hdu, pars['hdrname'],
output=pars['hdrfile'],
wcsname=None, wcskey=self.next_key, destim=None,
sipname=None, npolfile=None, d2imfile=None,
author=pars['author'], descrip=pars['descrip'],
history=pars['history'],
nmatch=rms_pars['NMATCH'],catalog=pars['catalog'],
attach=pars['attach'], clobber=pars['clobber']
)
def write_skycatalog(self,filename):
""" Write out the all_radec catalog for this image to a file.
"""
if self.all_radec is None:
return
        ralist = self.all_radec[0]
        declist = self.all_radec[1]
f = open(filename,'w')
f.write("#Sky positions for: "+self.name+'\n')
f.write("#RA Dec\n")
f.write("#(deg) (deg)\n")
for i in range(len(ralist)):
f.write('%0.12f %0.12f\n'%(ralist[i],declist[i]))
f.close()
def get_xy_catnames(self):
""" Return a string with the names of input_xy catalog names
"""
catstr = self.name+' '
if 'input_xy' in self.catalog_names:
for xycat in self.catalog_names['input_xy']:
catstr += ' '+xycat
return catstr + '\n'
def write_fit_catalog(self):
""" Write out the catalog of all sources and resids used in the final fit.
"""
if self.pars['writecat']:
log.info('Creating catalog for the fit: {:s}'.format(self.catalog_names['fitmatch']))
f = open(self.catalog_names['fitmatch'],'w')
f.write('# Input image: {:s}\n'.format(self.filename))
f.write('# Coordinate mapping parameters: \n')
f.write('# X and Y rms: {:.2g} {:.2g}\n'
.format(self.fit['rms'][0], self.fit['rms'][1]))
f.write('# X and Y shift: {:.4f} {:.4f}\n'
.format(self.fit['offset'][0], self.fit['offset'][1]))
f.write('# X and Y scale: {:.10g} {:.10g}\n'
.format(self.fit['scale'][1], self.fit['scale'][2]))
f.write('# X and Y rotation: {:.10g} {:.10g}\n'
.format(self.fit['rotxy'][0], self.fit['rotxy'][1]))
f.write('# <rotation> and skew: {:.10g} {:.10g}\n'
.format(self.fit['rotxy'][2], self.fit['skew']))
f.write('# \n# Input Coordinate Listing\n')
f.write('# Column 1: X (reference)\n')
f.write('# Column 2: Y (reference)\n')
f.write('# Column 3: X (input)\n')
f.write('# Column 4: Y (input)\n')
f.write('# Column 5: X (fit)\n')
f.write('# Column 6: Y (fit)\n')
f.write('# Column 7: X (residual)\n')
f.write('# Column 8: Y (residual)\n')
f.write('# Column 9: Original X (reference)\n')
f.write('# Column 10: Original Y (reference)\n')
f.write('# Column 11: Original X (input)\n')
f.write('# Column 12: Original Y (input)\n')
f.write('# Column 13: Ref ID\n')
f.write('# Column 14: Input ID\n')
f.write('# Column 15: Input EXTVER ID \n')
f.write('# Column 16: RA (fit)\n')
f.write('# Column 17: Dec (fit)\n')
        f.write('# Column 18: Ref source provenance\n')
#
# Need to add chip ID for each matched source to the fitmatch file
# The chip information can be extracted from the following source:
#
# self.chip_catalogs[sci_extn] = {'catalog':catalog,'wcs':wcs}
# xypos = catalog.xypos
img_chip_id = self.fit['img_indx'].copy()
for sci_extn in range(1,self.nvers+1):
catalog = self.chip_catalogs[sci_extn]['catalog']
if catalog.xypos is not None:
img_indx_orig = self.chip_catalogs[sci_extn]['catalog'].xypos[3]
chip_min = img_indx_orig.min()
chip_max = img_indx_orig.max()
cid = np.logical_and((img_chip_id >= chip_min),(img_chip_id <= chip_max))
img_chip_id[cid] = sci_extn
#
f.write('#\n')
f.close()
xydata = [[self.fit['ref_coords'][:,0],self.fit['ref_coords'][:,1],
self.fit['img_coords'][:,0],self.fit['img_coords'][:,1],
self.fit['fit_xy'][:,0],self.fit['fit_xy'][:,1],
self.fit['resids'][:,0],self.fit['resids'][:,1],
self.fit['ref_orig_xy'][:,0],
self.fit['ref_orig_xy'][:,1],
self.fit['img_orig_xy'][:,0],
self.fit['img_orig_xy'][:,1]],
[self.fit['ref_indx'],self.fit['img_indx'],img_chip_id],
[self.fit['fit_RA'],self.fit['fit_DEC']],
[self.fit['src_origin']]
]
tweakutils.write_xy_file(self.catalog_names['fitmatch'],xydata,
append=True,format=["%15.6f","%8d","%20.12f"," %s"])
def write_outxy(self,filename):
""" Write out the output(transformed) XY catalog for this image to a file.
"""
f = open(filename,'w')
f.write("#Pixel positions for: "+self.name+'\n')
f.write("#X Y\n")
f.write("#(pix) (pix)\n")
for i in range(self.all_radec[0].shape[0]):
f.write('%f %f\n'%(self.outxy[i,0],self.outxy[i,1]))
f.close()
def get_shiftfile_row(self):
""" Return the information for a shiftfile for this image to provide
    compatibility with the IRAF-based MultiDrizzle.
"""
if self.fit is not None:
rowstr = '%s %0.6f %0.6f %0.6f %0.6f %0.6f %0.6f\n'%(
self.name,self.fit['offset'][0],self.fit['offset'][1],
self.fit['rot'],self.fit['scale'][0],
self.fit['rms'][0],self.fit['rms'][1])
else:
rowstr = None
return rowstr
def clean(self):
""" Remove intermediate files created.
"""
#TODO: add cleaning of mask files, *if* created ...
for f in self.catalog_names:
if 'match' in f:
if os.path.exists(self.catalog_names[f]):
log.info('Deleting intermediate match file: %s'%
self.catalog_names[f])
os.remove(self.catalog_names[f])
        else:
            for extn in self.catalog_names[f]:
                if os.path.exists(extn):
                    log.info('Deleting intermediate catalog: %s' % extn)
                    os.remove(extn)
def write_skycatalog(self, filename, show_flux=False, show_id=False):
""" Write out the all_radec catalog for this image to a file.
"""
f = open(filename,'w')
f.write("#Sky positions for cumulative reference catalog. Initial catalog from: "+self.name+'\n')
header1 = "#RA Dec"
header2 = "#(deg) (deg)"
if show_flux:
header1 += " Flux"
header2 += " (counts)"
if show_id:
header1 += " ID Origin"
header2 += ""
header1 += "\n"
header2 += "\n"
f.write(header1)
f.write(header2)
show_details = show_flux or show_id
for i in range(self.all_radec[0].shape[0]):
src_line = "{:.7f} {:.7f}" \
.format(self.all_radec[0][i], self.all_radec[1][i])
        if show_details:
            if show_flux:
                src_line += " {:.5g}".format(self.xy_catalog[2][i])
            if show_id:
                src_line += " {:d} {:s}".format(self.xy_catalog[3][i],
                                                self.xy_catalog[-1][i])
f.write(src_line + '\n')
f.close()
def transformToRef(self):
""" Transform reference catalog sky positions (self.all_radec)
to reference tangent plane (self.wcs) to create output X,Y positions.
"""
if 'refxyunits' in self.pars and self.pars['refxyunits'] == 'pixels':
log.info('Creating RA/Dec positions for reference sources...')
self.outxy = np.column_stack([self.all_radec[0][:,np.newaxis],self.all_radec[1][:,np.newaxis]])
skypos = self.wcs.wcs_pix2world(self.all_radec[0],self.all_radec[1],self.origin)
self.all_radec[0] = skypos[0]
self.all_radec[1] = skypos[1]
else:
log.info('Converting RA/Dec positions of reference sources from "%s" to '%self.name+
'X,Y positions in reference WCS...')
self.refWCS = self.wcs
outxy = self.wcs.wcs_world2pix(self.all_radec[0],self.all_radec[1],self.origin)
# convert outxy list to a Nx2 array
self.outxy = np.column_stack([outxy[0][:,np.newaxis],outxy[1][:,np.newaxis]])
def clean(self):
""" Remove intermediate files created
"""
if not util.is_blank(self.catalog.catname) and os.path.exists(self.catalog.catname):
os.remove(self.catalog.catname)
def close(self):
""" Close the object nicely and release all the data
arrays from memory YOU CANT GET IT BACK, the pointers
and data are gone so use the getData method to get
the data array returned for future use. You can use
putData to reattach a new data array to the imageObject.
"""
if self._image is None:
return
# mcara: I think the code below is not necessary but in order to
# preserve the same functionality as the code removed below,
# I make an empty copy of the image object:
empty_image = fits.HDUList()
for u in self._image:
empty_image.append(u.__class__(data=None, header=None))
# mcara: END unnecessary code
self._image.close() #calls fits.close()
self._image = empty_image
def clean(self):
""" Deletes intermediate products generated for this imageObject.
"""
clean_files = ['blotImage','crmaskImage','finalMask',
'staticMask','singleDrizMask','outSky',
'outSContext','outSWeight','outSingle',
'outMedian','dqmask','tmpmask',
'skyMatchMask']
log.info('Removing intermediate files for %s' % self._filename)
# We need to remove the combined products first; namely, median image
util.removeFileSafely(self.outputNames['outMedian'])
# Now remove chip-specific intermediate files, if any were created.
for chip in self.returnAllChips(extname='SCI'):
for fname in clean_files:
if fname in chip.outputNames:
util.removeFileSafely(chip.outputNames[fname])
def getData(self,exten=None):
""" Return just the data array from the specified extension
fileutil is used instead of fits to account for non-
FITS input images. openImage returns a fits object.
"""
if exten.lower().find('sci') > -1:
# For SCI extensions, the current file will have the data
fname = self._filename
else:
# otherwise, the data being requested may need to come from a
# separate file, as is the case with WFPC2 DQ data.
#
# convert exten to 'sci',extver to get the DQ info for that chip
extn = exten.split(',')
sci_chip = self._image[self.scienceExt,int(extn[1])]
fname = sci_chip.dqfile
extnum = self._interpretExten(exten)
if self._image[extnum].data is None:
if os.path.exists(fname):
_image=fileutil.openImage(fname, clobber=False, memmap=False)
_data=fileutil.getExtn(_image, extn=exten).data
_image.close()
del _image
self._image[extnum].data = _data
else:
_data = None
else:
_data = self._image[extnum].data
return _data
def getHeader(self,exten=None):
""" Return just the specified header extension fileutil
is used instead of fits to account for non-FITS
input images. openImage returns a fits object.
"""
_image=fileutil.openImage(self._filename, clobber=False, memmap=False)
_header=fileutil.getExtn(_image,extn=exten).header
_image.close()
del _image
return _header
def updateData(self,exten,data):
""" Write out updated data and header to
the original input file for this object.
"""
_extnum=self._interpretExten(exten)
fimg = fileutil.openImage(self._filename, mode='update', memmap=False)
fimg[_extnum].data = data
fimg[_extnum].header = self._image[_extnum].header
fimg.close()
def putData(self,data=None,exten=None):
""" Now that we are removing the data from the object to save memory,
we need something that cleanly puts the data array back into
the object so that we can write out everything together using
something like fits.writeto....this method is an attempt to
make sure that when you add an array back to the .data section
of the hdu it still matches the header information for that
section ( ie. update the bitpix to reflect the datatype of the
array you are adding). The other header stuff is up to you to verify.
Data should be the data array exten is where you want to stick it,
either extension number or a string like 'sci,1'
"""
if data is None:
log.warning("No data supplied")
else:
        extnum = self._interpretExten(exten)
        ext = self._image[extnum]
        # update BITPIX to the current datatype; this is simplistic and
        # ignores BSCALE/BZERO
        ext.header['BITPIX'] = _NUMPY_TO_IRAF_DTYPES[data.dtype.name]
ext.data = data
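# --- Editor's note: _NUMPY_TO_IRAF_DTYPES is defined elsewhere in this
# module. Under the standard FITS BITPIX convention, a minimal version of
# such a mapping could look like this (a sketch, not the module's table):
_EXAMPLE_NUMPY_TO_FITS_BITPIX = {
    'uint8': 8,      # 8-bit unsigned integer
    'int16': 16,     # 16-bit signed integer
    'int32': 32,     # 32-bit signed integer
    'float32': -32,  # IEEE single-precision float
    'float64': -64,  # IEEE double-precision float
}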
def getAllData(self,extname=None,exclude=None):
""" This function is meant to make it easier to attach ALL the data
extensions of the image object so that we can write out copies of
the original image nicer.
If no extname is given, the it retrieves all data from the original
file and attaches it. Otherwise, give the name of the extensions
you want and all of those will be restored.
Ok, I added another option. If you want to get all the data
extensions EXCEPT a particular one, leave extname=NONE and
set exclude=EXTNAME. This is helpfull cause you might not know
all the extnames the image has, this will find out and exclude
the one you do not want overwritten.
"""
extensions = self._findExtnames(extname=extname,exclude=exclude)
for i in range(1,self._nextend+1,1):
if hasattr(self._image[i],'_extension') and \
"IMAGE" in self._image[i]._extension:
extver = self._image[i].header['extver']
if (self._image[i].extname in extensions) and self._image[self.scienceExt,extver].group_member:
self._image[i].data=self.getData(self._image[i].extname + ','+str(self._image[i].extver))
def returnAllChips(self,extname=None,exclude=None):
""" Returns a list containing all the chips which match the
extname given minus those specified for exclusion (if any).
"""
extensions = self._findExtnames(extname=extname,exclude=exclude)
chiplist = []
for i in range(1,self._nextend+1,1):
if 'extver' in self._image[i].header:
extver = self._image[i].header['extver']
else:
extver = 1
if hasattr(self._image[i],'_extension') and \
"IMAGE" in self._image[i]._extension:
if (self._image[i].extname in extensions) and self._image[self.scienceExt,extver].group_member:
chiplist.append(self._image[i])
return chiplist
def _findExtnames(self, extname=None, exclude=None):
""" This method builds a list of all extensions which have 'EXTNAME'==extname
and do not include any extensions with 'EXTNAME'==exclude, if any are
specified for exclusion at all.
"""
#make a list of the available extension names for the object
extensions=[]
if extname is not None:
if not isinstance(extname,list): extname=[extname]
for extn in extname:
extensions.append(extn.upper())
else:
#restore all the extensions data from the original file, be careful here
#if you've altered data in memory you want to keep!
for i in range(1,self._nextend+1,1):
if hasattr(self._image[i],'_extension') and \
"IMAGE" in self._image[i]._extension:
if self._image[i].extname.upper() not in extensions:
extensions.append(self._image[i].extname)
#remove this extension from the list
        if exclude is not None:
            exclude = exclude.upper()
if exclude in extensions:
newExt=[]
for item in extensions:
if item != exclude:
newExt.append(item)
extensions=newExt
del newExt
return extensions
def findExtNum(self, extname=None, extver=1):
"""Find the extension number of the give extname and extver."""
extnum = None
extname = extname.upper()
if not self._isSimpleFits:
for ext in self._image:
if (hasattr(ext,'_extension') and 'IMAGE' in ext._extension and
(ext.extname == extname) and (ext.extver == extver)):
extnum = ext.extnum
else:
log.info("Image is simple fits")
return extnum
def _assignRootname(self, chip):
""" Assign a unique rootname for the image based in the expname. """
extname=self._image[self.scienceExt,chip].header["EXTNAME"].lower()
extver=self._image[self.scienceExt,chip].header["EXTVER"]
expname = self._rootname
# record extension-based name to reflect what extension a mask file corresponds to
self._image[self.scienceExt,chip].rootname=expname + "_" + extname + str(extver)
self._image[self.scienceExt,chip].sciname=self._filename + "[" + extname +","+str(extver)+"]"
self._image[self.scienceExt,chip].dqrootname=self._rootname + "_" + extname + str(extver)
# Needed to keep EXPNAMEs associated properly (1 EXPNAME for all chips)
self._image[self.scienceExt,chip]._expname=expname
self._image[self.scienceExt,chip]._chip =chip
def _setOutputNames(self,rootname,suffix='_drz'):
""" Define the default output filenames for drizzle products,
these are based on the original rootname of the image
filename should be just 1 filename, so call this in a loop
for chip names contained inside a file.
"""
# Define FITS output filenames for intermediate products
# Build names based on final DRIZZLE output name
# where 'output' normally would have been created
# by 'process_input()'
#
outFinal = rootname+suffix+'.fits'
outSci = rootname+suffix+'_sci.fits'
outWeight = rootname+suffix+'_wht.fits'
outContext = rootname+suffix+'_ctx.fits'
outMedian = rootname+'_med.fits'
# Build names based on input name
origFilename = self._filename.replace('.fits','_OrIg.fits')
outSky = rootname + '_sky.fits'
outSingle = rootname+'_single_sci.fits'
outSWeight = rootname+'_single_wht.fits'
crCorImage = rootname+'_crclean.fits'
# Build outputNames dictionary
fnames={
'origFilename': origFilename,
'outFinal': outFinal,
'outMedian': outMedian,
'outSci': outSci,
'outWeight': outWeight,
'outContext': outContext,
'outSingle': outSingle,
'outSWeight': outSWeight,
'outSContext': None,
'outSky': outSky,
'crcorImage': crCorImage,
'ivmFile': None
}
return fnames
def _initVirtualOutputs(self):
""" Sets up the structure to hold all the output data arrays for
this image in memory.
"""
self.virtualOutputs = {}
for product in self.outputNames:
self.virtualOutputs[product] = None
def saveVirtualOutputs(self,outdict):
""" Assign in-memory versions of generated products for this
``imageObject`` based on dictionary 'outdict'.
"""
if not self.inmemory:
return
for outname in outdict:
self.virtualOutputs[outname] = outdict[outname]
def getOutputName(self,name):
""" Return the name of the file or PyFITS object associated with that
name, depending on the setting of self.inmemory.
"""
val = self.outputNames[name]
if self.inmemory: # if inmemory was turned on...
# return virtualOutput object saved with that name
val = self.virtualOutputs[val]
return val
def updateOutputValues(self,output_wcs):
""" Copy info from output WCSObject into outputnames for each chip
for use in creating outputimage object.
"""
outputvals = self.outputValues
outputvals['output'] = output_wcs.outputNames['outFinal']
outputvals['outnx'], outputvals['outny'] = output_wcs.wcs.pixel_shape
outputvals['texptime'] = output_wcs._exptime
outputvals['texpstart'] = output_wcs._expstart
outputvals['texpend'] = output_wcs._expend
outputvals['nimages'] = output_wcs.nimages
outputvals['scale'] = output_wcs.wcs.pscale #/ self._image[self.scienceExt,1].wcs.pscale
outputvals['exptime'] = self._exptime
outnames = self.outputNames
outnames['outMedian'] = output_wcs.outputNames['outMedian']
outnames['outFinal'] = output_wcs.outputNames['outFinal']
outnames['outSci'] = output_wcs.outputNames['outSci']
outnames['outWeight'] = output_wcs.outputNames['outWeight']
outnames['outContext'] = output_wcs.outputNames['outContext']
def updateContextImage(self, contextpar):
""" Reset the name of the context image to `None` if parameter
``context`` is `False`.
"""
self.createContext = contextpar
if not contextpar:
log.info('No context image will be created for %s' %
self._filename)
self.outputNames['outContext'] = None
def find_DQ_extension(self):
""" Return the suffix for the data quality extension and the name of
the file which that DQ extension should be read from.
"""
dqfile = None
dq_suffix=None
    if self.maskExt is not None:
for hdu in self._image:
# Look for DQ extension in input file
if 'extname' in hdu.header and hdu.header['extname'].lower() == self.maskExt.lower():
dqfile = self._filename
dq_suffix=self.maskExt
break
return dqfile,dq_suffix
def getKeywordList(self, kw):
"""
Return lists of all attribute values for all active chips in the
``imageObject``.
"""
kwlist = []
for chip in range(1,self._numchips+1,1):
sci_chip = self._image[self.scienceExt,chip]
if sci_chip.group_member:
kwlist.append(sci_chip.__dict__[kw])
return kwlist
def getflat(self, chip):
"""
Method for retrieving a detector's flat field.
Returns
-------
flat: array
This method will return an array the same shape as the image in
**units of electrons**.
"""
sci_chip = self._image[self.scienceExt, chip]
# The keyword for ACS flat fields in the primary header of the flt
# file is pfltfile. This flat file is already in the required
# units of electrons.
# The use of fileutil.osfn interprets any environment variable, such as
# jref$, used in the specification of the reference filename
filename = fileutil.osfn(self._image["PRIMARY"].header[self.flatkey])
hdulist = None
try:
hdulist = fileutil.openImage(filename, mode='readonly',
memmap=False)
data = hdulist[(self.scienceExt, chip)].data
if data.shape[0] != sci_chip.image_shape[0]:
ltv2 = int(np.round(sci_chip.ltv2))
else:
ltv2 = 0
size2 = sci_chip.image_shape[0] + ltv2
if data.shape[1] != sci_chip.image_shape[1]:
ltv1 = int(np.round(sci_chip.ltv1))
else:
ltv1 = 0
size1 = sci_chip.image_shape[1] + ltv1
flat = data[ltv2:size2, ltv1:size1]
except FileNotFoundError:
flat = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
log.warning("Cannot find flat field file '{}'".format(filename))
log.warning("Treating flatfield as a constant value of '1'.")
finally:
if hdulist is not None:
hdulist.close()
return flat
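# --- Editor's note: the LTV1/LTV2 handling above cuts a full-frame flat
# down to match a subarray science image. A sketch of that slicing with
# hypothetical inputs:
def _example_subarray_cutout(fullframe, ltv1, ltv2, image_shape):
    import numpy as np
    # LTV gives the offset of the subarray origin within the full frame
    ltv1 = int(np.round(ltv1))
    ltv2 = int(np.round(ltv2))
    return fullframe[ltv2:image_shape[0] + ltv2, ltv1:image_shape[1] + ltv1]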
def getReadNoiseImage(self, chip):
"""
Notes
=====
Method for returning the readnoise image of a detector
(in electrons).
The method will return an array of the same shape as the image.
:units: electrons
"""
sci_chip = self._image[self.scienceExt,chip]
return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype) * sci_chip._rdnoise
def getexptimeimg(self,chip):
"""
Notes
=====
Return an array representing the exposure time per pixel for the detector.
This method will be overloaded for IR detectors which have their own
EXP arrays, namely, WFC3/IR and NICMOS images.
:units:
None
Returns
=======
exptimeimg : numpy array
The method will return an array of the same shape as the image.
"""
sci_chip = self._image[self.scienceExt,chip]
if sci_chip._wtscl_par == 'expsq':
wtscl = sci_chip._exptime*sci_chip._exptime
else:
wtscl = sci_chip._exptime
return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*wtscl
def getdarkimg(self,chip):
"""
Notes
=====
Return an array representing the dark image for the detector.
The method will return an array of the same shape as the image.
:units: electrons
"""
sci_chip = self._image[self.scienceExt,chip]
return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*sci_chip.darkcurrent
def getskyimg(self,chip):
"""
Notes
=====
Return an array representing the sky image for the detector. The value
of the sky is what would actually be subtracted from the exposure by
the skysub step.
:units: electrons
"""
sci_chip = self._image[self.scienceExt,chip]
return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*sci_chip.subtractedSky
def getExtensions(self, extname='SCI', section=None):
""" Return the list of EXTVER values for extensions with name specified
in extname.
"""
if section is None:
numext = 0
section = []
for hdu in self._image:
if 'extname' in hdu.header and hdu.header['extname'] == extname:
section.append(hdu.header['extver'])
else:
if not isinstance(section,list):
section = [section]
return section
def _countEXT(self,extname="SCI"):
""" Count the number of extensions in the file with the given name
(``EXTNAME``).
"""
count=0 #simple fits image
if (self._image['PRIMARY'].header["EXTEND"]):
for i,hdu in enumerate(self._image):
if i > 0:
hduExtname = False
if 'EXTNAME' in hdu.header:
self._image[i].extnum=i
self._image[i].extname=hdu.header["EXTNAME"]
hduExtname = True
if 'EXTVER' in hdu.header:
self._image[i].extver=hdu.header["EXTVER"]
else:
self._image[i].extver = 1
if ((extname is not None) and \
(hduExtname and (hdu.header["EXTNAME"] == extname))) \
or extname is None:
count=count+1
return count
def buildMask(self,chip,bits=0,write=False):
"""
Build masks as specified in the user parameters found in the
configObj object.
    We should overload this function in the instrument-specific
    implementations so that we can add other features to the bad-pixel
    mask, like vignetting areas and chip boundaries in NICMOS, which
    are camera dependent. These are not defined in the DQ masks, but
    should be masked out to get the best results in multidrizzle.
"""
dqarr = self.getData(exten=self.maskExt+','+str(chip))
dqmask = buildmask.buildMask(dqarr,bits)
if write:
phdu = fits.PrimaryHDU(data=dqmask,header=self._image[self.maskExt,chip].header)
dqmask_name = self._image[self.scienceExt,chip].dqrootname+'_dqmask.fits'
log.info('Writing out DQ/weight mask: %s' % dqmask_name)
if os.path.exists(dqmask_name): os.remove(dqmask_name)
phdu.writeto(dqmask_name)
del phdu
self._image[self.scienceExt,chip].dqmaskname = dqmask_name
# record the name of this mask file that was created for later
# removal by the 'clean()' method
self._image[self.scienceExt,chip].outputNames['dqmask'] = dqmask_name
del dqarr
return dqmask
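# --- Editor's note: buildmask.buildMask is defined elsewhere; the usual
# DQ-bit convention it implements is that a pixel is good (1) only when all
# of its DQ flags are contained in the accepted 'bits' value. A toy version:
def _example_dq_mask(dqarr, bits):
    import numpy as np
    accepted = np.array(bits, dtype=dqarr.dtype)
    return (np.bitwise_and(dqarr, ~accepted) == 0).astype(np.uint8)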
def buildEXPmask(self, chip, dqarr):
""" Builds a weight mask from an input DQ array and the exposure time
per pixel for this chip.
"""
log.info("Applying EXPTIME weighting to DQ mask for chip %s" %
chip)
#exparr = self.getexptimeimg(chip)
exparr = self._image[self.scienceExt,chip]._exptime
expmask = exparr*dqarr
return expmask.astype(np.float32)
def buildIVMmask(self ,chip, dqarr, scale):
""" Builds a weight mask from an input DQ array and either an IVM array
provided by the user or a self-generated IVM array derived from the
flat-field reference file associated with the input image.
"""
sci_chip = self._image[self.scienceExt,chip]
ivmname = self.outputNames['ivmFile']
if ivmname is not None:
log.info("Applying user supplied IVM files for chip %s" % chip)
#Parse the input file name to get the extension we are working on
extn = "IVM,{}".format(chip)
#Open the mask image for updating and the IVM image
ivm = fileutil.openImage(ivmname, mode='readonly', memmap=False)
ivmfile = fileutil.getExtn(ivm, extn)
# Multiply the IVM file by the input mask in place.
ivmarr = ivmfile.data * dqarr
ivm.close()
else:
log.info("Automatically creating IVM files for chip %s" % chip)
# If no IVM files were provided by the user we will
# need to automatically generate them based upon
# instrument specific information.
flat = self.getflat(chip)
RN = self.getReadNoiseImage(chip)
darkimg = self.getdarkimg(chip)
skyimg = self.getskyimg(chip)
#exptime = self.getexptimeimg(chip)
#exptime = sci_chip._exptime
#ivm = (flat*exptime)**2/(darkimg+(skyimg*flat)+RN**2)
ivm = (flat)**2/(darkimg+(skyimg*flat)+RN**2)
# Multiply the IVM file by the input mask in place.
ivmarr = ivm * dqarr
# Update 'wt_scl' parameter to match use of IVM file
sci_chip._wtscl = pow(sci_chip._exptime,2)/pow(scale,4)
#sci_chip._wtscl = 1.0/pow(scale,4)
return ivmarr.astype(np.float32)
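# --- Editor's note: the auto-generated inverse-variance map above follows
#     ivm = flat**2 / (dark + sky * flat + RN**2)
# i.e. the squared flat-field response divided by the per-pixel noise
# variance in electrons. A scalar worked example with made-up values:
def _example_ivm_pixel(flat=1.0, dark=5.0, sky=30.0, rn=4.0):
    return flat ** 2 / (dark + sky * flat + rn ** 2)  # = 1/51 ~ 0.0196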
def buildERRmask(self,chip,dqarr,scale):
"""
Builds a weight mask from an input DQ array and an ERR array
associated with the input image.
"""
sci_chip = self._image[self.scienceExt,chip]
# Set default value in case of error, or lack of ERR array
errmask = dqarr
if self.errExt is not None:
try:
# Attempt to open the ERR image.
err = self.getData(exten=self.errExt+','+str(chip))
log.info("Applying ERR weighting to DQ mask for chip %s" %
chip)
# Multiply the scaled ERR file by the input mask in place.
#exptime = self.getexptimeimg(chip)
exptime = sci_chip._exptime
errmask = (exptime/err)**2 * dqarr
# Update 'wt_scl' parameter to match use of IVM file
#sci_chip._wtscl = pow(sci_chip._exptime,2)/pow(scale,4)
sci_chip._wtscl = 1.0/pow(scale,4)
del err
        except Exception:
# We cannot find an 'ERR' extension and the data isn't WFPC2.
# Print a generic warning message and continue on with the
# final drizzle step.
print(textutil.textbox(
'WARNING: No ERR weighting will be applied to the mask '
'used in the final drizzle step! Weighting will be only '
'by exposure time.\n\nThe data provided as input does not '
'contain an ERR extension'), file=sys.stderr)
            print('\n Continue with final drizzle step...', file=sys.stderr)
else:
# If we were unable to find an 'ERR' extension to apply, one
# possible reason was that the input was a 'standard' WFPC2 data
# file that does not actually contain an error array. Test for
# this condition and issue a Warning to the user and continue on to
# the final drizzle.
print(textutil.textbox(
"WARNING: No ERR weighting will be applied to the mask used "
"in the final drizzle step! Weighting will be only by "
"exposure time.\n\nThe WFPC2 data provided as input does not "
"contain ERR arrays. WFPC2 data is not supported by this "
"weighting type.\n\nA workaround would be to create inverse "
"variance maps and use 'IVM' as the final_wht_type. See the "
"HELP file for more details on using inverse variance maps."),
file=sys.stderr)
print("\n Continue with final drizzle step...", file=sys.stderr)
return errmask.astype(np.float32)
def set_mt_wcs(self, image):
""" Reset the WCS for this image based on the WCS information from
another imageObject.
"""
for chip in range(1,self._numchips+1,1):
sci_chip = self._image[self.scienceExt,chip]
ref_chip = image._image[image.scienceExt,chip]
# Do we want to keep track of original WCS or not? No reason now...
sci_chip.wcs = ref_chip.wcs.copy()
def set_wtscl(self, chip, wtscl_par):
""" Sets the value of the wt_scl parameter as needed for drizzling.
"""
sci_chip = self._image[self.scienceExt,chip]
exptime = 1 #sci_chip._exptime
_parval = 'unity'
if wtscl_par is not None:
        if isinstance(wtscl_par, str):
if not wtscl_par.isdigit():
# String passed in as value, check for 'exptime' or 'expsq'
_wtscl_float = None
try:
_wtscl_float = float(wtscl_par)
except ValueError:
_wtscl_float = None
if _wtscl_float is not None:
_wtscl = _wtscl_float
elif wtscl_par == 'expsq':
_wtscl = exptime*exptime
_parval = 'expsq'
else:
# Default to the case of 'exptime', if
# not explicitly specified as 'expsq'
_wtscl = exptime
else:
# int value passed in as a string, convert to float
_wtscl = float(wtscl_par)
else:
# We have a non-string value passed in...
_wtscl = float(wtscl_par)
else:
# Default case: wt_scl = exptime
_wtscl = exptime
sci_chip._wtscl_par = _parval
sci_chip._wtscl = _wtscl
def getInstrParameter(self, value, header, keyword):
""" This method gets a instrument parameter from a
pair of task parameters: a value, and a header keyword.
The default behavior is:
- if the value and header keyword are given, raise an exception.
- if the value is given, use it.
- if the value is blank and the header keyword is given, use
the header keyword.
- if both are blank, or if the header keyword is not
found, return None.
"""
if isinstance(value, str) and value in ['None', '', ' ', 'INDEF']:
value = None
if value and (keyword is not None and keyword.strip() != ''):
        exceptionMessage = ("ERROR: Your input is ambiguous! Please specify "
                            "either a value or a keyword.\n You specified "
                            "both " + str(value) + " and " + str(keyword))
raise ValueError(exceptionMessage)
elif value is not None and value != '':
return self._averageFromList(value)
elif keyword is not None and keyword.strip() != '':
return self._averageFromHeader(header, keyword)
else:
return None
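# --- Editor's note: a sketch of how the value/keyword precedence above
# plays out, with a hypothetical header (obj stands for any instance
# providing getInstrParameter):
def _example_instr_parameter(obj):
    from astropy.io import fits
    hdr = fits.Header([('ATODGNA', 2.02), ('ATODGNB', 1.88)])
    gain = obj.getInstrParameter('', hdr, 'ATODGNA,ATODGNB')  # 1.95, header average
    fixed = obj.getInstrParameter('2.0', hdr, '')             # 2.0, explicit value
    empty = obj.getInstrParameter('', hdr, '')                # None, both blank
    return gain, fixed, empty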
def _averageFromHeader(self, header, keyword):
""" Averages out values taken from header. The keywords where
to read values from are passed as a comma-separated list.
"""
_list = ''
for _kw in keyword.split(','):
if _kw in header:
_list = _list + ',' + str(header[_kw])
else:
return None
return self._averageFromList(_list)
def _averageFromList(self, param):
""" Averages out values passed as a comma-separated
list, disregarding the zero-valued entries.
"""
_result = 0.0
_count = 0
for _param in param.split(','):
if _param != '' and float(_param) != 0.0:
_result = _result + float(_param)
_count += 1
if _count >= 1:
_result = _result / _count
return _result
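# --- Editor's note: zero entries are treated as "no data" and dropped from
# the average; e.g. '2.02,1.88,0.0,2.10' averages to
# (2.02 + 1.88 + 2.10) / 3 == 2.0.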
def compute_wcslin(self,undistort=True):
""" Compute the undistorted WCS based solely on the known distortion
model information associated with the WCS.
"""
for chip in range(1,self._numchips+1,1):
sci_chip = self._image[self.scienceExt,chip]
chip_wcs = sci_chip.wcs.copy()
if chip_wcs.sip is None or not undistort or chip_wcs.instrument=='DEFAULT':
chip_wcs.sip = None
chip_wcs.cpdis1 = None
chip_wcs.cpdis2 = None
chip_wcs.det2im = None
undistort=False
# compute the undistorted 'natural' plate scale for this chip
wcslin = distortion.utils.output_wcs([chip_wcs],undistort=undistort)
sci_chip.wcslin_pscale = wcslin.pscale
def set_units(self,chip):
""" Define units for this image.
"""
# Determine output value of BUNITS
# and make sure it is not specified as 'ergs/cm...'
sci_chip = self._image[self.scienceExt,chip]
_bunit = None
if 'BUNIT' in sci_chip.header and sci_chip.header['BUNIT'].find('ergs') < 0:
_bunit = sci_chip.header['BUNIT']
else:
_bunit = 'ELECTRONS/S'
sci_chip._bunit = _bunit
#
if '/s' in _bunit.lower():
_in_units = 'cps'
else:
_in_units = 'counts'
sci_chip.in_units = _in_units
def getTemplates(fnames, blend=True):
""" Process all headers to produce a set of combined headers
that follows the rules defined by each instrument.
"""
if not blend:
newhdrs = blendheaders.getSingleTemplate(fnames[0])
newtab = None
else:
# apply rules to create final version of headers, plus table
newhdrs, newtab = blendheaders.get_blended_headers(inputs=fnames)
cleanTemplates(newhdrs[1],newhdrs[2],newhdrs[3])
return newhdrs, newtab
def addWCSKeywords(wcs,hdr,blot=False,single=False,after=None):
""" Update input header 'hdr' with WCS keywords.
"""
wname = wcs.wcs.name
if not single:
wname = 'DRZWCS'
# Update WCS Keywords based on PyDrizzle product's value
# since 'drizzle' itself doesn't update that keyword.
hdr['WCSNAME'] = wname
hdr.set('VAFACTOR', value=1.0, after=after)
hdr.set('ORIENTAT', value=wcs.orientat, after=after)
# Use of 'after' not needed if these keywords already exist in the header
if after in WCS_KEYWORDS:
after = None
if 'CTYPE1' not in hdr:
hdr.set('CTYPE2', value=wcs.wcs.ctype[1], after=after)
hdr.set('CTYPE1', value=wcs.wcs.ctype[0], after=after)
hdr.set('CRPIX2', value=wcs.wcs.crpix[1], after=after)
hdr.set('CRPIX1', value=wcs.wcs.crpix[0], after=after)
hdr.set('CRVAL2', value=wcs.wcs.crval[1], after=after)
hdr.set('CRVAL1', value=wcs.wcs.crval[0], after=after)
hdr.set('CD2_2', value=wcs.wcs.cd[1][1], after=after)
hdr.set('CD2_1', value=wcs.wcs.cd[1][0], after=after)
hdr.set('CD1_2', value=wcs.wcs.cd[0][1], after=after)
hdr.set('CD1_1', value=wcs.wcs.cd[0][0], after=after)
# delete distortion model related keywords
deleteDistortionKeywords(hdr)
if not blot:
blendheaders.remove_distortion_keywords(hdr)
|
Update input header 'hdr' with WCS keywords.
|
entailment
|
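A hypothetical usage sketch, assuming an stwcs.wcsutil.HSTWCS instance (which provides the .orientat attribute this function reads); the input file name is illustrative:

from astropy.io import fits
from stwcs import wcsutil

wcs = wcsutil.HSTWCS('j8c0d1bxq_flt.fits', ext=('sci', 1))  # hypothetical exposure
hdr = fits.Header()
addWCSKeywords(wcs, hdr, blot=False, single=True, after=None)
# hdr now carries WCSNAME, ORIENTAT, VAFACTOR and the CRVAL/CRPIX/CD keywords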
def writeSingleFITS(data,wcs,output,template,clobber=True,verbose=True):
""" Write out a simple FITS file given a numpy array and the name of another
FITS file to use as a template for the output image header.
"""
outname,outextn = fileutil.parseFilename(output)
outextname,outextver = fileutil.parseExtn(outextn)
if fileutil.findFile(outname):
if clobber:
log.info('Deleting previous output product: %s' % outname)
fileutil.removeFile(outname)
else:
log.warning('Output file %s already exists and overwrite not '
'specified!' % outname)
log.error('Quitting... Please remove before resuming operations.')
raise IOError
# Now update WCS keywords with values from provided WCS
if hasattr(wcs.sip,'a_order'):
siphdr = True
else:
siphdr = False
wcshdr = wcs.wcs2header(sip2hdr=siphdr)
if template is not None:
# Get default headers from multi-extension FITS file
# If input data is not in MEF FITS format, it will return 'None'
# NOTE: These are HEADER objects, not HDUs
(prihdr,scihdr,errhdr,dqhdr),newtab = getTemplates(template)
if scihdr is None:
scihdr = fits.Header()
indx = 0
for c in prihdr.cards:
if c.keyword not in ['INHERIT','EXPNAME']: indx += 1
else: break
for i in range(indx,len(prihdr)):
scihdr.append(prihdr.cards[i])
for i in range(indx, len(prihdr)):
del prihdr[indx]
else:
scihdr = fits.Header()
prihdr = fits.Header()
# Start by updating PRIMARY header keywords...
prihdr.set('EXTEND', value=True, after='NAXIS')
prihdr['FILENAME'] = outname
if outextname == '':
outextname = 'sci'
if outextver == 0: outextver = 1
scihdr['EXTNAME'] = outextname.upper()
scihdr['EXTVER'] = outextver
for card in wcshdr.cards:
scihdr[card.keyword] = (card.value, card.comment)
# Create PyFITS HDUList for all extensions
outhdu = fits.HDUList()
# Setup primary header as an HDU ready for appending to output FITS file
prihdu = fits.PrimaryHDU(header=prihdr)
scihdu = fits.ImageHDU(header=scihdr,data=data)
outhdu.append(prihdu)
outhdu.append(scihdu)
outhdu.writeto(outname)
if verbose:
print('Created output image: %s' % outname)
|
Write out a simple FITS file given a numpy array and the name of another
FITS file to use as a template for the output image header.
|
entailment
|
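A usage sketch (file names hypothetical); the template exposure supplies the header structure, while 'wcs' is the same HSTWCS-style object as above, providing the wcs2header() method this function calls:

import numpy as np

data = np.zeros((1024, 1024), dtype=np.float32)
writeSingleFITS(data, wcs, 'mosaic_single.fits[sci,1]', 'j8c0d1bxq_flt.fits')
# -> creates mosaic_single.fits with a PRIMARY HDU and one SCI image extension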
def writeDrizKeywords(hdr,imgnum,drizdict):
""" Write basic drizzle-related keywords out to image header as a record
of the processing performed to create the image
The dictionary 'drizdict' will contain the keywords and values to be
written out to the header.
"""
_keyprefix = 'D%03d'%imgnum
for key in drizdict:
val = drizdict[key]['value']
if val is None: val = ""
comment = drizdict[key]['comment']
if comment is None: comment = ""
hdr[_keyprefix+key] = (val, comment)
|
Write basic drizzle-related keywords out to image header as a record
of the processing performed to create the image
The dictionary 'drizdict' will contain the keywords and values to be
written out to the header.
|
entailment
|
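A small demonstration of the keyword-prefixing convention: for image number 1, every key in drizdict is written as D001<key> (the dictionary contents below are illustrative):

from astropy.io import fits

hdr = fits.Header()
drizdict = {'VER': {'value': 'Drizzle 1.13', 'comment': 'Drizzle, task version'}}
writeDrizKeywords(hdr, 1, drizdict)
print(hdr['D001VER'])            # Drizzle 1.13
print(hdr.comments['D001VER'])   # Drizzle, task version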
def writeFITS(self, template, sciarr, whtarr, ctxarr=None,
versions=None, overwrite=yes, blend=True, virtual=False):
"""
Generate PyFITS objects for each output extension
using the file given by 'template' for populating
headers.
The arrays will have the size specified by 'shape'.
"""
if not isinstance(template, list):
template = [template]
if fileutil.findFile(self.output):
if overwrite:
log.info('Deleting previous output product: %s' % self.output)
fileutil.removeFile(self.output)
else:
log.warning('Output file %s already exists and overwrite not '
'specified!' % self.output)
log.error('Quitting... Please remove before resuming '
'operations.')
raise IOError
# initialize output value for this method
outputFITS = {}
# Default value for NEXTEND when 'build'== True
nextend = 3
if not self.build:
nextend = 0
if self.outweight:
if overwrite:
if fileutil.findFile(self.outweight):
log.info('Deleting previous output WHT product: %s' %
self.outweight)
fileutil.removeFile(self.outweight)
else:
log.warning('Output file %s already exists and overwrite '
'not specified!' % self.outweight)
log.error('Quitting... Please remove before resuming '
'operations.')
raise IOError
if self.outcontext:
if overwrite:
if fileutil.findFile(self.outcontext):
log.info('Deleting previous output CTX product: %s' %
self.outcontext)
fileutil.removeFile(self.outcontext)
else:
log.warning('Output file %s already exists and overwrite '
'not specified!' % self.outcontext)
log.error('Quitting... Please remove before resuming '
'operations.')
raise IOError
# Get default headers from multi-extension FITS file
# If only writing out single drizzle product, blending needs to be
# forced off as there is only 1 input to report, no blending needed
if self.single:
blend=False
# If input data is not in MEF FITS format, it will return 'None'
# and those headers will have to be generated from drizzle output
# file FITS headers.
# NOTE: These are HEADER objects, not HDUs
#prihdr,scihdr,errhdr,dqhdr = getTemplates(template)
self.fullhdrs, intab = getTemplates(template, blend=False)
newhdrs, newtab = getTemplates(template,blend=blend)
if newtab is not None: nextend += 1 # account for new table extn
prihdr = newhdrs[0]
scihdr = newhdrs[1]
errhdr = newhdrs[2]
dqhdr = newhdrs[3]
# Setup primary header as an HDU ready for appending to output FITS file
prihdu = fits.PrimaryHDU(header=prihdr, data=None)
# Start by updating PRIMARY header keywords...
prihdu.header.set('EXTEND', value=True, after='NAXIS')
prihdu.header['NEXTEND'] = nextend
prihdu.header['FILENAME'] = self.output
prihdu.header['PROD_VER'] = 'DrizzlePac {}'.format(version.__version__)
# Update the ROOTNAME with the new value as well
_indx = self.output.find('_drz')
if _indx < 0:
rootname_val = self.output
else:
rootname_val = self.output[:_indx]
prihdu.header['ROOTNAME'] = rootname_val
# Get the total exposure time for the image
# If not calculated by PyDrizzle and passed through
# the pardict, then leave value from the template image.
if self.texptime:
prihdu.header['EXPTIME'] = self.texptime
prihdu.header.set('TEXPTIME', value=self.texptime, after='EXPTIME')
prihdu.header['EXPSTART'] = self.expstart
prihdu.header['EXPEND'] = self.expend
#Update ASN_MTYPE to reflect the fact that this is a product
# Currently hard-wired to always output 'PROD-DTH' as MTYPE
prihdu.header['ASN_MTYP'] = 'PROD-DTH'
# Update DITHCORR calibration keyword if present
# Remove when we can modify FITS headers in place...
if 'DRIZCORR' in prihdu.header:
prihdu.header['DRIZCORR'] = 'COMPLETE'
if 'DITHCORR' in prihdu.header:
prihdu.header['DITHCORR'] = 'COMPLETE'
prihdu.header['NDRIZIM'] =(len(self.parlist),
'Drizzle, No. images drizzled onto output')
# Only a subset of these keywords makes sense for the new WCS based
# transformations. They need to be reviewed to decide what to keep
# and what to leave out.
if not self.blot:
self.addDrizKeywords(prihdu.header,versions)
if scihdr:
try:
del scihdr['OBJECT']
except KeyError:
pass
if 'CCDCHIP' in scihdr: scihdr['CCDCHIP'] = '-999'
if 'NCOMBINE' in scihdr:
scihdr['NCOMBINE'] = self.parlist[0]['nimages']
# If BUNIT keyword was found and reset, then
bunit_last_kw = self.find_kwupdate_location(scihdr,'bunit')
if self.bunit is not None:
comment_str = "Units of science product"
if self.bunit.lower()[:5] == 'count':
comment_str = "counts * gain = electrons"
scihdr.set('BUNIT', value=self.bunit,
comment=comment_str,
after=bunit_last_kw)
else:
# check to see whether to update already present BUNIT comment
if 'bunit' in scihdr and scihdr['bunit'].lower()[:5] == 'count':
comment_str = "counts * gain = electrons"
scihdr.set('BUNIT', value=scihdr['bunit'],
comment=comment_str,
after=bunit_last_kw)
# Add WCS keywords to SCI header
if self.wcs:
pre_wcs_kw = self.find_kwupdate_location(scihdr,'CD1_1')
addWCSKeywords(self.wcs,scihdr,blot=self.blot,
single=self.single, after=pre_wcs_kw)
# Recompute this after removing distortion kws
pre_wcs_kw = self.find_kwupdate_location(scihdr,'CD1_1')
##########
# Now, build the output file
##########
if self.build:
print('-Generating multi-extension output file: ',self.output)
fo = fits.HDUList()
# Add primary header to output file...
fo.append(prihdu)
if self.single and self.compress:
hdu = fits.CompImageHDU(data=sciarr, header=scihdr, name=EXTLIST[0])
else:
hdu = fits.ImageHDU(data=sciarr, header=scihdr, name=EXTLIST[0])
last_kw = self.find_kwupdate_location(scihdr,'EXTNAME')
hdu.header.set('EXTNAME', value='SCI', after=last_kw)
hdu.header.set('EXTVER', value=1, after='EXTNAME')
fo.append(hdu)
# Build WHT extension here, if requested...
if errhdr:
errhdr['CCDCHIP'] = '-999'
if self.single and self.compress:
hdu = fits.CompImageHDU(data=whtarr, header=errhdr, name=EXTLIST[1])
else:
hdu = fits.ImageHDU(data=whtarr, header=errhdr, name=EXTLIST[1])
last_kw = self.find_kwupdate_location(errhdr,'EXTNAME')
hdu.header.set('EXTNAME', value='WHT', after=last_kw)
hdu.header.set('EXTVER', value=1, after='EXTNAME')
if self.wcs:
pre_wcs_kw = self.find_kwupdate_location(hdu.header,'CD1_1')
# Update WCS Keywords based on PyDrizzle product's value
# since 'drizzle' itself doesn't update that keyword.
addWCSKeywords(self.wcs,hdu.header,blot=self.blot,
single=self.single, after=pre_wcs_kw)
fo.append(hdu)
# Build CTX extension here
# If there is only 1 plane, write it out as a 2-D extension
if self.outcontext:
if ctxarr.shape[0] == 1:
_ctxarr = ctxarr[0]
else:
_ctxarr = ctxarr
else:
_ctxarr = None
if self.single and self.compress:
hdu = fits.CompImageHDU(data=_ctxarr, header=dqhdr, name=EXTLIST[2])
else:
hdu = fits.ImageHDU(data=_ctxarr, header=dqhdr, name=EXTLIST[2])
last_kw = self.find_kwupdate_location(dqhdr,'EXTNAME')
hdu.header.set('EXTNAME', value='CTX', after=last_kw)
hdu.header.set('EXTVER', value=1, after='EXTNAME')
if self.wcs:
pre_wcs_kw = self.find_kwupdate_location(hdu.header,'CD1_1')
# Update WCS Keywords based on PyDrizzle product's value
# since 'drizzle' itself doesn't update that keyword.
addWCSKeywords(self.wcs,hdu.header,blot=self.blot,
single=self.single, after=pre_wcs_kw)
fo.append(hdu)
# remove all alternate WCS solutions from headers of this product
wcs_functions.removeAllAltWCS(fo,[1])
# add table of combined header keyword values to FITS file
if newtab is not None:
fo.append(newtab)
if not virtual:
print('Writing out to disk:',self.output)
# write out file to disk
fo.writeto(self.output)
fo.close()
del fo, hdu
fo = None
# End 'if not virtual'
outputFITS[self.output]= fo
else:
print('-Generating simple FITS output: %s' % self.outdata)
fo = fits.HDUList()
hdu_header = prihdu.header.copy()
del hdu_header['nextend']
# Append remaining unique header keywords from template DQ
# header to Primary header...
if scihdr:
for _card in scihdr.cards:
if _card.keyword not in RESERVED_KEYS and _card.keyword not in hdu_header:
hdu_header.append(_card)
for kw in ['PCOUNT', 'GCOUNT']:
try:
del hdu_header[kw]
except KeyError:
pass
hdu_header['filename'] = self.outdata
if self.compress:
hdu = fits.CompImageHDU(data=sciarr, header=hdu_header)
wcs_ext = [1]
else:
hdu = fits.ImageHDU(data=sciarr, header=hdu_header)
wcs_ext = [0]
# explicitly set EXTEND to FALSE for simple FITS files.
dim = len(sciarr.shape)
hdu.header.set('extend', value=False, after='NAXIS%s'%dim)
# Add primary header to output file...
fo.append(hdu)
# remove all alternate WCS solutions from headers of this product
logutil.logging.disable(logutil.logging.INFO)
wcs_functions.removeAllAltWCS(fo,wcs_ext)
logutil.logging.disable(logutil.logging.NOTSET)
# add table of combined header keyword values to FITS file
if newtab is not None:
fo.append(newtab)
if not virtual:
print('Writing out image to disk:',self.outdata)
# write out file to disk
fo.writeto(self.outdata)
del fo,hdu
fo = None
# End 'if not virtual'
outputFITS[self.outdata]= fo
if self.outweight and whtarr is not None:
# We need to build new PyFITS objects for each WHT array
fwht = fits.HDUList()
if errhdr:
errhdr['CCDCHIP'] = '-999'
if self.compress:
hdu = fits.CompImageHDU(data=whtarr, header=prihdu.header)
else:
hdu = fits.ImageHDU(data=whtarr, header=prihdu.header)
# Append remaining unique header keywords from template DQ
# header to Primary header...
if errhdr:
for _card in errhdr.cards:
if _card.keyword not in RESERVED_KEYS and _card.keyword not in hdu.header:
hdu.header.append(_card)
hdu.header['filename'] = self.outweight
hdu.header['CCDCHIP'] = '-999'
if self.wcs:
pre_wcs_kw = self.find_kwupdate_location(hdu.header,'CD1_1')
# Update WCS Keywords based on PyDrizzle product's value
# since 'drizzle' itself doesn't update that keyword.
addWCSKeywords(self.wcs,hdu.header, blot=self.blot,
single=self.single, after=pre_wcs_kw)
# Add primary header to output file...
fwht.append(hdu)
# remove all alternate WCS solutions from headers of this product
wcs_functions.removeAllAltWCS(fwht,wcs_ext)
if not virtual:
print('Writing out image to disk:',self.outweight)
fwht.writeto(self.outweight)
del fwht,hdu
fwht = None
# End 'if not virtual'
outputFITS[self.outweight]= fwht
# If a context image was specified, build a PyFITS object
# for it as well...
if self.outcontext and ctxarr is not None:
fctx = fits.HDUList()
# If there is only 1 plane, write it out as a 2-D extension
if ctxarr.shape[0] == 1:
_ctxarr = ctxarr[0]
else:
_ctxarr = ctxarr
if self.compress:
hdu = fits.CompImageHDU(data=_ctxarr, header=prihdu.header)
else:
hdu = fits.ImageHDU(data=_ctxarr, header=prihdu.header)
# Append remaining unique header keywords from template DQ
# header to Primary header...
if dqhdr:
for _card in dqhdr.cards:
if ( (_card.keyword not in RESERVED_KEYS) and
_card.keyword not in hdu.header):
hdu.header.append(_card)
hdu.header['filename'] = self.outcontext
if self.wcs:
pre_wcs_kw = self.find_kwupdate_location(hdu.header,'CD1_1')
# Update WCS Keywords based on PyDrizzle product's value
# since 'drizzle' itself doesn't update that keyword.
addWCSKeywords(self.wcs,hdu.header, blot=self.blot,
single=self.single, after=pre_wcs_kw)
fctx.append(hdu)
# remove all alternate WCS solutions from headers of this product
wcs_functions.removeAllAltWCS(fctx,wcs_ext)
if not virtual:
print('Writing out image to disk:',self.outcontext)
fctx.writeto(self.outcontext)
del fctx,hdu
fctx = None
# End 'if not virtual'
outputFITS[self.outcontext]= fctx
return outputFITS
|
Generate PyFITS objects for each output extension
using the file given by 'template' for populating
headers.
The arrays will have the size specified by 'shape'.
|
entailment
|
def find_kwupdate_location(self,hdr,keyword):
"""
Find the last keyword in the output header that comes before the new
keyword in the original, full input headers.
This will rely on the original ordering of keywords from the original input
files in order to place the updated keyword in the correct location in case
the keyword was removed from the output header prior to calling this method.
"""
# start by looping through the full templates
kw_list = None
last_kw = None
for extn in self.fullhdrs:
if keyword in extn:
#indx = extn.ascard.index_of(keyword)
indx = extn.index(keyword)
kw_list = list(extn.keys())[:indx]
break
if kw_list:
# find which keyword from this list exists in header to be updated
for kw in kw_list[::-1]:
if kw in hdr:
last_kw = kw
break
# determine new value for the last keyword found before the HISTORY kws
if last_kw is None:
hdrkeys = list(hdr.keys())
i = -1
last_kw = hdrkeys[i]
while last_kw == 'HISTORY':
i -= 1
last_kw = hdrkeys[i]
return last_kw
|
Find the last keyword in the output header that comes before the new
keyword in the original, full input headers.
This will rely on the original ordering of keywords from the original input
files in order to place the updated keyword in the correct location in case
the keyword was removed from the output header prior to calling this method.
|
entailment
|
def addDrizKeywords(self,hdr,versions):
""" Add drizzle parameter keywords to header. """
# Extract some global information for the keywords
_geom = 'User parameters'
_imgnum = 0
for pl in self.parlist:
# Start by building up the keyword prefix based
# on the image number for the chip
#_keyprefix = 'D%03d'%_imgnum
_imgnum += 1
drizdict = DRIZ_KEYWORDS.copy()
# Update drizdict with current values
drizdict['VER']['value'] = pl['driz_version'][:44]
drizdict['DATA']['value'] = pl['data'][:64]
drizdict['DEXP']['value'] = pl['exptime']
drizdict['OUDA']['value'] = pl['outFinal'][:64]
drizdict['OUWE']['value'] = pl['outWeight'][:64]
if pl['outContext'] is None:
outcontext = ""
else:
outcontext = pl['outContext'][:64]
drizdict['OUCO']['value'] = outcontext
if self.single:
drizdict['MASK']['value'] = pl['singleDrizMask'][:64]
else:
drizdict['MASK']['value'] = pl['finalMask'][:64]
# Process the values of WT_SCL to be consistent with
# what IRAF Drizzle would output
if 'wt_scl_val' in pl:
_wtscl = pl['wt_scl_val']
else:
if pl['wt_scl'] == 'exptime': _wtscl = pl['exptime']
elif pl['wt_scl'] == 'expsq': _wtscl = pl['exptime']*pl['exptime']
else: _wtscl = pl['wt_scl']
drizdict['WTSC']['value'] = _wtscl
drizdict['KERN']['value'] = pl['kernel']
drizdict['PIXF']['value'] = pl['pixfrac']
drizdict['OUUN']['value'] = self.units
if pl['fillval'] is None:
_fillval = 'INDEF'
else:
_fillval = pl['fillval']
drizdict['FVAL']['value'] = _fillval
drizdict['WKEY']['value'] = pl['driz_wcskey']
drizdict['SCAL'] = {'value':pl['scale'],'comment':'Drizzle, pixel size (arcsec) of output image'}
drizdict['ISCL'] = {'value':pl['idcscale'],'comment':'Drizzle, default IDCTAB pixel size(arcsec)'}
# Now update header with values
writeDrizKeywords(hdr,_imgnum,drizdict)
del drizdict
# Add version information as HISTORY cards to the header
if versions is not None:
ver_str = "AstroDrizzle processing performed using: "
hdr.add_history(ver_str)
for k in versions.keys():
ver_str = ' '+str(k)+' Version '+str(versions[k])
hdr.add_history(ver_str)
|
Add drizzle parameter keywords to header.
|
entailment
|
def iter_fit_shifts(xy,uv,nclip=3,sigma=3.0):
""" Perform an iterative-fit with 'nclip' iterations
"""
fit = fit_shifts(xy,uv)
if nclip is None: nclip = 0
# define index to initially include all points
for n in range(nclip):
resids = compute_resids(xy,uv,fit)
resids1d = np.sqrt(np.power(resids[:,0],2)+np.power(resids[:,1],2))
sig = resids1d.std()
# redefine what pixels will be included in next iteration
goodpix = resids1d < sigma*sig
xy = xy[goodpix]
uv = uv[goodpix]
fit = fit_shifts(xy,uv)
fit['img_coords'] = xy
fit['ref_coords'] = uv
return fit
|
Perform an iterative-fit with 'nclip' iterations
|
entailment
|
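A usage sketch on synthetic data: a pure shift plus one gross outlier, which the sigma-clipping iterations reject (all values illustrative):

import numpy as np

np.random.seed(1)
uv = np.random.uniform(0., 1024., size=(50, 2))
xy = uv + np.array([1.5, -0.7])   # true shift
xy[0] += 25.0                     # one gross outlier
fit = iter_fit_shifts(xy, uv, nclip=3, sigma=3.0)
print(fit['offset'])              # ~ [ 1.5 -0.7] once the outlier is clipped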
def fit_all(xy,uv,mode='rscale',center=None,verbose=True):
""" Performs an 'rscale' fit between matched lists of pixel positions xy and uv"""
if mode not in ['general', 'shift', 'rscale']:
mode = 'rscale'
if not isinstance(xy,np.ndarray):
# cast input list as numpy ndarray for fitting
xy = np.array(xy)
if not isinstance(uv,np.ndarray):
# cast input list as numpy ndarray for fitting
uv = np.array(uv)
if mode == 'shift':
logstr = 'Performing "shift" fit'
if verbose:
print(logstr)
else:
log.info(logstr)
result = fit_shifts(xy, uv)
elif mode == 'general':
logstr = 'Performing "general" fit'
if verbose:
print(logstr)
else:
log.info(logstr)
result = fit_general(xy, uv)
else:
logstr = 'Performing "rscale" fit'
if verbose:
print(logstr)
else:
log.info(logstr)
result = geomap_rscale(xy, uv, center=center)
return result
|
Performs an 'rscale' fit between matched lists of pixel positions xy and uv
|
entailment
|
def fit_shifts(xy, uv):
""" Performs a simple fit for the shift only between
matched lists of positions 'xy' and 'uv'.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
"""
diff_pts = xy - uv
Pcoeffs = np.array([1.0,0.0,diff_pts[:,0].mean(dtype=np.float64)])
Qcoeffs = np.array([0.0,1.0,diff_pts[:,1].mean(dtype=np.float64)])
fit = build_fit(Pcoeffs, Qcoeffs, 'shift')
resids = diff_pts - fit['offset']
fit['resids'] = resids
fit['rms'] = resids.std(axis=0)
fit['rmse'] = float(np.sqrt(np.mean(2 * resids**2)))
fit['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
return fit
|
Performs a simple fit for the shift only between
matched lists of positions 'xy' and 'uv'.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
|
entailment
|
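A minimal check that a pure translation is recovered exactly, along with the residual statistics the fit reports:

import numpy as np

uv = np.array([[10., 10.], [20., 15.], [30., 40.], [12., 33.]])
xy = uv + np.array([2.0, -1.0])
fit = fit_shifts(xy, uv)
print(fit['offset'])             # [ 2. -1.]
print(fit['rms'], fit['rmse'])   # ~0: the model describes the data exactly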
def fit_general(xy, uv):
""" Performs a simple fit for the shift only between
matched lists of positions 'xy' and 'uv'.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
"""
# Set up products used for computing the fit
gxy = uv.astype(ndfloat128)
guv = xy.astype(ndfloat128)
Sx = gxy[:,0].sum()
Sy = gxy[:,1].sum()
Su = guv[:,0].sum()
Sv = guv[:,1].sum()
Sux = np.dot(guv[:,0], gxy[:,0])
Svx = np.dot(guv[:,1], gxy[:,0])
Suy = np.dot(guv[:,0], gxy[:,1])
Svy = np.dot(guv[:,1], gxy[:,1])
Sxx = np.dot(gxy[:,0], gxy[:,0])
Syy = np.dot(gxy[:,1], gxy[:,1])
Sxy = np.dot(gxy[:,0], gxy[:,1])
n = len(xy[:,0])
M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])
U = np.array([Su, Sux, Suy])
V = np.array([Sv, Svx, Svy])
# The fit solution...
# where
# u = P0 + P1*x + P2*y
# v = Q0 + Q1*x + Q2*y
#
try:
invM = np.linalg.inv(M.astype(np.float64))
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
P = np.dot(invM, U).astype(np.float64)
Q = np.dot(invM, V).astype(np.float64)
if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):
raise ArithmeticError('Singular matrix.')
# Return the shift, rotation, and scale changes
result = build_fit(P, Q, 'general')
resids = xy - np.dot(uv, result['fit_matrix']) - result['offset']
result['rms'] = resids.std(axis=0)
result['resids'] = resids
result['rmse'] = float(np.sqrt(np.mean(2 * resids**2)))
result['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
return result
|
Performs a general linear (affine) fit between matched lists of
positions 'xy' and 'uv', solving for shift, rotation, and scale.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
|
entailment
|
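The normal-equations solve above is algebraically the ordinary least-squares solution of u = P0 + P1*x + P2*y (and likewise for v); a cross-check sketch with numpy.linalg.lstsq, using synthetic data:

import numpy as np

np.random.seed(2)
uv = np.random.uniform(0., 1000., size=(20, 2))
mat = np.array([[1.001, 0.0021], [-0.0021, 1.001]])   # small rotation + scale
xy = uv @ mat + np.array([2.0, -1.0])
A = np.column_stack([uv, np.ones(len(uv))])           # columns: x, y, 1
P_ls, *_ = np.linalg.lstsq(A, xy[:, 0], rcond=None)
Q_ls, *_ = np.linalg.lstsq(A, xy[:, 1], rcond=None)
# P_ls and Q_ls (in [P1, P2, P0] order) match fit_general's P and Q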
def fit_arrays(uv, xy):
""" Performs a generalized fit between matched lists of positions
given by the 2 column arrays xy and uv.
This function fits for translation, rotation, and scale changes
between 'xy' and 'uv', allowing for different scales and
orientations for X and Y axes.
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
Output:
(Xo,Yo),Rot,(Scale,Sx,Sy)
where
Xo,Yo: offset,
Rot: rotation,
Scale: average scale change, and
Sx,Sy: scale changes in X and Y separately.
Algorithm and nomenclature provided by: Colin Cox (11 Nov 2004)
"""
if not isinstance(xy,np.ndarray):
# cast input list as numpy ndarray for fitting
xy = np.array(xy)
if not isinstance(uv,np.ndarray):
# cast input list as numpy ndarray for fitting
uv = np.array(uv)
# Set up products used for computing the fit
Sx = xy[:,0].sum()
Sy = xy[:,1].sum()
Su = uv[:,0].sum()
Sv = uv[:,1].sum()
Sux = np.dot(uv[:,0], xy[:,0])
Svx = np.dot(uv[:,1], xy[:,0])
Suy = np.dot(uv[:,0], xy[:,1])
Svy = np.dot(uv[:,1], xy[:,1])
Sxx = np.dot(xy[:,0], xy[:,0])
Syy = np.dot(xy[:,1], xy[:,1])
Sxy = np.dot(xy[:,0], xy[:,1])
n = len(xy[:,0])
M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])
U = np.array([Su, Sux, Suy])
V = np.array([Sv, Svx, Svy])
# The fit solution...
# where
# u = P0 + P1*x + P2*y
# v = Q0 + Q1*x + Q2*y
#
try:
invM = np.linalg.inv(M.astype(np.float64))
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
P = np.dot(invM, U).astype(np.float64)
Q = np.dot(invM, V).astype(np.float64)
if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):
raise ArithmeticError('Singular matrix.')
# Return the shift, rotation, and scale changes
return build_fit(P, Q, 'general')
|
Performs a generalized fit between matched lists of positions
given by the 2 column arrays xy and uv.
This function fits for translation, rotation, and scale changes
between 'xy' and 'uv', allowing for different scales and
orientations for X and Y axes.
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
Output:
(Xo,Yo),Rot,(Scale,Sx,Sy)
where
Xo,Yo: offset,
Rot: rotation,
Scale: average scale change, and
Sx,Sy: scale changes in X and Y separately.
Algorithm and nomenclature provided by: Colin Cox (11 Nov 2004)
|
entailment
|
def apply_old_coeffs(xy,coeffs):
""" Apply the offset/shift/rot values from a linear fit
to an array of x,y positions.
"""
_theta = np.deg2rad(coeffs[1])
_mrot = np.zeros(shape=(2,2),dtype=np.float64)
_mrot[0] = (np.cos(_theta),np.sin(_theta))
_mrot[1] = (-np.sin(_theta),np.cos(_theta))
new_pos = (np.dot(xy,_mrot)/coeffs[2][0]) + coeffs[0]
return new_pos
|
Apply the offset/shift/rot values from a linear fit
to an array of x,y positions.
|
entailment
|
def apply_fit(xy,coeffs):
""" Apply the coefficients from a linear fit to
an array of x,y positions.
The coeffs come from the 'coeffs' member of the
'fit_arrays()' output.
"""
x_new = coeffs[0][2] + coeffs[0][0]*xy[:,0] + coeffs[0][1]*xy[:,1]
y_new = coeffs[1][2] + coeffs[1][0]*xy[:,0] + coeffs[1][1]*xy[:,1]
return x_new,y_new
|
Apply the coefficients from a linear fit to
an array of x,y positions.
The coeffs come from the 'coeffs' member of the
'fit_arrays()' output.
|
entailment
|
def compute_resids(xy,uv,fit):
""" Compute the residuals based on fit and input arrays to the fit
"""
print('FIT coeffs: ',fit['coeffs'])
xn,yn = apply_fit(uv,fit['coeffs'])
resids = xy - np.transpose([xn,yn])
return resids
|
Compute the residuals based on fit and input arrays to the fit
|
entailment
|
def geomap_rscale(xyin,xyref,center=None):
"""
Set up the products used for computing the fit derived using the code from
lib/geofit.x for the function 'geo_fmagnify()'. Comparisons with results from
geomap (no additional clipping) were made and produced the same results
out to 5 decimal places.
Output
------
fit: dict
Dictionary containing full solution for fit.
"""
if center is not None:
xcen = center[0]
ycen = center[1]
else:
xcen = xyref[:,0].mean()
ycen = xyref[:,1].mean()
dx = xyref[:,0].astype(ndfloat128)
dy = xyref[:,1].astype(ndfloat128)
du = xyin[:,0].astype(ndfloat128)
dv = xyin[:,1].astype(ndfloat128)
n = xyref.shape[0]
Sx = dx.sum()
Sy = dy.sum()
Su = du.sum()
Sv = dv.sum()
xr0 = Sx/n
yr0 = Sy/n
xi0 = Su/n
yi0 = Sv/n
Sxrxr = np.power((dx-xr0),2).sum()
Syryr = np.power((dy-yr0),2).sum()
Syrxi = ((dy-yr0)*(du-xi0)).sum()
Sxryi = ((dx-xr0)*(dv-yi0)).sum()
Sxrxi = ((dx-xr0)*(du-xi0)).sum()
Syryi = ((dy-yr0)*(dv-yi0)).sum()
rot_num = Sxrxi * Syryi
rot_denom = Syrxi * Sxryi
if rot_num == rot_denom: det = 0.0
else: det = rot_num - rot_denom
if (det < 0):
rot_num = Syrxi + Sxryi
rot_denom = Sxrxi - Syryi
else:
rot_num = Syrxi - Sxryi
rot_denom = Sxrxi + Syryi
if rot_num == rot_denom: theta = 0.0
else:
theta = np.rad2deg(np.arctan2(rot_num,rot_denom))
if theta < 0:
theta += 360.0
ctheta = np.cos(np.deg2rad(theta))
stheta = np.sin(np.deg2rad(theta))
s_num = rot_denom*ctheta + rot_num*stheta
s_denom = Sxrxr + Syryr
if s_denom < 0:
mag = 1.0
else:
mag = s_num/s_denom
if det < 0:
# "flip" y-axis (reflection about x-axis *after* rotation)
# NOTE: keep in mind that 'fit_matrix'
# is the transposed rotation matrix.
sthetax = -mag*stheta
cthetay = -mag*ctheta
else:
sthetax = mag*stheta
cthetay = mag*ctheta
cthetax = mag*ctheta
sthetay = mag*stheta
sdet = np.sign(det)
xshift = (xi0 - (xr0*cthetax + sdet*yr0*sthetax)).astype(np.float64)
yshift = (yi0 - (-sdet*xr0*sthetay + yr0*cthetay)).astype(np.float64)
P = np.array([ cthetax, sthetay, xshift],dtype=np.float64)
Q = np.array([ -sthetax, cthetay, yshift],dtype=np.float64)
# Return the shift, rotation, and scale changes
result = build_fit(P, Q, fitgeom='rscale')
resids = xyin - np.dot((xyref), result['fit_matrix']) - result['offset']
result['rms'] = resids.std(axis=0)
result['resids'] = resids
result['rmse'] = float(np.sqrt(np.mean(2 * resids**2)))
result['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
return result
|
Set up the products used for computing the fit derived using the code from
lib/geofit.x for the function 'geo_fmagnify()'. Comparisons with results from
geomap (no additional clipping) were made and produced the same results
out to 5 decimal places.
Output
------
fit: dict
Dictionary containing full solution for fit.
|
entailment
|
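A sketch recovering a synthetic rscale transform (single scale, rotation, and shift); the residuals are essentially zero because the data lie exactly in the model family:

import numpy as np

np.random.seed(3)
theta = np.deg2rad(1.0)
mat = 1.002 * np.array([[np.cos(theta), np.sin(theta)],
                        [-np.sin(theta), np.cos(theta)]])
xyref = np.random.uniform(0., 4096., size=(30, 2))
xyin = xyref @ mat + np.array([5.0, -3.0])
fit = geomap_rscale(xyin, xyref)
print(fit['offset'])   # ~ [ 5. -3.]
# fit['fit_matrix'] reproduces mat; fit['rms'] is ~0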
def AstroDrizzle(input=None, mdriztab=False, editpars=False, configobj=None,
wcsmap=None, **input_dict):
""" AstroDrizzle command-line interface """
# Support input of filenames from command-line without a parameter name
# then copy this into input_dict for merging with TEAL ConfigObj
# parameters.
# Load any user-specified configobj
if isinstance(configobj, (str, bytes)):
if configobj == 'defaults':
# load "TEAL"-defaults (from ~/.teal/):
configobj = teal.load(__taskname__)
else:
if not os.path.exists(configobj):
raise RuntimeError('Cannot find .cfg file: '+configobj)
configobj = teal.load(configobj, strict=False)
elif configobj is None:
# load 'astrodrizzle' parameter defaults as described in the docs:
configobj = teal.load(__taskname__, defaults=True)
if input and not util.is_blank(input):
input_dict['input'] = input
elif configobj is None:
raise TypeError("AstroDrizzle() needs either 'input' or "
"'configobj' arguments")
if 'updatewcs' in input_dict: # user trying to explicitly turn on updatewcs
configobj['updatewcs'] = input_dict['updatewcs']
del input_dict['updatewcs']
# If called from interactive user-interface, configObj will not be
# defined yet, so get defaults using EPAR/TEAL.
#
# Also ensure that the input_dict (user-specified values) is folded in
# with a fully populated configObj instance.
try:
configObj = util.getDefaultConfigObj(__taskname__, configobj,
input_dict,
loadOnly=(not editpars))
log.debug('')
log.debug("INPUT_DICT:")
util.print_cfg(input_dict, log.debug)
log.debug('')
# If user specifies optional parameter for final_wcs specification in input_dict,
# ensure that the final_wcs step gets turned on
util.applyUserPars_steps(configObj, input_dict, step='3a')
util.applyUserPars_steps(configObj, input_dict, step='7a')
except ValueError:
print("Problem with input parameters. Quitting...", file=sys.stderr)
return
if not configObj:
return
configObj['mdriztab'] = mdriztab
# If 'editpars' was set to True, util.getDefaultConfigObj() will have
# already called 'run()'.
if not editpars:
run(configObj, wcsmap=wcsmap)
|
AstroDrizzle command-line interface
|
entailment
|
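A typical invocation sketch (file and output names illustrative): combine all *_flt.fits exposures in the working directory using the TEAL defaults:

from drizzlepac import astrodrizzle

astrodrizzle.AstroDrizzle(input='*_flt.fits', output='final',
                          configobj='defaults', updatewcs=False)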
def run(configobj, wcsmap=None):
"""
Run AstroDrizzle from a fully populated configObj instance.
Interactively, it can be run in one of two ways through TEAL
(from stsci.tools import teal):
1. Passing a config object to teal:
teal.teal('drizzlepac/pars/astrodrizzle.cfg')
2. Passing a task name:
teal.teal('astrodrizzle')
The example config files are in drizzlepac/pars.
"""
# turn on logging, redirecting stdout/stderr messages to a log file
# while also printing them out to stdout as well
# also, initialize timing of processing steps
#
# We need to define a default logfile name from the user's parameters
input_list, output, ivmlist, odict = \
processInput.processFilenames(configobj['input'])
if output is not None:
def_logname = output
elif len(input_list) > 0:
def_logname = input_list[0]
else:
print(textutil.textbox(
"ERROR:\nNo valid input files found! Please restart the task "
"and check the value for the 'input' parameter."), file=sys.stderr)
def_logname = None
return
clean = configobj['STATE OF INPUT FILES']['clean']
procSteps = util.ProcSteps()
print("AstroDrizzle Version {:s} ({:s}) started at: {:s}\n"
.format(__version__, __version_date__, util._ptime()[0]))
util.print_pkg_versions(log=log)
log.debug('')
log.debug(
"==== AstroDrizzle was invoked with the following parameters: ===="
)
log.debug('')
util.print_cfg(configobj, log.debug)
try:
# Define list of imageObject instances and output WCSObject instance
# based on input parameters
imgObjList = None
procSteps.addStep('Initialization')
imgObjList, outwcs = processInput.setCommonInput(configobj)
procSteps.endStep('Initialization')
if imgObjList is None or not imgObjList:
errmsg = "No valid images found for processing!\n"
errmsg += "Check log file for full details.\n"
errmsg += "Exiting AstroDrizzle now..."
print(textutil.textbox(errmsg, width=65))
print(textutil.textbox(
'ERROR:\nAstroDrizzle Version {:s} encountered a problem! '
'Processing terminated at {:s}.'
.format(__version__, util._ptime()[0])), file=sys.stderr)
return
log.info("USER INPUT PARAMETERS common to all Processing Steps:")
util.printParams(configobj, log=log)
# Call rest of MD steps...
#create static masks for each image
staticMask.createStaticMask(imgObjList, configobj,
procSteps=procSteps)
#subtract the sky
sky.subtractSky(imgObjList, configobj, procSteps=procSteps)
# _dbg_dump_virtual_outputs(imgObjList)
#drizzle to separate images
adrizzle.drizSeparate(imgObjList, outwcs, configobj, wcsmap=wcsmap,
procSteps=procSteps)
# _dbg_dump_virtual_outputs(imgObjList)
#create the median images from the driz sep images
createMedian.createMedian(imgObjList, configobj, procSteps=procSteps)
#blot the images back to the original reference frame
ablot.runBlot(imgObjList, outwcs, configobj, wcsmap=wcsmap,
procSteps=procSteps)
#look for cosmic rays
drizCR.rundrizCR(imgObjList, configobj, procSteps=procSteps)
#Make your final drizzled image
adrizzle.drizFinal(imgObjList, outwcs, configobj, wcsmap=wcsmap,
procSteps=procSteps)
print()
print("AstroDrizzle Version {:s} is finished processing at {:s}.\n"
.format(__version__, util._ptime()[0]))
except:
clean = False
print(textutil.textbox(
"ERROR:\nAstroDrizzle Version {:s} encountered a problem! "
"Processing terminated at {:s}."
.format(__version__, util._ptime()[0])), file=sys.stderr)
raise
finally:
procSteps.reportTimes()
if imgObjList:
for image in imgObjList:
if clean:
image.clean()
image.close()
del imgObjList
del outwcs
|
Run AstroDrizzle from a fully populated configObj instance.
Interactively, it can be run in one of two ways through TEAL
(from stsci.tools import teal):
1. Passing a config object to teal:
teal.teal('drizzlepac/pars/astrodrizzle.cfg')
2. Passing a task name:
teal.teal('astrodrizzle')
The example config files are in drizzlepac/pars.
|
entailment
|
def _dbg_dump_virtual_outputs(imgObjList):
""" dump some helpful information. strictly for debugging """
global _fidx
tag = 'virtual'
log.info((tag+' ')*7)
for iii in imgObjList:
log.info('-'*80)
log.info(tag+' orig nm: '+iii._original_file_name)
log.info(tag+' names.data: '+str(iii.outputNames["data"]))
log.info(tag+' names.orig: '+str(iii.outputNames["origFilename"]))
log.info(tag+' id: '+str(id(iii)))
log.info(tag+' in.mem: '+str(iii.inmemory))
log.info(tag+' vo items...')
for vok in sorted(iii.virtualOutputs.keys()):
FITSOBJ = iii.virtualOutputs[vok]
log.info(tag+': '+str(vok)+' = '+str(FITSOBJ))
if vok.endswith('.fits'):
if not hasattr(FITSOBJ, 'data'):
FITSOBJ = FITSOBJ[0] # list of PrimaryHDU ?
if not hasattr(FITSOBJ, 'data'):
FITSOBJ = FITSOBJ[0] # was list of HDUList ?
dbgname = 'DEBUG_%02d_'%(_fidx,)
dbgname+=os.path.basename(vok)
_fidx+=1
FITSOBJ.writeto(dbgname)
log.info(tag+' wrote: '+dbgname)
log.info('\n'+vok)
if hasattr(FITSOBJ, 'data'):
log.info(str(FITSOBJ._summary()))
log.info('min and max are: '+str( (FITSOBJ.data.min(),
FITSOBJ.data.max()) ))
log.info('avg and sum are: '+str( (FITSOBJ.data.mean(),
FITSOBJ.data.sum()) ))
# log.info(str(FITSOBJ.data)[:75])
else:
log.info(vok+' has no .data attr')
log.info(str(type(FITSOBJ)))
log.info(vok+'\n')
log.info('-'*80)
|
dump some helpful information. strictly for debugging
|
entailment
|
def getdarkcurrent(self,chip):
"""
Return the dark current for the WFC3 UVIS detector. This value
will be contained within an instrument specific keyword.
Returns
-------
darkcurrent: float
The dark current value with **units of electrons**.
"""
darkcurrent = 0.
try:
darkcurrent = self._image[self.scienceExt, chip].header['MEANDARK']
except KeyError:
msg = "#############################################\n"
msg += "# #\n"
msg += "# Error: #\n"
msg += "# Cannot find the value for 'MEANDARK' #\n"
msg += "# in the image header. WFC3 input images #\n"
msg += "# are expected to have this header #\n"
msg += "# keyword. #\n"
msg += "# #\n"
msg += "# Error occured in WFC3UVISInputImage class #\n"
msg += "# #\n"
msg += "#############################################\n"
raise ValueError(msg)
return darkcurrent
|
Return the dark current for the WFC3 UVIS detector. This value
will be contained within an instrument specific keyword.
Returns
-------
darkcurrent: float
The dark current value with **units of electrons**.
|
entailment
|
def doUnitConversions(self):
"""WF3 IR data come out in electrons, and I imagine the
photometry keywords will be calculated as such, so no image
manipulation needs be done between native and electrons """
# Image information
_handle = fileutil.openImage(self._filename, mode='readonly', memmap=False)
for chip in self.returnAllChips(extname=self.scienceExt):
conversionFactor = 1.0
if '/S' in chip._bunit:
conversionFactor = chip._exptime
else:
print("Input %s[%s,%d] already in units of ELECTRONS"
%(self._filename,self.scienceExt,chip._chip))
chip._effGain = 1.0
chip._conversionFactor = conversionFactor
_handle.close()
self._effGain= 1.0
|
WFC3 IR data come out in electrons, and the photometry keywords
are expected to be calculated as such, so no image manipulation
needs to be done between native units and electrons.
|
entailment
|
def getdarkimg(self,chip):
"""
Return an array representing the dark image for the detector.
Returns
-------
dark: array
Dark image array in the same shape as the input image with **units of cps**
"""
sci_chip = self._image[self.scienceExt,chip]
# First attempt to get the dark image specified by the "DARKFILE"
# keyword in the primary keyword of the science data.
try:
filename = self.header["DARKFILE"]
handle = fileutil.openImage(filename, mode='readonly', memmap=False)
hdu = fileutil.getExtn(handle,extn="sci,1")
darkobj = hdu.data[sci_chip.ltv2:sci_chip.size2,sci_chip.ltv1:sci_chip.size1]
# If the darkfile cannot be located, create the dark image from
# what we know about the detector dark current and assume a
# constant dark current for the whole image.
except:
darkobj = (np.ones(sci_chip.image_shape,
dtype=sci_chip.image_dtype) *
self.getdarkcurrent())
return darkobj
|
Return an array representing the dark image for the detector.
Returns
-------
dark: array
Dark image array in the same shape as the input image with **units of cps**
|
entailment
|
def getskyimg(self,chip):
"""
Notes
=====
Return an array representing the sky image for the detector. The value
of the sky is what would actually be subtracted from the exposure by
the skysub step.
:units: electrons
"""
sci_chip = self._image[self.scienceExt,chip]
skyimg = np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*sci_chip.subtractedSky
if sci_chip._conversionFactor != 1.0: # If units are not already ELECTRONS
skyimg *= self.getexptimeimg(chip)
return skyimg
|
Notes
=====
Return an array representing the sky image for the detector. The value
of the sky is what would actually be subtracted from the exposure by
the skysub step.
:units: electrons
|
entailment
|
def getdarkcurrent(self,extver):
"""
Return the dark current for the ACS detector. This value
will be contained within an instrument specific keyword.
The value in the image header will be converted to units
of electrons.
Returns
-------
darkcurrent: float
Dark current value for the ACS detector in **units of electrons**.
"""
darkcurrent=0.
try:
darkcurrent = self._image[self.scienceExt,extver].header['MEANDARK']
except KeyError:
str = "#############################################\n"
str += "# #\n"
str += "# Error: #\n"
str += "# Cannot find the value for 'MEANDARK' #\n"
str += "# in the image header. ACS input images #\n"
str += "# are expected to have this header #\n"
str += "# keyword. #\n"
str += "# #\n"
str += "# Error occured in the ACSInputImage class #\n"
str += "# #\n"
str += "#############################################\n"
raise ValueError(str)
return darkcurrent
|
Return the dark current for the ACS detector. This value
will be contained within an instrument specific keyword.
The value in the image header will be converted to units
of electrons.
Returns
-------
darkcurrent: float
Dark current value for the ACS detector in **units of electrons**.
|
entailment
|
def setInstrumentParameters(self,instrpars):
""" Sets the instrument parameters.
"""
pri_header = self._image[0].header
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = None
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
# We need to treat Read Noise and Gain as a special case since it is
# not populated in the SBC primary header for the MAMA
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain = 1.0 # gain is not populated in the SBC (MAMA) primary header
chip._rdnoise = 0.0 # readnoise is not populated in the SBC (MAMA) primary header
chip._exptime = self.getInstrParameter(instrpars['exptime'], pri_header,
instrpars['expkeyword'])
if chip._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
# We need to determine if the user has used the default readnoise/gain value
# since if not, they will need to supply a gain/readnoise value as well
usingDefaultGain = instrpars['gnkeyword'] is None
usingDefaultReadnoise = instrpars['rnkeyword'] is None
# Set the default readnoise or gain values based upon the amount of user input given.
# Case 1: User supplied no gain or readnoise information
if usingDefaultReadnoise and usingDefaultGain:
# Set the default gain and readnoise values
self._setSBCchippars()
# Case 2: The user has supplied a value for gain
elif usingDefaultReadnoise and not usingDefaultGain:
# Set the default readnoise value
self._setDefaultSBCReadnoise()
# Case 3: The user has supplied a value for readnoise
elif not usingDefaultReadnoise and usingDefaultGain:
# Set the default gain value
self._setDefaultSBCGain()
else:
# In this case, the user has specified both a gain and readnoise values. Just use them as is.
pass
|
Sets the instrument parameters.
|
entailment
|
def min_med(images, weight_images, readnoise_list, exptime_list,
background_values, weight_masks=None, combine_grow=1,
combine_nsigma1=4, combine_nsigma2=3, fillval=False):
""" Create a median array, rejecting the highest pixel and
computing the lowest valid pixel after mask application.
.. note::
In this version of the minmed algorithm we assume that the units of
all input data are electrons.
Parameters
----------
images : list of numpy.ndarray
List of input data to be combined.
weight_images : list of numpy.ndarray
List of input data weight images to be combined.
readnoise_list : list
List of readnoise values to use for the input images.
exptime_list : list
List of exposure times to use for the input images.
background_values : list
List of image background values to use for the input images.
weight_masks : list of numpy.ndarray, None
List of input data weight masks to use for pixel rejection.
(Default: `None`)
combine_grow : int
Radius (pixels) for neighbor rejection. (Default: 1)
combine_nsigma1 : float
Significance for accepting minimum instead of median. (Default: 4)
combine_nsigma2 : float
Significance for accepting minimum instead of median. (Default: 3)
fillval : bool
Turn on use of imedian/imean. (Default: `False`)
Returns
-------
combined_array : numpy.ndarray
Combined array.
"""
# In this case we want to calculate two things:
# 1) the median array, rejecting the highest pixel (thus running
# imcombine with nlow=0, nhigh=1, nkeep=1, using the masks)
# 2) the lowest valid pixel after applying the masks (thus running
# imcombine with nlow=0, nhigh=3, nkeep=1, using the masks)
#
# We also calculate the sum of the weight files (to produce the total
# effective exposure time for each pixel).
#
# The total effective background in the final image is calculated as
# follows:
# - convert background for each input image to counts/s
# (divide by exptime)
# - multiply this value by the weight image, to obtain the effective
# background counts (in DN) for each pixel, for each image
# - Add these images together, to obtain the total effective background
# for the combined image.
#
# Once we've made these two files, then calculate the SNR based on the
# median-pixel image, and compare with the minimum.
nimages = len(images)
combtype_median = 'imedian' if fillval else 'median'
images = np.asarray(images)
weight_images = np.asarray(weight_images)
if weight_masks == [] or weight_masks is None:
weight_masks = None
mask_sum = np.zeros(images.shape[1:], dtype=np.int16)
all_bad_idx = np.array([], dtype=int)
all_bad_idy = np.array([], dtype=int)
else:
weight_masks = np.asarray(weight_masks, dtype=bool)
mask_sum = np.sum(weight_masks, axis=0, dtype=np.int16)
all_bad_idx, all_bad_idy = np.where(mask_sum == nimages)
# Create a different median image based upon the number of images in the
# input list.
if nimages == 2:
median_file = num_combine(
images,
masks=weight_masks,
combination_type='imean' if fillval else 'mean',
nlow=0, nhigh=0, lower=None, upper=None
)
else:
# The value of NHIGH=1 will cause problems when there is only 1 valid
# unmasked input image for that pixel due to a difference in behavior
# between 'num_combine' and 'iraf.imcombine'.
# This value may need to be adjusted on the fly based on the number of
# inputs and the number of masked values/pixel.
#
median_file = num_combine(
images,
masks=weight_masks,
combination_type=combtype_median,
nlow=0, nhigh=1, lower=None, upper=None
)
# The following section of code will address the problem caused by
# having a value of nhigh = 1. This will behave in a way similar to
# the way the IRAF task IMCOMBINE behaves. In order to accomplish
# this, the following procedure will be followed:
# 1) The input masks will be summed.
# 2) The science data will be summed.
# 3) In the locations of the summed mask where the sum is 1 less than
# the total number of images, the value of that location in the
# summed science image will be used to replace the existing value
# in the existing median_file.
#
# This procedure is being used to prevent too much data from being
# thrown out of the image. Take for example the case of 3 input images.
# In two of the images the pixel locations have been masked out.
# Now, if nhigh is applied there will be no value to use for that
# position. However, if this new procedure is used that value in
# the resulting images will be the value that was rejected by the
# nhigh rejection step.
# We need to make certain that "bad" pixels in the sci data are set to
# 0. That way, when the sci images are summed, the value of the sum
# will only come from the "good" pixels.
if weight_masks is None:
sci_sum = np.sum(images, axis=0)
if nimages == 1:
median_file = sci_sum
else:
sci_sum = np.sum(images * np.logical_not(weight_masks), axis=0)
# Use the summed sci image values in locations where the mask_sum
# indicates that there is only 1 good pixel to use. The value will
# be used in the median_file image
idx = np.where(mask_sum == (nimages - 1))
median_file[idx] = sci_sum[idx]
# Create the minimum image from the stack of input images.
if weight_masks is not None:
# make a copy of images to avoid side-effect of modifying input
# argument:
images = images.copy()
images[weight_masks] = np.nan
images[:, all_bad_idx, all_bad_idy] = 0
minimum_file = np.nanmin(images, axis=0)
else:
minimum_file = np.amin(images, axis=0)
# Scale the weight images by the background values and add them to the bk
# Create an image of the total effective background (in DN) per pixel:
# (which is the sum of all the background-scaled weight files)
s = np.asarray([bv / et for bv, et in
zip(background_values, exptime_list)])
bkgd_file = np.sum(weight_images * s[:, None, None], axis=0)
# Scale the weight mask images by the square of the readnoise values.
# Create an image of the total readnoise**2 per pixel
# (which is the sum of all the input readnoise values).
if weight_masks is None:
rdn2 = sum((r**2 for r in readnoise_list))
readnoise_file = rdn2 * np.ones_like(images[0])
else:
readnoise_file = np.sum(
np.logical_not(weight_masks) *
(np.asarray(readnoise_list)**2)[:, None, None],
axis=0
)
# Create an image of the total effective exposure time per pixel:
# (which is simply the sum of all the drizzle output weight files)
weight_file = np.sum(weight_images, axis=0)
# Scale up both the median and minimum arrays by the total effective
# exposure time per pixel.
minimum_file_weighted = minimum_file * weight_file
median_file_weighted = median_file * weight_file
del weight_file
# Calculate the 1-sigma r.m.s.:
# variance = median_electrons + bkgd_electrons + readnoise**2
# rms = sqrt(variance)
# This image has units of electrons.
#
# clip at zero so that negative values don't raise an exception in sqrt
rms_file2 = np.fmax(
median_file_weighted + bkgd_file + readnoise_file,
np.zeros_like(median_file_weighted)
)
rms_file = np.sqrt(rms_file2)
del bkgd_file, readnoise_file
# For the median array, calculate the n-sigma lower threshold to the array
# and incorporate that into the pixel values.
median_rms_file = median_file_weighted - rms_file * combine_nsigma1
if combine_grow != 0:
# Do a more sophisticated rejection: For all cases where the minimum
# pixel will be accepted instead of the median, set a lower threshold
# for that pixel and the ones around it (ie become less conservative
# in rejecting the median). This is because in cases of
# triple-incidence cosmic rays, quite often the low-lying outliers
# of the CRs can influence the median for the initial relatively high
# value of sigma, so a lower threshold must be used to mnake sure that
# the minimum is selected.
#
# This is done as follows:
# 1) make an image which is zero everywhere except where the minimum
# will be accepted
# 2) box-car smooth this image, to make these regions grow.
# 3) In the file "median_rms_file_electrons", replace these pixels
# by median - combine_nsigma2 * rms
#
# Then use this image in the final replacement, in the same way as for
# the case where this option is not selected.
minimum_flag_file = np.less(minimum_file_weighted,
median_rms_file).astype(np.float64)
# The box size value must be an integer. This is not a problem since
# __combine_grow should always be an integer type. The combine_grow
# column in the MDRIZTAB should also be an integer type.
boxsize = int(2 * combine_grow + 1)
boxshape = (boxsize, boxsize)
minimum_grow_file = np.zeros_like(images[0])
# If the boxcar convolution has failed it is potentially for
# two reasons:
# 1) The kernel size for the boxcar is bigger than the actual image.
# 2) The grow parameter was specified with a value < 0. This would
# result in an illegal boxshape kernel. The dimensions of the
# kernel box *MUST* be integer and greater than zero.
#
# If the boxcar convolution has failed, try to give a meaningful
# explanation as to why based upon the conditionals described above.
if boxsize <= 0:
errormsg1 = "############################################################\n"
errormsg1 += "# The boxcar convolution in minmed has failed. The 'grow' #\n"
errormsg1 += "# parameter must be greater than or equal to zero. You #\n"
errormsg1 += "# specified an input value for the 'grow' parameter of: #\n"
errormsg1 += " combine_grow: " + str(combine_grow)+'\n'
errormsg1 += "############################################################\n"
raise ValueError(errormsg1)
if boxsize > images.shape[1]:
errormsg2 = "############################################################\n"
errormsg2 += "# The boxcar convolution in minmed has failed. The 'grow' #\n"
errormsg2 += "# parameter specified has resulted in a boxcar kernel that #\n"
errormsg2 += "# has dimensions larger than the actual image. You #\n"
errormsg2 += "# specified an input value for the 'grow' parameter of: #\n"
errormsg2 += " combine_grow: " + str(combine_grow) + '\n'
errormsg2 += "############################################################\n"
print(images.shape[1:])
raise ValueError(errormsg2)
# Attempt the boxcar convolution using the boxshape based upon the user
# input value of "grow"
ker = np.ones((boxsize, boxsize)) / float(boxsize**2)
minimum_grow_file = signal.convolve2d(minimum_flag_file, ker,
boundary='fill', mode='same')
median_rms_file = np.where(
np.equal(minimum_grow_file, 0),
median_file_weighted - rms_file * combine_nsigma1,
median_file_weighted - rms_file * combine_nsigma2
)
del rms_file, minimum_grow_file
# Finally decide whether to use the minimum or the median (in counts/s),
# based on whether the median is more than 3 sigma above the minimum.
combined_array = np.where(
np.less(minimum_file_weighted, median_rms_file),
minimum_file,
median_file
)
# Set fill regions to a pixel value of 0.
combined_array[all_bad_idx, all_bad_idy] = 0
return combined_array
|
Create a median array, rejecting the highest pixel and
computing the lowest valid pixel after mask application.
.. note::
In this version of the minmed algorithm we assume that the units of
all input data are electrons.
Parameters
----------
images : list of numpy.ndarray
List of input data to be combined.
weight_images : list of numpy.ndarray
List of input data weight images to be combined.
readnoise_list : list
List of readnoise values to use for the input images.
exptime_list : list
List of exposure times to use for the input images.
background_values : list
List of image background values to use for the input images.
weight_masks : list of numpy.ndarray, None
List of input data weight masks to use for pixel rejection.
(Default: `None`)
combine_grow : int
Radius (pixels) for neighbor rejection. (Default: 1)
combine_nsigma1 : float
Significance for accepting minimum instead of median. (Default: 4)
combine_nsigma2 : float
Significance for accepting minimum instead of median. (Default: 3)
fillval : bool
Turn on use of imedian/imean. (Default: `False`)
Returns
-------
combined_array : numpy.ndarray
Combined array.
|
entailment
|
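A sketch combining three tiny synthetic frames; the "cosmic ray" planted in one frame is rejected by the median/minimum logic (this assumes the module's num_combine dependency is importable, as in a normal drizzlepac install):

import numpy as np

imgs = [np.full((4, 4), v, dtype=np.float32) for v in (100., 102., 98.)]
imgs[1][2, 2] = 5000.0                          # simulated cosmic-ray hit
whts = [np.ones((4, 4), dtype=np.float32) for _ in range(3)]
combined = min_med(imgs, whts,
                   readnoise_list=[5.0, 5.0, 5.0],
                   exptime_list=[300.0, 300.0, 300.0],
                   background_values=[100.0, 102.0, 98.0])
print(combined[2, 2])   # close to the clean frames' level, not 5000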
def _sumImages(self,numarrayObjectList):
""" Sum a list of numarray objects. """
if numarrayObjectList in [None, []]:
return None
tsum = np.zeros(numarrayObjectList[0].shape, dtype=numarrayObjectList[0].dtype)
for image in numarrayObjectList:
tsum += image
return tsum
|
Sum a list of numarray objects.
|
entailment
|
def gaussian1(height, x0, y0, a, b, c):
"""
height - the amplitude of the gaussian
x0, y0, - center of the gaussian
a, b, c - ellipse parameters (coefficients in the quadratic form)
"""
return lambda x, y: height * np.exp(-0.5* (a*(x-x0)**2 + b*(x-x0)*(y-y0) + c*(y-y0)**2))
|
height - the amplitude of the gaussian
x0, y0, - center of the gaussian
a, b, c - ellipse parameters (coefficients in the quadratic form)
|
entailment
|
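Rendering the returned callable on a small pixel grid; with a = c = 0.5 and b = 0 this is a circular Gaussian with sigma**2 = 2:

import numpy as np

g = gaussian1(1.0, 5.0, 5.0, 0.5, 0.0, 0.5)
y, x = np.mgrid[0:11, 0:11]
img = g(x, y)
print(img[5, 5], img.max())   # 1.0 1.0 -- the peak sits at (x0, y0)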
def gausspars(fwhm, nsigma=1.5, ratio=1, theta=0.):
"""
height - the amplitude of the gaussian
x0, y0, - center of the gaussian
fwhm - full width at half maximum of the observation
nsigma - cut the gaussian at nsigma
ratio = ratio of xsigma/ysigma
theta - position angle of the major axis, measured
counter-clockwise from the x axis
Returns dimensions nx and ny of the elliptical kernel as well as the
ellipse parameters a, b, c, and f when defining an ellipse through the
quadratic form: a*(x-x0)^2+b(x-x0)*(y-y0)+c*(y-y0)^2 <= 2*f
"""
xsigma = fwhm / FWHM2SIG
ysigma = ratio * xsigma
f = nsigma**2/2.
theta_rad = np.deg2rad(theta)
cost = np.cos(theta_rad)
sint = np.sin(theta_rad)
if ratio == 0: # 1D Gaussian
# compare the angle in degrees, before the radian conversion above
if theta == 0 or theta == 180:
a = 1/xsigma**2
b = 0.0
c = 0.0
elif theta == 90:
a = 0.0
b = 0.0
c = 1/xsigma**2
else:
raise ValueError('Unable to construct 1D Gaussian with these parameters')
nx = 2 * int(max(2, (xsigma*nsigma*np.abs(cost))))+1
ny = 2 * int(max(2, (xsigma*nsigma*np.abs(sint))))+1
else: #2D gaussian
xsigma2 = xsigma * xsigma
ysigma2 = ysigma * ysigma
a = cost**2/xsigma2 + sint**2/ysigma2
b = 2 * cost * sint *(1.0/xsigma2-1.0/ysigma2)
c = sint**2/xsigma2 + cost**2/ysigma2
d = b**2 - 4*a*c # discriminant
# nx = int(2*max(2, math.sqrt(-8*c*f/d)))+1
# ny = int(2*max(2, math.sqrt(-8*a*f/d)))+1
nx = 2 * int(2*max(1, nsigma*math.sqrt(-c/d)))+1
ny = 2 * int(2*max(1, nsigma*math.sqrt(-a/d)))+1
return nx, ny, a, b, c, f
|
height - the amplitude of the gaussian
x0, y0, - center of the gaussian
fwhm - full width at half maximum of the observation
nsigma - cut the gaussian at nsigma
ratio = ratio of xsigma/ysigma
theta - position angle of the major axis, measured
counter-clockwise from the x axis
Returns dimensions nx and ny of the elliptical kernel as well as the
ellipse parameters a, b, c, and f when defining an ellipse through the
quadratic form: a*(x-x0)^2+b(x-x0)*(y-y0)+c*(y-y0)^2 <= 2*f
|
entailment
|
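As a usage sketch, the parameters returned by gausspars() can be turned into a normalized, truncated detection kernel. This assumes the module constant FWHM2SIG is the usual FWHM-to-sigma factor 2*sqrt(2*ln 2); all other names are local to the example.

import math
import numpy as np

FWHM2SIG = 2 * math.sqrt(2 * math.log(2))  # assumed module constant

def build_kernel(fwhm, nsigma=1.5, ratio=1.0, theta=0.0):
    nx, ny, a, b, c, f = gausspars(fwhm, nsigma=nsigma, ratio=ratio,
                                   theta=theta)
    y, x = np.mgrid[0:ny, 0:nx]
    x0, y0 = nx // 2, ny // 2
    # Quadratic form of the ellipse from the docstring above.
    q = a * (x - x0) ** 2 + b * (x - x0) * (y - y0) + c * (y - y0) ** 2
    kernel = np.where(q <= 2 * f, np.exp(-0.5 * q), 0.0)  # cut at nsigma
    return kernel / kernel.sum()

print(build_kernel(2.5).shape)   # odd-sized kernel, e.g. (5, 5)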
def moments(data,cntr):
"""
Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments.
"""
total = data.sum()
#X, Y = np.indices(data.shape)
#x = (X*data).sum()/total
#y = (Y*data).sum()/total
x,y = cntr
xi = int(x)
yi = int(y)
if xi < 0 or xi >= data.shape[1] or yi < 0 or yi >= data.shape[0]:
raise ValueError("source center lies outside the data array")
col = data[:, xi]
width_x = np.sqrt(abs(((np.arange(col.size)-y)**2*col).sum()/col.sum()))
row = data[yi, :]
width_y = np.sqrt(abs(((np.arange(row.size)-x)**2*row).sum()/row.sum()))
height = data.max()
return height, x, y, width_x, width_y
|
Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments.
|
entailment
|
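A quick usage sketch on a synthetic source (all names local to the example); note that, following the classic moments recipe, the first width is measured along the column at x (i.e. in the y direction) and the second along the row at y.

import numpy as np

y, x = np.mgrid[0:25, 0:25]
star = 100.0 * np.exp(-0.5 * (((x - 12.3) / 2.0) ** 2
                              + ((y - 11.7) / 2.5) ** 2))
height, x0, y0, wx, wy = moments(star, (12.3, 11.7))
print(height, wx, wy)   # amplitude ~1e2, widths ~2.5 (along y), ~2.0 (along x)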
def apply_nsigma_separation(fitind,fluxes,separation,niter=10):
"""
Remove sources which are within nsigma*fwhm/2 pixels of each other, leaving
only a single valid source in that region.
This algorithm only works for sources which end up sequentially next to each other
based on Y position and removes enough duplicates to make the final source list more
manageable. It sorts the positions by Y value in order to group those at the
same positions as much as possible.
"""
for n in range(niter):
if len(fitind) < 1:
break
fitarr = np.array(fitind,np.float32)
fluxarr = np.array(fluxes,np.float32)
inpind = np.argsort(fitarr[:,1])
npind = fitarr[inpind]
fluxind = fluxarr[inpind]
fitind = npind.tolist()
fluxes = fluxind.tolist()
dx = npind[1:,0] - npind[:-1,0]
dy = npind[1:,1] - npind[:-1,1]
dr = np.sqrt(np.power(dx,2)+np.power(dy,2))
nsame = np.where(dr <= separation)[0]
if nsame.shape[0] > 0:
for ind in nsame[-1::-1]:
#continue # <- turn off filtering by source separation
del fitind[ind]
del fluxes[ind]
else:
break
return fitind,fluxes
|
Remove sources which are within nsigma*fwhm/2 pixels of each other, leaving
only a single valid source in that region.
This algorithm only works for sources which end up sequentially next to each other
based on Y position and removes enough duplicates to make the final source list more
manageable. It sorts the positions by Y value in order to group those at the
same positions as much as possible.
|
entailment
|
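For example (values made up for illustration), two detections closer than the separation limit collapse to a single entry:

fitind = [[10.0, 10.0], [10.5, 10.2], [40.0, 40.0]]
fluxes = [120.0, 115.0, 300.0]
kept_pos, kept_flux = apply_nsigma_separation(fitind, fluxes, separation=2.0)
print(len(kept_pos))   # 2 -- the close pair was reduced to one source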
def xy_round(data,x0,y0,skymode,ker2d,xsigsq,ysigsq,datamin=None,datamax=None):
""" Compute center of source
Original code from IRAF.noao.digiphot.daofind.apfind ap_xy_round()
"""
nyk,nxk = ker2d.shape
if datamin is None:
datamin = data.min()
if datamax is None:
datamax = data.max()
# call C function for speed now...
xy_val = cdriz.arrxyround(data,x0,y0,skymode,ker2d,xsigsq,ysigsq,datamin,datamax)
if xy_val is None:
x = None
y = None
round = None
else:
x = xy_val[0]
y = xy_val[1]
round = xy_val[2]
return x,y,round
|
Compute center of source
Original code from IRAF.noao.digiphot.daofind.apfind ap_xy_round()
|
entailment
|
def precompute_sharp_round(nxk, nyk, xc, yc):
"""
Pre-computes mask arrays to be used by the 'sharp_round' function
for roundness computations based on two- and four-fold symmetries.
"""
# Create arrays for the two- and four-fold symmetry computations:
s4m = np.ones((nyk,nxk),dtype=np.int16)
s4m[yc, xc] = 0
s2m = np.ones((nyk,nxk),dtype=np.int16)
s2m[yc, xc] = 0
s2m[yc:nyk, 0:xc] = -1
s2m[0:yc+1, xc+1:nxk] = -1
return s2m, s4m
|
Pre-computes mask arrays to be used by the 'sharp_round' function
for roundness computations based on two- and four-fold symmetries.
|
entailment
|
def sharp_round(data, density, kskip, xc, yc, s2m, s4m, nxk, nyk,
datamin, datamax):
"""
sharp_round -- Compute first estimate of the roundness and sharpness of the
detected objects.
A Python translation of the AP_SHARP_ROUND IRAF/DAOFIND function.
"""
# Compute the first estimate of roundness:
sum2 = np.sum(s2m*density)
sum4 = np.sum(s4m*abs(density))
if sum2 == 0.0:
round = 0.0
elif sum4 <= 0.0: # eps?
round = None
else:
round = 2.0 * sum2 / sum4
# Eliminate the sharpness test if the central pixel is bad:
mid_data_pix = data[yc, xc]
mid_dens_pix = density[yc, xc]
if mid_data_pix > datamax:
return True, round, None
if mid_data_pix < datamin:
return False, round, None
########################
# Sharpness statistics:
satur = np.max(kskip*data) > datamax
# Exclude pixels (create a mask) outside the [datamin, datamax] range:
uskip = np.where((data >= datamin) & (data <= datamax), 1, 0)
# Update the mask with the "skipped" values from the convolution kernel:
uskip *= kskip
# Also, exclude central pixel:
uskip[yc, xc] = 0
npixels = np.sum(uskip)
if (npixels < 1 or mid_dens_pix <= 0.0):
return satur, round, None
sharp = (mid_data_pix - np.sum(uskip*data)/npixels) / mid_dens_pix
#sharp = (mid_data_pix - np.mean(uskip*data)) / mid_dens_pix
return satur, round, sharp
|
sharp_round -- Compute first estimate of the roundness and sharpness of the
detected objects.
A Python translation of the AP_SHARP_ROUND IRAF/DAOFIND function.
|
entailment
|
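A hedged wiring sketch for the two helpers above, on a synthetic 7x7 cutout. In real use `density` is the detection-kernel convolution of `data` and `kskip` marks kernel pixels excluded from the fit; both are stand-ins here.

import numpy as np

nyk = nxk = 7
yc = xc = 3
y, x = np.mgrid[0:nyk, 0:nxk]
data = 50.0 * np.exp(-0.5 * (((x - xc) / 1.5) ** 2 + ((y - yc) / 1.5) ** 2))
density = data - data.mean()                  # stand-in for convolved image
kskip = np.ones((nyk, nxk), dtype=np.int16)   # no kernel pixels skipped
s2m, s4m = precompute_sharp_round(nxk, nyk, xc, yc)
satur, rnd, sharp = sharp_round(data, density, kskip, xc, yc, s2m, s4m,
                                nxk, nyk, datamin=0.0, datamax=1.0e4)
print(satur, rnd, sharp)   # False, roundness ~0 for this symmetric source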
def roundness(im):
"""
from astropy.io import fits as pyfits
data=pyfits.getdata('j94f05bgq_flt.fits',ext=1)
star0=data[403:412,423:432]
star=data[396:432,3522:3558]
In [53]: findobj.roundness(star0)
Out[53]: 0.99401955054989544
In [54]: findobj.roundness(star)
Out[54]: 0.83091919980660645
"""
perimeter = im.shape[0]*2 +im.shape[1]*2 -4
area = im.size
return 4*np.pi*area/perimeter**2
|
from astropy.io import fits as pyfits
data=pyfits.getdata('j94f05bgq_flt.fits',ext=1)
star0=data[403:412,423:432]
star=data[396:432,3522:3558]
In [53]: findobj.roundness(star0)
Out[53]: 0.99401955054989544
In [54]: findobj.roundness(star)
Out[54]: 0.83091919980660645
|
entailment
|
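Note that this statistic depends only on the shape of the cutout (its perimeter and pixel count), not on the pixel values, which is why the docstring examples pass differently sized windows. A quick synthetic check:

import numpy as np

square = np.ones((9, 9))     # 9x9 window: perimeter 32, area 81
oblong = np.ones((9, 36))    # elongated window
print(roundness(square))     # ~0.99
print(roundness(oblong))     # ~0.55 -- markedly less round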
def immoments(im, p,q):
x = list(range(im.shape[1]))
y = list(range(im.shape[0]))
#coord=np.array([x.flatten(),y.flatten()]).T
"""
moment = 0
momentx = 0
for i in x.flatten():
moment+=momentx
sumx=0
for j in y.flatten():
sumx+=i**0*j**0*star0[i,j]
"""
moment = np.sum([i**p*j**q*im[i,j] for j in x for i in y], dtype=np.float64)
return moment
|
moment = 0
momentx = 0
for i in x.flatten():
moment+=momentx
sumx=0
for j in y.flatten():
sumx+=i**0*j**0*star0[i,j]
|
entailment
|
def centroid(im):
"""
Computes the centroid of an image using the image moments:
centroid = {m10/m00, m01/m00}
These calls point to Python version of moments function
m00 = immoments(im,0,0)
m10 = immoments(im, 1,0)
m01 = immoments(im,0,1)
"""
# These calls point to Python version of moments function
m00 = cdriz.arrmoments(im,0,0)
m10 = cdriz.arrmoments(im, 1,0)
m01 = cdriz.arrmoments(im,0,1)
ycen = m10 / m00
xcen = m01 / m00
return xcen, ycen
|
Computes the centroid of an image using the image moments:
centroid = {m10/m00, m01/m00}
These calls point to Python version of moments function
m00 = immoments(im,0,0)
m10 = immoments(im, 1,0)
m01 = immoments(im,0,1)
|
entailment
|
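The same centroid can be cross-checked with the pure-Python immoments() above in place of the cdriz C extension (synthetic data, names local to the sketch):

import numpy as np

y, x = np.mgrid[0:15, 0:15]
star = np.exp(-0.5 * (((x - 8.2) / 1.5) ** 2 + ((y - 6.4) / 1.5) ** 2))
m00 = immoments(star, 0, 0)
m10 = immoments(star, 1, 0)
m01 = immoments(star, 0, 1)
print(m01 / m00, m10 / m00)   # ~(8.2, 6.4) -> (xcen, ycen)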
def getMdriztabParameters(files):
""" Gets entry in MDRIZTAB where task parameters live.
This method returns a record array mapping the selected
row.
"""
# Get the MDRIZTAB table file name from the primary header.
# It is gotten from the first file in the input list. No
# consistency checks are performed.
_fileName = files[0]
_header = fileutil.getHeader(_fileName)
if 'MDRIZTAB' in _header:
_tableName = _header['MDRIZTAB']
else:
raise KeyError("No MDRIZTAB found in file " + _fileName)
_tableName = fileutil.osfn(_tableName)
# Now get the filters from the primary header.
_filters = fileutil.getFilterNames(_header)
# Specifically check to see whether the MDRIZTAB file can be found
mtab_path = os.path.split(_tableName)[0] # protect against no path given for _tableName
if mtab_path and not os.path.exists(mtab_path): # check path first, if given
raise IOError("Directory for MDRIZTAB '%s' could not be accessed!"%mtab_path)
if not os.path.exists(_tableName): # then check for the table itself
raise IOError("MDRIZTAB table '%s' could not be found!"%_tableName)
# Open MDRIZTAB file.
try:
_mdriztab = fits.open(_tableName, memmap=False)
except Exception:
raise IOError("MDRIZTAB table '%s' not valid!" % _tableName)
# Look for matching rows based on filter name. If no
# match, pick up rows for the default filter.
_rows = _getRowsByFilter(_mdriztab, _filters)
if _rows == []:
_rows = _getRowsByFilter(_mdriztab, 'ANY')
# Now look for the row that matches the number of images.
# The logic below assumes that rows for a given filter
# are arranged in ascending order of the 'numimage' field.
_nimages = len(files)
_row = 0
for i in _rows:
_numimages = _mdriztab[1].data.field('numimages')[i]
if _nimages >= _numimages:
_row = i
print('- MDRIZTAB: AstroDrizzle parameters read from row %s.'%(_row+1))
mpars = _mdriztab[1].data[_row]
_mdriztab.close()
interpreted = _interpretMdriztabPars(mpars)
if "staticfile" in interpreted:
interpreted.pop("staticfile")
return interpreted
|
Gets entry in MDRIZTAB where task parameters live.
This method returns a record array mapping the selected
row.
|
entailment
|
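The row-selection rule described above (rows for a filter sorted in ascending order of 'numimages'; take the last row whose count does not exceed the number of inputs) can be sketched on its own. All names here are local to the example:

def pick_row(numimages_by_row, matched_rows, n_files):
    # Rows are assumed sorted in ascending order of 'numimages'.
    row = 0
    for i in matched_rows:
        if n_files >= numimages_by_row[i]:
            row = i
    return row

print(pick_row({0: 1, 1: 4, 2: 10}, [0, 1, 2], n_files=6))   # -> 1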
def _interpretMdriztabPars(rec):
"""
Collect task parameters from the MDRIZTAB record and
update the master parameters list with those values
Note that parameters read from the MDRIZTAB record must
be cleaned up in a similar way that parameters read
from the user interface are.
"""
tabdict = {}
# for each entry in the record...
for indx in range(len(rec.array.names)):
# ... get the name, format, and value.
_name = rec.array.names[indx]
_format = rec.array.formats[indx]
_value = rec.field(_name)
# Translate names from MDRIZTAB columns names to
# input parameter names found in IRAF par file.
#
#if _name.find('final') > -1: _name = 'driz_'+_name
if _name in ['shiftfile','mdriztab']:
continue
drizstep_names = ['driz_sep_','final_']
if _name in ['refimage','bits']:
for dnames in drizstep_names:
tabdict[dnames+_name] = _value
continue
if _name in ['driz_sep_bits','final_bits']:
tabdict[_name] = str(_value)
continue
if _name == 'coeffs':
_val = True
if _value in ['INDEF',None,"None",'',' ']: _val = False
tabdict[_name] = _val
continue
par_table = {'subsky':'skysub','crbitval':'crbit','readnoise':'rdnoise'}
if _name in par_table:
_name = par_table[_name]
# We do not care about the first two columns at this point
# as they are only used for selecting the rows
if _name != 'filter' and _name != 'numimages':
# start by determining the format type of the parameter
_fmt = findFormat(_format)
# Based on format type, apply proper conversion/cleaning
if (_fmt == 'a') or (_fmt == 'A'):
_val = cleanBlank(_value)
if _val is None:
_val = ''
elif (_format == 'i1') or (_format=='1L'):
_val = toBoolean(_value)
elif (_format == 'i4') or (_format == '1J'):
_val = cleanInt(_value)
elif ('E' in _format) or (_format == 'f4') :
_val = cleanNaN(_value)
else:
print('MDRIZTAB column ',_name,' has unrecognized format',_format)
raise ValueError
if _name in ['ra','dec']:
for dnames in drizstep_names:
tabdict[dnames+_name] = _val
else:
tabdict[_name] = _val
return tabdict
|
Collect task parameters from the MDRIZTAB record and
update the master parameters list with those values
Note that parameters read from the MDRIZTAB record must
be cleaned up in a similar way that parameters read
from the user interface are.
|
entailment
|
def run(configObj,wcsmap=None):
""" Interpret parameters from TEAL/configObj interface as set interactively
by the user and build the new WCS instance
"""
distortion_pars = configObj['Distortion Model']
outwcs = build(configObj['outwcs'], configObj['wcsname'],
configObj['refimage'], undistort = configObj['undistort'],
usecoeffs=distortion_pars['applycoeffs'], coeffsfile=distortion_pars['coeffsfile'],
**configObj['User WCS Parameters'])
|
Interpret parameters from TEAL/configObj interface as set interactively
by the user and build the new WCS instance
|
entailment
|
def build(outname, wcsname, refimage, undistort=False,
applycoeffs=False, coeffsfile=None, **wcspars):
""" Core functionality to create a WCS instance from a reference image WCS,
user supplied parameters or user adjusted reference WCS.
The distortion information can either be read in as part of the reference
image WCS or given in 'coeffsfile'.
Parameters
----------
outname : string
filename of output WCS
wcsname : string
WCSNAME ID for generated WCS
refimage : string
filename of image with source WCS used as basis for output WCS
undistort : bool
Create an undistorted WCS?
applycoeffs : bool
Apply coefficients from refimage to generate undistorted WCS?
coeffsfile : string
If specified, read distortion coeffs from separate file
"""
# Ensure that the User WCS parameters have values for all the parameters,
# even if that value is 'None'
user_wcs_pars = convert_user_pars(wcspars)
userwcs = wcspars['userwcs']
"""
Use cases to document the logic required to interpret the parameters
WCS generation based on refimage/userwcs parameters
-------------------------------------------------------------
refimage == None, userwcs == False:
*NO WCS specified*
=> print a WARNING message and return without doing anything
refimage == None, userwcs == True:
=> Create WCS without a distortion model entirely from user parameters*
refimage != None, userwcs == False:
=> No user WCS parameters specified
=> Simply use refimage WCS as specified
refimage != None, userwcs == True:
=> Update refimage WCS with user specified values*
Apply distortion and generate final headerlet using processed WCS
-----------------------------------------------------------------
refimage == None, userwcs == True:
*Output WCS generated entirely from user supplied parameters*
Case 1: applycoeffs == False, undistort == True/False (ignored)
=> no distortion model to interpret
=> generate undistorted headerlet with no distortion model
Case 2: applycoeffs == True/False, undistort == True
=> ignore any user specified distortion model
=> generate undistorted headerlet with no distortion model
Case 3: applycoeffs == True, undistort == False
=> WCS from scratch combined with distortion model from another image
=> generate headerlet with distortion model
refimage != None, userwcs == True/False:
*Output WCS generated from reference image possibly modified by user parameters*
Case 4: applycoeffs == False, undistort == True
=> If refimage has distortion, remove it
=> generate undistorted headerlet with no distortion model
Case 5: applycoeffs == False, undistort == False
=> Leave refimage distortion model (if any) unmodified
=> generate a headerlet using same distortion model (if any) as refimage
Case 6: applycoeffs == True, undistort == False
=> Update refimage with distortion model with user-specified model
=> generate a headerlet with a distortion model
Case 7: applycoeffs == True, undistort == True
=> ignore user specified distortion model and undistort WCS
=> generate a headerlet without a distortion model
"""
### Build WCS from refimage and/or user pars
if util.is_blank(refimage) and not userwcs:
print('WARNING: No WCS specified... No WCS created!')
return
customwcs = None
if util.is_blank(refimage) and userwcs:
# create HSTWCS object from user parameters
complete_wcs = True
for key in user_wcs_pars:
if util.is_blank(user_wcs_pars[key]):
complete_wcs = False
break
if complete_wcs:
customwcs = wcs_functions.build_hstwcs(user_wcs_pars['crval1'],user_wcs_pars['crval2'],
user_wcs_pars['crpix1'],user_wcs_pars['crpix2'],
user_wcs_pars['naxis1'],user_wcs_pars['naxis2'],
user_wcs_pars['pscale'],user_wcs_pars['orientat'])
else:
raise ValueError('Not enough WCS information provided by user!')
if not util.is_blank(refimage):
refwcs = stwcs.wcsutil.HSTWCS(refimage)
else:
refwcs = customwcs
### Apply distortion model (if any) to update WCS
if applycoeffs and not util.is_blank(coeffsfile):
if not util.is_blank(refimage):
replace_model(refwcs, coeffsfile)
else:
if not undistort:
add_model(refwcs,coeffsfile)
# Only working with custom WCS from user, no distortion
# so apply model to WCS, including modifying the CD matrix
apply_model(refwcs)
### Create undistorted WCS, if requested
if undistort:
outwcs = undistortWCS(refwcs)
else:
outwcs = refwcs
if userwcs:
# replace (some/all?) WCS values from refimage with user WCS values
# by running 'updatewcs' functions on input WCS
outwcs = mergewcs(outwcs,customwcs,user_wcs_pars)
### Create the final headerlet and write it out, if specified
if not util.is_blank(refimage):
template = refimage
elif not util.is_blank(coeffsfile):
template = coeffsfile
else:
template = None
# create default WCSNAME if None was given
wcsname = create_WCSname(wcsname)
print('Creating final headerlet with name ',wcsname,' using template ',template)
outhdr = generate_headerlet(outwcs,template,wcsname,outname=outname)
# synchronize this new WCS with the rest of the chips in the image
for ext in outhdr:
if 'extname' in ext.header and ext.header['extname'] == 'SIPWCS':
ext_wcs = wcsutil.HSTWCS(ext)
stwcs.updatewcs.makewcs.MakeWCS.updateWCS(ext_wcs,outwcs)
return outwcs
|
Core functionality to create a WCS instance from a reference image WCS,
user supplied parameters or user adjusted reference WCS.
The distortion information can either be read in as part of the reference
image WCS or given in 'coeffsfile'.
Parameters
----------
outname : string
filename of output WCS
wcsname : string
WCSNAME ID for generated WCS
refimage : string
filename of image with source WCS used as basis for output WCS
undistort : bool
Create an undistorted WCS?
applycoeffs : bool
Apply coefficients from refimage to generate undistorted WCS?
coeffsfile : string
If specified, read distortion coeffs from separate file
|
entailment
|
def create_WCSname(wcsname):
""" Verify that a valid WCSNAME has been provided, and if not, create a
default WCSNAME based on current date.
"""
if util.is_blank(wcsname):
ptime = fileutil.getDate()
wcsname = "User_"+ptime
return wcsname
|
Verify that a valid WCSNAME has been provided, and if not, create a
default WCSNAME based on current date.
|
entailment
|
def convert_user_pars(wcspars):
""" Convert the parameters provided by the configObj into the corresponding
parameters from an HSTWCS object
"""
default_pars = default_user_wcs.copy()
for kw in user_hstwcs_pars:
default_pars[user_hstwcs_pars[kw]] = wcspars[kw]
return default_pars
|
Convert the parameters provided by the configObj into the corresponding
parameters from an HSTWCS object
|
entailment
|
def mergewcs(outwcs, customwcs, wcspars):
""" Merge the WCS keywords from user specified values into a full HSTWCS object
This function will essentially follow the same algorithm as used by
updatehdr only it will use direct calls to updatewcs.Makewcs methods
instead of using 'updatewcs' as a whole
"""
# start by working on a copy of the refwcs
if outwcs.sip is not None:
wcslin = stwcs.distortion.utils.undistortWCS(outwcs)
outwcs.wcs.cd = wcslin.wcs.cd
outwcs.wcs.set()
outwcs.setOrient()
outwcs.setPscale()
else:
wcslin = outwcs
if customwcs is None:
# update valid pars from wcspars
if wcspars['crval1'] is not None:
outwcs.wcs.crval = np.array([wcspars['crval1'],wcspars['crval2']])
if wcspars['crpix1'] is not None:
outwcs.wcs.crpix = np.array([wcspars['crpix1'],wcspars['crpix2']])
if wcspars['naxis1'] is not None:
outwcs.pixel_shape = (wcspars['naxis1'], wcspars['naxis2'])
outwcs.wcs.crpix = np.array(outwcs.pixel_shape) / 2.0
pscale = wcspars['pscale']
orient = wcspars['orientat']
if pscale is not None or orient is not None:
if pscale is None: pscale = wcslin.pscale
if orient is None: orient = wcslin.orientat
pix_ratio = pscale/wcslin.pscale
delta_rot = wcslin.orientat - orient
delta_rot_mat = fileutil.buildRotMatrix(delta_rot)
outwcs.wcs.cd = np.dot(outwcs.wcs.cd,delta_rot_mat)*pix_ratio
# apply model to new linear CD matrix
apply_model(outwcs)
else:
# A new fully described WCS was provided in customwcs
outwcs.wcs.cd = customwcs.wcs.cd
outwcs.wcs.crval = customwcs.wcs.crval
outwcs.wcs.crpix = customwcs.wcs.crpix
outwcs.pixel_shape = customwcs.pixel_shape
return outwcs
|
Merge the WCS keywords from user specified values into a full HSTWCS object
This function will essentially follow the same algorithm as used by
updatehdr only it will use direct calls to updatewcs.Makewcs methods
instead of using 'updatewcs' as a whole
|
entailment
|
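The CD-matrix update in the pscale/orientat branch can be illustrated numerically. This sketch assumes fileutil.buildRotMatrix builds the standard 2-D rotation matrix, and the numbers below are invented:

import numpy as np

def update_cd(cd, old_pscale, old_orientat, new_pscale, new_orientat):
    pix_ratio = new_pscale / old_pscale
    delta_rot = np.deg2rad(old_orientat - new_orientat)
    # Standard 2-D rotation matrix (assumed behavior of buildRotMatrix).
    rot = np.array([[np.cos(delta_rot), -np.sin(delta_rot)],
                    [np.sin(delta_rot),  np.cos(delta_rot)]])
    return np.dot(cd, rot) * pix_ratio

cd = np.array([[-2.8e-5, 0.0], [0.0, 2.8e-5]])   # ~0.1 arcsec/pix, north up
print(update_cd(cd, 0.1, 0.0, 0.05, 45.0))       # finer scale, rotated 45 deg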
def add_model(refwcs, newcoeffs):
""" Add (new?) distortion model to existing HSTWCS object
"""
# Update refwcs with distortion model
for kw in model_attrs:
if newcoeffs.__dict__[kw] is not None:
refwcs.__dict__[kw] = newcoeffs.__dict__[kw]
|
Add (new?) distortion model to existing HSTWCS object
|
entailment
|
def apply_model(refwcs):
""" Apply distortion model to WCS, including modifying
CD with linear distortion terms
"""
# apply distortion model to CD matrix
if 'ocx10' in refwcs.__dict__ and refwcs.ocx10 is not None:
linmat = np.array([[refwcs.ocx11,refwcs.ocx10],[refwcs.ocy11,refwcs.ocy10]])/refwcs.idcscale
refwcs.wcs.cd = np.dot(refwcs.wcs.cd,linmat)
refwcs.wcs.set()
refwcs.setOrient()
refwcs.setPscale()
|
Apply distortion model to WCS, including modifying
CD with linear distortion terms
|
entailment
|
def replace_model(refwcs, newcoeffs):
""" Replace the distortion model in a current WCS with a new model
Start by creating a linear WCS, then add the new model and apply it.
"""
print('WARNING:')
print(' Replacing existing distortion model with one')
print(' not necessarily matched to the observation!')
# create linear version of WCS to be updated by new model
wcslin = stwcs.distortion.utils.undistortWCS(refwcs)
outwcs = refwcs.deepcopy()
outwcs.wcs.cd = wcslin.wcs.cd
outwcs.wcs.set()
outwcs.setOrient()
outwcs.setPscale()
# add new model to updated WCS object
add_model(outwcs,newcoeffs)
# Update CD matrix with new model
apply_model(outwcs)
# rebind the local name to the updated WCS; note this does not modify
# the caller's refwcs object in place
refwcs = outwcs.deepcopy()
|
Replace the distortion model in a current WCS with a new model
Start by creating a linear WCS, then add the new model and apply it.
|
entailment
|
def undistortWCS(refwcs):
""" Generate an undistorted HSTWCS from an HSTWCS object with a distortion model
"""
wcslin = stwcs.distortion.utils.output_wcs([refwcs])
outwcs = stwcs.wcsutil.HSTWCS()
outwcs.wcs = wcslin.wcs
outwcs.wcs.set()
outwcs.setPscale()
outwcs.setOrient()
outwcs.sip = None
# Update instrument specific keywords
outwcs.inst_kw = refwcs.inst_kw
for kw in refwcs.inst_kw:
outwcs.__dict__[kw] = refwcs.__dict__[kw]
outwcs.pixel_shape = wcslin.pixel_shape
return outwcs
|
Generate an undistorted HSTWCS from an HSTWCS object with a distortion model
|
entailment
|
def generate_headerlet(outwcs,template,wcsname,outname=None):
""" Create a headerlet based on the updated HSTWCS object
This function uses 'template' as the basis for the headerlet.
This file can either be the original wcspars['refimage'] or
wcspars['coeffsfile'], in this order of preference.
If 'template' is None, then a simple Headerlet will be
generated with a single SIPWCS extension and no distortion
"""
# Create header object from HSTWCS object
siphdr = True
if outwcs.sip is None:
siphdr = False
outwcs_hdr = outwcs.wcs2header(sip2hdr=siphdr)
outwcs_hdr['NPIX1'] = outwcs.pixel_shape[0]
outwcs_hdr['NPIX2'] = outwcs.pixel_shape[1]
# create headerlet object in memory; either from a file or from scratch
if template is not None and siphdr:
print('Creating headerlet from template...')
fname,extn = fileutil.parseFilename(template)
extnum = fileutil.parseExtn(extn)
extname = ('sipwcs',extnum[1])
hdrlet = headerlet.createHeaderlet(fname,wcsname)
# update hdrlet with header values from outwcs
for kw in outwcs_hdr.items():
hdrlet[extname].header[kw[0]] = kw[1]
hdrlet[extname].header['WCSNAME'] = wcsname
else:
print('Creating headerlet from scratch...')
hdrlet = fits.HDUList()
hdrlet.append(fits.PrimaryHDU())
siphdr = fits.ImageHDU(header=outwcs_hdr)
siphdr.header['EXTNAME'] = 'SIPWCS'
siphdr.header['WCSNAME'] = wcsname
hdrlet.append(siphdr)
# Write out header to a file as the final product
if outname is not None:
if outname.find('_hdr.fits') < 0:
outname += '_hdr.fits'
if os.path.exists(outname):
print('Overwrite existing file "%s"'%outname)
os.remove(outname)
hdrlet.writeto(outname)
print('Wrote out headerlet :',outname)
return hdrlet
|
Create a headerlet based on the updated HSTWCS object
This function uses 'template' as the basis for the headerlet.
This file can either be the original wcspars['refimage'] or
wcspars['coeffsfile'], in this order of preference.
If 'template' is None, then a simple Headerlet will be
generated with a single SIPWCS extension and no distortion
|
entailment
|
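The "from scratch" branch above reduces to a small amount of astropy.io.fits code. The sketch below writes a bare headerlet-like file with a single SIPWCS extension; the keyword values are invented for illustration:

from astropy.io import fits

hdr = fits.Header()
for key, val in [('CRVAL1', 150.0), ('CRVAL2', 2.2),
                 ('CRPIX1', 512.0), ('CRPIX2', 512.0),
                 ('CTYPE1', 'RA---TAN'), ('CTYPE2', 'DEC--TAN')]:
    hdr[key] = val
sipwcs = fits.ImageHDU(header=hdr)
sipwcs.header['EXTNAME'] = 'SIPWCS'
sipwcs.header['WCSNAME'] = 'User_sketch'
hdrlet = fits.HDUList([fits.PrimaryHDU(), sipwcs])
hdrlet.writeto('example_hdr.fits', overwrite=True)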