code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def convex_hull(points):
    """Compute the convex hull of a set of 2D points.

    Implements Andrew's monotone chain algorithm, which runs in
    O(n log n).  Credit:
    `<http://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain>`_

    Parameters
    ----------
    points : list of tuples
        An iterable sequence of (x, y) pairs representing the points.

    Returns
    -------
    list
        Vertices of the convex hull in counter-clockwise order, starting
        from the lexicographically smallest vertex (which, for two or more
        distinct points, also closes the ring at the end of the list).
    """
    # Lexicographic sort; deduplicating also catches the case of a single
    # unique point repeated several times.
    unique_pts = sorted(set(points))

    # Boring case: no points or a single (possibly repeated) point.
    if len(unique_pts) < 2:
        return unique_pts

    def turn(o, a, b):
        # z-component of the 3D cross product of vectors OA and OB:
        # positive for a counter-clockwise turn, negative for clockwise,
        # zero when O, A, B are collinear.
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

    def half_hull(pts):
        # March along 'pts', discarding vertices that would make the
        # chain turn clockwise (or stay collinear).
        chain = []
        for p in pts:
            while len(chain) >= 2 and turn(chain[-2], chain[-1], p) <= 0:
                chain.pop()
            chain.append(p)
        return chain

    lower = half_hull(unique_pts)
    upper = half_hull(reversed(unique_pts))

    # Drop the last point of the lower hull (it starts the upper hull);
    # the upper hull's final point closes the ring back at the start.
    return lower[:-1] + upper
Computes the convex hull of a set of 2D points. Implements `Andrew's monotone chain algorithm <http://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain>`_. The algorithm has O(n log n) complexity. Credit: `<http://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain>`_ Parameters ---------- points : list of tuples An iterable sequence of (x, y) pairs representing the points. Returns ------- Output : list A list of vertices of the convex hull in counter-clockwise order, starting from the vertex with the lexicographically smallest coordinates.
1.721629
1.726715
0.997054
def _estimate_2dhist_shift(imgxy, refxy, searchrad=3.0):
    """Create a 2D matrix-histogram which contains the delta between each
    XY position and each UV position.  Then estimate initial offset
    between catalogs.

    Returns a 6-tuple ``(xshift, yshift, peak, flux, zpmat, success)``
    where ``zpmat`` is the 2D histogram and ``success`` is a bool flag.
    """
    print("Computing initial guess for X and Y shifts...")

    # create ZP matrix
    zpmat = _xy_2dhist(imgxy, refxy, r=searchrad)

    nonzeros = np.count_nonzero(zpmat)
    if nonzeros == 0:
        # no matches within search radius. Return (0, 0):
        print("WARNING: No matches found within a search radius of {:g} "
              "pixels.".format(searchrad))
        return 0.0, 0.0, 0, 0, zpmat, False

    elif nonzeros == 1:
        # only one non-zero bin: take it as the shift directly.
        yp, xp = np.unravel_index(np.argmax(zpmat), zpmat.shape)
        maxval = int(np.ceil(zpmat[yp, xp]))
        # bin indices are offset by the search radius from zero shift
        xp -= searchrad
        yp -= searchrad
        print("Found initial X and Y shifts of {:.4g}, {:.4g} "
              "based on a single non-zero bin and {} matches"
              .format(xp, yp, maxval))
        return xp, yp, maxval, maxval, zpmat, True

    # Several populated bins: fit the histogram peak.
    (xp, yp), fit_status, fit_sl = _find_peak(zpmat, peak_fit_box=5,
                                              mask=zpmat > 0)

    if fit_status.startswith('ERROR'):
        print("WARNING: No valid shift found within a search radius of {:g} "
              "pixels.".format(searchrad))
        maxval = int(np.ceil(zpmat.max()))
        return 0.0, 0.0, maxval, maxval, zpmat, False

    xp -= searchrad
    yp -= searchrad

    if fit_status == 'WARNING:EDGE':
        print(
            "WARNING: Found peak in the 2D histogram lies at the edge of "
            "the histogram. Try increasing 'searchrad' for improved results."
        )

    # Attempt to estimate "significance of detection":
    maxval = zpmat.max()
    zpmat_mask = (zpmat > 0) & (zpmat < maxval)

    if np.any(zpmat_mask):
        bkg = zpmat[zpmat_mask].mean()
        sig = maxval / np.sqrt(bkg)
    # NOTE(review): if zpmat_mask is all-False, 'sig' is never assigned and
    # the print below would raise NameError — confirm whether that state is
    # reachable given nonzeros > 1.

    flux = int(zpmat[fit_sl].sum())
    print("Found initial X and Y shifts of {:.4g}, {:.4g} "
          "with significance of {:.4g} and {:d} matches"
          .format(xp, yp, sig, flux))

    return xp, yp, int(np.ceil(maxval)), flux, zpmat, True
Create a 2D matrix-histogram which contains the delta between each XY position and each UV position. Then estimate initial offset between catalogs.
3.388013
3.412613
0.992791
def openFile(self, openDQ=False):
    """Open file and set up filehandle for image file.

    Parameters
    ----------
    openDQ : bool
        When True, also open the associated DQ image
        (passed through as ``doNotOpenDQ=not openDQ``).
    """
    # Reopen only when the science image handle is currently closed.
    if self._im.closed:
        # Release a stale DQ handle first so both handles stay in sync.
        if not self._dq.closed:
            self._dq.release()
        assert(self._dq.closed)

        fi = FileExtMaskInfo(clobber=False,
                             doNotOpenDQ=not openDQ,
                             im_fmode=self.open_mode)
        fi.image = self.name
        self._im = fi.image
        fi.append_ext(spu.get_ext_list(self._im, extname='SCI'))
        fi.finalize()

        # Cache the opened image/DQ objects and their extension lists.
        self._im = fi.image
        self._dq = fi.DQimage
        self._imext = fi.fext
        self._dqext = fi.dqext
Open file and set up filehandle for image file
7.777596
7.378477
1.054092
def get_wcs(self):
    """Helper method to return a list of all the input WCS objects
    associated with this image (one per chip catalog)."""
    return [self.chip_catalogs[chip]['wcs'] for chip in self.chip_catalogs]
Helper method to return a list of all the input WCS objects associated with this image.
4.085604
3.755383
1.087932
def buildSkyCatalog(self):
    """Convert sky catalog for all chips into a single catalog for
    the entire field-of-view of this image.

    Populates ``self.all_radec`` as ``[ra, dec, flux, id]`` (arrays
    concatenated over all chips) and keeps a deep copy of the result in
    ``self.all_radec_orig``.
    """
    self.all_radec = None
    self.all_radec_orig = None
    ralist = []
    declist = []
    fluxlist = []
    idlist = []
    for scichip in self.chip_catalogs:
        skycat = self.chip_catalogs[scichip]['catalog'].radec
        xycat = self.chip_catalogs[scichip]['catalog'].xypos
        if skycat is not None:
            ralist.append(skycat[0])
            declist.append(skycat[1])
            # Prefer flux/id columns from the XY catalog, then from the
            # sky catalog; otherwise fall back to dummy fluxes (999.0)
            # and sequential IDs.
            if xycat is not None and len(xycat) > 2:
                fluxlist.append(xycat[2])
                idlist.append(xycat[3])
            elif len(skycat) > 2:
                fluxlist.append(skycat[2])
                idlist.append(skycat[3])
            else:
                fluxlist.append([999.0]*len(skycat[0]))
                idlist.append(np.arange(len(skycat[0])))

    self.all_radec = [np.concatenate(ralist), np.concatenate(declist),
                      np.concatenate(fluxlist), np.concatenate(idlist)]
    # Keep a pristine copy so later clipping/sorting can be undone.
    self.all_radec_orig = copy.deepcopy(self.all_radec)
Convert sky catalog for all chips into a single catalog for the entire field-of-view of this image.
2.350483
2.308245
1.018299
def buildDefaultRefWCS(self):
    """Generate a default reference WCS for this image.

    Leaves ``self.default_refWCS`` as None unless ``self.use_wcs``
    is set.
    """
    self.default_refWCS = None
    if not self.use_wcs:
        return
    chip_wcs = [info['wcs'] for info in self.chip_catalogs.values()]
    self.default_refWCS = utils.output_wcs(chip_wcs)
Generate a default reference WCS for this image.
4.41727
4.31918
1.02271
def transformToRef(self, ref_wcs, force=False):
    """Transform sky coords from ALL chips into X,Y coords in reference WCS.

    Parameters
    ----------
    ref_wcs : pywcs.WCS
        Reference WCS defining the output tangent plane.
    force : bool
        Recompute ``self.outxy`` even if it was already computed.

    Raises
    ------
    ValueError
        If ``ref_wcs`` is not a valid WCS object.
    """
    if not isinstance(ref_wcs, pywcs.WCS):
        print(textutil.textbox('Reference WCS not a valid HSTWCS object'),
              file=sys.stderr)
        raise ValueError
    # Need to concatenate catalogs from each input
    if self.outxy is None or force:
        outxy = ref_wcs.wcs_world2pix(self.all_radec[0], self.all_radec[1],
                                      self.origin)
        # convert outxy list to a Nx2 array
        self.outxy = np.column_stack([outxy[0][:, np.newaxis],
                                      outxy[1][:, np.newaxis]])
        # Optionally write the transformed catalog out and record its name.
        if self.pars['writecat']:
            catname = self.rootname + "_refxy_catalog.coo"
            self.write_outxy(catname)
            self.catalog_names['ref_xy'] = catname
Transform sky coords from ALL chips into X,Y coords in reference WCS.
5.148005
5.113509
1.006746
def sortSkyCatalog(self):
    """Sort and clip the source catalog based on the flux range specified
    by the user.

    It keeps a copy of the original full list (``self.all_radec_orig``)
    in order to support iteration.
    """
    # No fluxes at all: nothing to clip by.
    if len(self.all_radec_orig[2].nonzero()[0]) == 0:
        warn_str = "Source catalog NOT trimmed by flux/mag. No fluxes read in for sources!"
        print('\nWARNING: ', warn_str, '\n')
        log.warning(warn_str)
        return
    clip_catalog = False
    clip_prefix = ''
    # Scan the user parameters for any clipping-related keys.
    for k in sortKeys:
        for p in self.pars.keys():
            pindx = p.find(k)
            if pindx >= 0 and self.pars[p] is not None:
                log.info('found a match for %s to %s'%(
                        str(p), str(self.pars[p])))
                # find prefix (if any)
                clip_prefix = p[:pindx].strip()
                # Only clip the catalog if one of the keys is specified
                # in the catalog parameters, not the source finding pars
                if clip_prefix and 'units' not in p:
                    clip_catalog = True
                    break
        if clip_catalog:
            break
    all_radec = None
    if clip_catalog:
        # Start by clipping by any specified flux range
        if self.pars[clip_prefix+'maxflux'] is not None or \
                self.pars[clip_prefix+'minflux'] is not None:
            clip_catalog = True
            if self.pars[clip_prefix+'minflux'] is not None:
                fluxmin = self.pars[clip_prefix+'minflux']
            else:
                fluxmin = self.all_radec[2].min()

            if self.pars[clip_prefix+'maxflux'] is not None:
                fluxmax = self.pars[clip_prefix+'maxflux']
            else:
                fluxmax = self.all_radec[2].max()

            # apply flux limit clipping
            minindx = self.all_radec_orig[2] >= fluxmin
            maxindx = self.all_radec_orig[2] <= fluxmax
            flux_indx = np.bitwise_and(minindx, maxindx)
            all_radec = []
            all_radec.append(self.all_radec_orig[0][flux_indx])
            all_radec.append(self.all_radec_orig[1][flux_indx])
            all_radec.append(self.all_radec_orig[2][flux_indx])
            # IDs are renumbered sequentially after clipping.
            all_radec.append(np.arange(len(self.all_radec_orig[0][flux_indx])))

        if clip_prefix+'nbright' in self.pars and \
                self.pars[clip_prefix+'nbright'] is not None:
            clip_catalog = True
            nbright = self.pars[clip_prefix+'nbright']
            # pick out only the brightest 'nbright' sources
            # (mag: smallest values are brightest; flux: largest values).
            if self.pars[clip_prefix+'fluxunits'] == 'mag':
                nbslice = slice(None, nbright)
            else:
                nbslice = slice(nbright, None)
            if all_radec is None:
                # work on copy of all original data
                all_radec = copy.deepcopy(self.all_radec_orig)
            # find indices of brightest
            nbright_indx = np.argsort(all_radec[2])[nbslice]
            self.all_radec[0] = all_radec[0][nbright_indx]
            self.all_radec[1] = all_radec[1][nbright_indx]
            self.all_radec[2] = all_radec[2][nbright_indx]
            self.all_radec[3] = np.arange(len(all_radec[0][nbright_indx]))
        else:
            # Flux-clipped only (no nbright selection).
            if all_radec is not None:
                self.all_radec = copy.deepcopy(all_radec)
Sort and clip the source catalog based on the flux range specified by the user. It keeps a copy of the original full list in order to support iteration.
2.538644
2.496267
1.016976
def writeHeaderlet(self, **kwargs):
    """Write and/or attach a headerlet based on update to PRIMARY WCS.

    Keyword arguments are passed through (after normalizing empty strings
    to None) to ``headerlet.write_headerlet``.
    """
    # Insure filehandle is open and available...
    self.openFile()

    pars = kwargs.copy()
    rms_pars = self.fit['rms_keys']

    # Empty-string values for these keywords mean "not provided".
    str_kw = ['descrip', 'history', 'author', 'hdrfile']
    for kw in str_kw:
        if pars[kw] == '':
            pars[kw] = None

    # Call function with properly interpreted input parameters
    # Syntax: write_headerlet(filename, hdrname, output, sciext='SCI',
    #          wcsname=None, wcskey=None, destim=None,
    #          sipname=None, npolfile=None, d2imfile=None,
    #          author=None, descrip=None, history=None,
    #          rms_ra=None, rms_dec=None, nmatch=None, catalog=None,
    #          attach=True, clobber=False):
    # NOTE(review): rms_ra/rms_dec are listed in the syntax above but are
    # not passed below — confirm whether that omission is intentional.
    headerlet.write_headerlet(self._im.hdu, pars['hdrname'],
            output=pars['hdrfile'],
            wcsname=None, wcskey=self.next_key, destim=None,
            sipname=None, npolfile=None, d2imfile=None,
            author=pars['author'], descrip=pars['descrip'],
            history=pars['history'],
            nmatch=rms_pars['NMATCH'], catalog=pars['catalog'],
            attach=pars['attach'], clobber=pars['clobber']
        )
Write and/or attach a headerlet based on update to PRIMARY WCS
5.548428
5.448035
1.018427
def write_skycatalog(self, filename):
    """Write out the all_radec catalog for this image to a file.

    Parameters
    ----------
    filename : str
        Output filename; RA/Dec are written in degrees, one pair per line.
    """
    if self.all_radec is None:
        return
    ralist = self.all_radec[0]
    declist = self.all_radec[1]
    # Use a context manager so the file is closed even if a write fails
    # (the original left the handle open on error).
    with open(filename, 'w') as f:
        f.write("#Sky positions for: " + self.name + '\n')
        f.write("#RA Dec\n")
        f.write("#(deg) (deg)\n")
        for ra, dec in zip(ralist, declist):
            f.write('%0.12f %0.12f\n' % (ra, dec))
Write out the all_radec catalog for this image to a file.
2.504605
2.292807
1.092375
def get_xy_catnames(self):
    """Return a newline-terminated string with this image's name followed
    by the names of any input_xy catalogs."""
    parts = [self.name + ' ']
    for xycat in self.catalog_names.get('input_xy', []):
        parts.append(' ' + xycat)
    return ''.join(parts) + '\n'
Return a string with the names of input_xy catalog names
5.89003
4.060413
1.450599
def write_outxy(self, filename):
    """Write out the output (transformed) X,Y catalog for this image.

    Parameters
    ----------
    filename : str
        Output filename; pixel positions from ``self.outxy`` are written
        one pair per line.
    """
    # Use a context manager so the file is closed even if a write fails
    # (the original left the handle open on error).
    with open(filename, 'w') as f:
        f.write("#Pixel positions for: " + self.name + '\n')
        f.write("#X Y\n")
        f.write("#(pix) (pix)\n")
        for i in range(self.all_radec[0].shape[0]):
            f.write('%f %f\n' % (self.outxy[i, 0], self.outxy[i, 1]))
Write out the output(transformed) XY catalog for this image to a file.
3.350318
3.191676
1.049705
def get_shiftfile_row(self):
    """Return the shiftfile row string for this image (IRAF/MultiDrizzle
    compatible), or None if no fit has been performed."""
    if self.fit is None:
        return None
    fit = self.fit
    return '%s %0.6f %0.6f %0.6f %0.6f %0.6f %0.6f\n' % (
        self.name, fit['offset'][0], fit['offset'][1],
        fit['rot'], fit['scale'][0],
        fit['rms'][0], fit['rms'][1])
Return the information for a shiftfile for this image to provide compatibility with the IRAF-based MultiDrizzle.
3.167439
3.119689
1.015306
def clean(self):
    """Remove intermediate files created.

    Deletes match files and intermediate catalogs recorded in
    ``self.catalog_names``.
    """
    # TODO: add cleaning of mask files, *if* created ...
    for key in self.catalog_names:
        if 'match' in key:
            # single match-file entry keyed by name
            if os.path.exists(self.catalog_names[key]):
                log.info('Deleting intermediate match file: %s' %
                         self.catalog_names[key])
                os.remove(self.catalog_names[key])
        else:
            # BUG FIX: the original iterated the characters of the key
            # string ('for extn in f') instead of the recorded filenames.
            fnames = self.catalog_names[key]
            if isinstance(fnames, str):
                fnames = [fnames]
            for extn in fnames:
                if os.path.exists(extn):
                    # BUG FIX: '%d' -> '%s' (extn is a filename string;
                    # '%d' raised TypeError here).
                    log.info('Deleting intermediate catalog: %s' % extn)
                    os.remove(extn)
Remove intermediate files created.
4.802206
4.357256
1.102117
def write_skycatalog(self, filename, show_flux=False, show_id=False):
    """Write out the all_radec catalog for this image to a file.

    Parameters
    ----------
    filename : str
        Output filename.
    show_flux : bool
        Append a flux column taken from ``self.xy_catalog[2]``.
    show_id : bool
        Append ID and origin columns taken from ``self.xy_catalog[3]``
        and ``self.xy_catalog[-1]``.
    """
    header1 = "#RA Dec"
    header2 = "#(deg) (deg)"
    if show_flux:
        header1 += " Flux"
        header2 += " (counts)"
    if show_id:
        header1 += " ID Origin"
    # Use a context manager so the file is closed even on error
    # (the original left the handle open); dead commented-out code and
    # the unused 'flux_end_char' variable were dropped.
    with open(filename, 'w') as f:
        f.write("#Sky positions for cumulative reference catalog. Initial catalog from: " + self.name + '\n')
        f.write(header1 + "\n")
        f.write(header2 + "\n")
        for i in range(self.all_radec[0].shape[0]):
            src_line = "{:.7f} {:.7f}".format(self.all_radec[0][i],
                                              self.all_radec[1][i])
            if show_flux:
                src_line += " {:.5g}".format(self.xy_catalog[2][i])
            if show_id:
                src_line += " {:d} {:s}".format(self.xy_catalog[3][i],
                                                self.xy_catalog[-1][i])
            f.write(src_line + '\n')
Write out the all_radec catalog for this image to a file.
2.85332
2.797926
1.019798
def transformToRef(self):
    """Transform reference catalog sky positions (self.all_radec) to
    reference tangent plane (self.wcs) to create output X,Y positions.
    """
    if 'refxyunits' in self.pars and self.pars['refxyunits'] == 'pixels':
        # Catalog was provided in pixels: use the positions directly as
        # output X,Y and derive RA/Dec from the WCS instead.
        log.info('Creating RA/Dec positions for reference sources...')
        self.outxy = np.column_stack([self.all_radec[0][:, np.newaxis],
                                      self.all_radec[1][:, np.newaxis]])
        skypos = self.wcs.wcs_pix2world(self.all_radec[0], self.all_radec[1],
                                        self.origin)
        self.all_radec[0] = skypos[0]
        self.all_radec[1] = skypos[1]
    else:
        log.info('Converting RA/Dec positions of reference sources from "%s" to '%self.name+
                 'X,Y positions in reference WCS...')
        self.refWCS = self.wcs
        outxy = self.wcs.wcs_world2pix(self.all_radec[0], self.all_radec[1],
                                       self.origin)
        # convert outxy list to a Nx2 array
        self.outxy = np.column_stack([outxy[0][:, np.newaxis],
                                      outxy[1][:, np.newaxis]])
Transform reference catalog sky positions (self.all_radec) to reference tangent plane (self.wcs) to create output X,Y positions.
2.993905
2.571261
1.164372
def clean(self):
    """Remove the intermediate catalog file, if one was created."""
    catname = self.catalog.catname
    if not util.is_blank(catname) and os.path.exists(catname):
        os.remove(catname)
Remove intermediate files created
4.926157
4.757232
1.035509
def close(self):
    """Close the object nicely and release all the data arrays from
    memory.

    YOU CANT GET IT BACK, the pointers and data are gone — use the
    getData method to get the data array returned for future use. You
    can use putData to reattach a new data array to the imageObject.
    """
    if self._image is None:
        return

    # mcara: I think the code below is not necessary but in order to
    # preserve the same functionality as the code removed below,
    # I make an empty copy of the image object:
    empty_image = fits.HDUList()
    for u in self._image:
        # same HDU classes, but with no data/header attached
        empty_image.append(u.__class__(data=None, header=None))
    # mcara: END unnecessary code

    self._image.close()  # calls fits.close()
    self._image = empty_image
Close the object nicely and release all the data arrays from memory. YOU CAN'T GET IT BACK — the pointers and data are gone, so use the getData method to get the data array returned for future use. You can use putData to reattach a new data array to the imageObject.
8.045906
7.680976
1.047511
def clean(self):
    """Deletes intermediate products generated for this imageObject."""
    chip_products = ['blotImage', 'crmaskImage', 'finalMask',
                     'staticMask', 'singleDrizMask', 'outSky',
                     'outSContext', 'outSWeight', 'outSingle',
                     'outMedian', 'dqmask', 'tmpmask',
                     'skyMatchMask']

    log.info('Removing intermediate files for %s' % self._filename)

    # Remove the combined products first; namely, the median image.
    util.removeFileSafely(self.outputNames['outMedian'])

    # Then any chip-specific intermediate files that were created.
    for chip in self.returnAllChips(extname='SCI'):
        for product in chip_products:
            if product in chip.outputNames:
                util.removeFileSafely(chip.outputNames[product])
Deletes intermediate products generated for this imageObject.
13.556894
11.73851
1.154908
def getData(self, exten=None):
    """Return just the data array from the specified extension.

    fileutil is used instead of fits to account for non-FITS input
    images; ``fileutil.openImage`` returns a fits object.  Data read
    from disk is cached on the in-memory HDU for reuse.
    """
    if exten.lower().find('sci') > -1:
        # For SCI extensions, the current file will have the data
        fname = self._filename
    else:
        # otherwise, the data being requested may need to come from a
        # separate file, as is the case with WFPC2 DQ data.
        #
        # convert exten to 'sci',extver to get the DQ info for that chip
        extn = exten.split(',')
        sci_chip = self._image[self.scienceExt, int(extn[1])]
        fname = sci_chip.dqfile
    extnum = self._interpretExten(exten)
    if self._image[extnum].data is None:
        if os.path.exists(fname):
            _image = fileutil.openImage(fname, clobber=False, memmap=False)
            _data = fileutil.getExtn(_image, extn=exten).data
            _image.close()
            del _image
            # cache for subsequent calls
            self._image[extnum].data = _data
        else:
            _data = None
    else:
        _data = self._image[extnum].data
    return _data
Return just the data array from the specified extension fileutil is used instead of fits to account for non- FITS input images. openImage returns a fits object.
6.102051
5.806983
1.050813
def getHeader(self, exten=None):
    """Return just the specified header extension.

    fileutil is used instead of fits to account for non-FITS input
    images; openImage returns a fits object.
    """
    hdulist = fileutil.openImage(self._filename, clobber=False, memmap=False)
    try:
        header = fileutil.getExtn(hdulist, extn=exten).header
    finally:
        # close the handle even if the extension lookup raises
        # (the original leaked the open file on error)
        hdulist.close()
    return header
Return just the specified header extension fileutil is used instead of fits to account for non-FITS input images. openImage returns a fits object.
6.353606
4.681429
1.357194
def updateData(self, exten, data):
    """Write out updated data and header to the original input file
    for this object.

    Parameters
    ----------
    exten : int or str
        Extension specification (number or 'name,ver' string).
    data : array
        New data array for that extension.
    """
    extnum = self._interpretExten(exten)
    fimg = fileutil.openImage(self._filename, mode='update', memmap=False)
    try:
        fimg[extnum].data = data
        # keep the on-disk header in sync with the in-memory one
        fimg[extnum].header = self._image[extnum].header
    finally:
        # close even if the update raises (the original leaked the handle)
        fimg.close()
Write out updated data and header to the original input file for this object.
7.362586
5.908292
1.246145
def putData(self, data=None, exten=None):
    """Reattach a data array to the specified extension of this object.

    Updates the extension's BITPIX keyword to match the dtype of 'data'
    (BSCALE is ignored); the rest of the header is up to the caller to
    verify.

    Parameters
    ----------
    data : array or None
        The data array to attach; a warning is logged when None.
    exten : int or str
        Where to attach it — extension number or a string like 'sci,1'.
    """
    if data is None:
        log.warning("No data supplied")
        return
    # BUG FIX: _interpretExten is a method on this object; the original
    # called it as a bare name, which raised NameError at runtime.
    extnum = self._interpretExten(exten)
    ext = self._image[extnum]
    # update the bitpix to the current datatype; this aint fancy and
    # ignores bscale
    ext.header['BITPIX'] = _NUMPY_TO_IRAF_DTYPES[data.dtype.name]
    ext.data = data
Now that we are removing the data from the object to save memory, we need something that cleanly puts the data array back into the object so that we can write out everything together using something like fits.writeto. This method is an attempt to make sure that when you add an array back to the .data section of the HDU it still matches the header information for that section (i.e., update the BITPIX to reflect the datatype of the array you are adding). The other header stuff is up to you to verify. Data should be the data array; exten is where you want to stick it, either extension number or a string like 'sci,1'.
9.583224
7.146739
1.340923
def getAllData(self, extname=None, exclude=None):
    """Attach the data arrays for ALL matching image extensions from the
    original file onto this object.

    If no extname is given, all IMAGE extensions are restored; otherwise
    only those whose EXTNAME matches.  Use ``exclude`` to restore
    everything EXCEPT one EXTNAME.
    """
    extensions = self._findExtnames(extname=extname, exclude=exclude)

    for i in range(1, self._nextend+1, 1):
        # Only consider HDUs flagged as IMAGE extensions.
        if hasattr(self._image[i], '_extension') and \
                "IMAGE" in self._image[i]._extension:
            extver = self._image[i].header['extver']
            # Restore data only for requested, group-member chips.
            if (self._image[i].extname in extensions) and self._image[self.scienceExt, extver].group_member:
                self._image[i].data = self.getData(self._image[i].extname + ','+str(self._image[i].extver))
This function is meant to make it easier to attach ALL the data extensions of the image object so that we can write out copies of the original image nicer. If no extname is given, then it retrieves all data from the original file and attaches it. Otherwise, give the name of the extensions you want and all of those will be restored. Ok, I added another option. If you want to get all the data extensions EXCEPT a particular one, leave extname=None and set exclude=EXTNAME. This is helpful because you might not know all the extnames the image has; this will find out and exclude the one you do not want overwritten.
5.914876
5.912988
1.000319
def returnAllChips(self, extname=None, exclude=None):
    """Returns a list containing all the chips which match the extname
    given minus those specified for exclusion (if any).
    """
    extensions = self._findExtnames(extname=extname, exclude=exclude)
    chiplist = []
    for i in range(1, self._nextend+1, 1):
        # Default EXTVER to 1 for HDUs that do not carry one.
        if 'extver' in self._image[i].header:
            extver = self._image[i].header['extver']
        else:
            extver = 1
        if hasattr(self._image[i], '_extension') and \
                "IMAGE" in self._image[i]._extension:
            # Keep only group-member chips whose EXTNAME was requested.
            if (self._image[i].extname in extensions) and self._image[self.scienceExt, extver].group_member:
                chiplist.append(self._image[i])
    return chiplist
Returns a list containing all the chips which match the extname given minus those specified for exclusion (if any).
4.493784
4.477063
1.003735
#make a list of the available extension names for the object extensions=[] if extname is not None: if not isinstance(extname,list): extname=[extname] for extn in extname: extensions.append(extn.upper()) else: #restore all the extensions data from the original file, be careful here #if you've altered data in memory you want to keep! for i in range(1,self._nextend+1,1): if hasattr(self._image[i],'_extension') and \ "IMAGE" in self._image[i]._extension: if self._image[i].extname.upper() not in extensions: extensions.append(self._image[i].extname) #remove this extension from the list if exclude is not None: exclude.upper() if exclude in extensions: newExt=[] for item in extensions: if item != exclude: newExt.append(item) extensions=newExt del newExt return extensions
def _findExtnames(self, extname=None, exclude=None)
This method builds a list of all extensions which have 'EXTNAME'==extname and do not include any extensions with 'EXTNAME'==exclude, if any are specified for exclusion at all.
4.43012
4.380999
1.011212
def findExtNum(self, extname=None, extver=1):
    """Find the extension number of the given extname and extver.

    Returns None when no match is found, or for simple FITS files.
    """
    extnum = None
    extname = extname.upper()

    if not self._isSimpleFits:
        for ext in self._image:
            # NOTE(review): there is no 'break' here — if several HDUs
            # share the same EXTNAME/EXTVER, the LAST match wins.
            if (hasattr(ext, '_extension') and
                    'IMAGE' in ext._extension and
                    (ext.extname == extname) and
                    (ext.extver == extver)):
                extnum = ext.extnum
    else:
        log.info("Image is simple fits")

    return extnum
Find the extension number of the given extname and extver.
5.006927
4.767971
1.050117
extname=self._image[self.scienceExt,chip].header["EXTNAME"].lower() extver=self._image[self.scienceExt,chip].header["EXTVER"] expname = self._rootname # record extension-based name to reflect what extension a mask file corresponds to self._image[self.scienceExt,chip].rootname=expname + "_" + extname + str(extver) self._image[self.scienceExt,chip].sciname=self._filename + "[" + extname +","+str(extver)+"]" self._image[self.scienceExt,chip].dqrootname=self._rootname + "_" + extname + str(extver) # Needed to keep EXPNAMEs associated properly (1 EXPNAME for all chips) self._image[self.scienceExt,chip]._expname=expname self._image[self.scienceExt,chip]._chip =chip
def _assignRootname(self, chip)
Assign a unique rootname for the image based in the expname.
6.005898
5.54127
1.083849
# Define FITS output filenames for intermediate products # Build names based on final DRIZZLE output name # where 'output' normally would have been created # by 'process_input()' # outFinal = rootname+suffix+'.fits' outSci = rootname+suffix+'_sci.fits' outWeight = rootname+suffix+'_wht.fits' outContext = rootname+suffix+'_ctx.fits' outMedian = rootname+'_med.fits' # Build names based on input name origFilename = self._filename.replace('.fits','_OrIg.fits') outSky = rootname + '_sky.fits' outSingle = rootname+'_single_sci.fits' outSWeight = rootname+'_single_wht.fits' crCorImage = rootname+'_crclean.fits' # Build outputNames dictionary fnames={ 'origFilename': origFilename, 'outFinal': outFinal, 'outMedian': outMedian, 'outSci': outSci, 'outWeight': outWeight, 'outContext': outContext, 'outSingle': outSingle, 'outSWeight': outSWeight, 'outSContext': None, 'outSky': outSky, 'crcorImage': crCorImage, 'ivmFile': None } return fnames
def _setOutputNames(self,rootname,suffix='_drz')
Define the default output filenames for drizzle products, these are based on the original rootname of the image filename should be just 1 filename, so call this in a loop for chip names contained inside a file.
4.668521
4.611894
1.012279
self.virtualOutputs = {} for product in self.outputNames: self.virtualOutputs[product] = None
def _initVirtualOutputs(self)
Sets up the structure to hold all the output data arrays for this image in memory.
5.519304
4.988256
1.10646
def saveVirtualOutputs(self, outdict):
    """Assign in-memory versions of generated products for this
    ``imageObject`` based on dictionary 'outdict'.

    No-op unless the object is running in in-memory mode.
    """
    if not self.inmemory:
        return
    self.virtualOutputs.update(outdict)
Assign in-memory versions of generated products for this ``imageObject`` based on dictionary 'outdict'.
4.079978
3.711721
1.099215
def getOutputName(self, name):
    """Return the name of the file or PyFITS object associated with
    'name', depending on the setting of self.inmemory."""
    product = self.outputNames[name]
    if self.inmemory:
        # in-memory mode: the filename keys the saved virtual product
        product = self.virtualOutputs[product]
    return product
Return the name of the file or PyFITS object associated with that name, depending on the setting of self.inmemory.
14.749196
9.245404
1.5953
def updateOutputValues(self, output_wcs):
    """Copy info from output WCSObject into outputnames for each chip
    for use in creating outputimage object.
    """
    outputvals = self.outputValues

    outputvals['output'] = output_wcs.outputNames['outFinal']
    outputvals['outnx'], outputvals['outny'] = output_wcs.wcs.pixel_shape
    # total exposure bookkeeping comes from the output WCS object
    outputvals['texptime'] = output_wcs._exptime
    outputvals['texpstart'] = output_wcs._expstart
    outputvals['texpend'] = output_wcs._expend
    outputvals['nimages'] = output_wcs.nimages

    outputvals['scale'] = output_wcs.wcs.pscale #/ self._image[self.scienceExt,1].wcs.pscale
    outputvals['exptime'] = self._exptime

    # Mirror the output product names onto this image's name dictionary.
    outnames = self.outputNames
    outnames['outMedian'] = output_wcs.outputNames['outMedian']
    outnames['outFinal'] = output_wcs.outputNames['outFinal']
    outnames['outSci'] = output_wcs.outputNames['outSci']
    outnames['outWeight'] = output_wcs.outputNames['outWeight']
    outnames['outContext'] = output_wcs.outputNames['outContext']
Copy info from output WCSObject into outputnames for each chip for use in creating outputimage object.
3.674964
3.502348
1.049286
def updateContextImage(self, contextpar):
    """Reset the name of the context image to `None` if parameter
    ``context`` is `False`."""
    self.createContext = contextpar
    if contextpar:
        return
    log.info('No context image will be created for %s' % self._filename)
    self.outputNames['outContext'] = None
Reset the name of the context image to `None` if parameter ``context`` is `False`.
10.630541
9.36223
1.135471
def find_DQ_extension(self):
    """Return the suffix for the data quality extension and the name of
    the file which that DQ extension should be read from, as a
    ``(dqfile, dq_suffix)`` pair (both None when no DQ extension)."""
    dqfile = None
    dq_suffix = None
    if self.maskExt is not None:
        target = self.maskExt.lower()
        # Look for DQ extension in input file
        for hdu in self._image:
            hdr = hdu.header
            if 'extname' in hdr and hdr['extname'].lower() == target:
                dqfile = self._filename
                dq_suffix = self.maskExt
                break
    return dqfile, dq_suffix
Return the suffix for the data quality extension and the name of the file which that DQ extension should be read from.
4.072658
3.710251
1.097677
def getKeywordList(self, kw):
    """Return lists of all attribute values for all active
    (group-member) chips in the ``imageObject``."""
    values = []
    for chip in range(1, self._numchips + 1):
        sci_chip = self._image[self.scienceExt, chip]
        if sci_chip.group_member:
            values.append(sci_chip.__dict__[kw])
    return values
Return lists of all attribute values for all active chips in the ``imageObject``.
7.852935
6.811547
1.152886
def getflat(self, chip):
    """Method for retrieving a detector's flat field.

    Returns
    -------
    flat : array
        An array the same shape as the chip image, in **units of
        electrons**.  Falls back to a unit flat when the reference file
        cannot be found.
    """
    sci_chip = self._image[self.scienceExt, chip]

    # The keyword for ACS flat fields in the primary header of the flt
    # file is pfltfile. This flat file is already in the required
    # units of electrons.

    # The use of fileutil.osfn interprets any environment variable, such as
    # jref$, used in the specification of the reference filename
    filename = fileutil.osfn(self._image["PRIMARY"].header[self.flatkey])

    hdulist = None
    try:
        hdulist = fileutil.openImage(filename, mode='readonly', memmap=False)
        data = hdulist[(self.scienceExt, chip)].data

        # When the flat's shape differs from the science image, trim it
        # using the LTV offsets (presumably a full-frame reference file
        # being cut down to a subarray — TODO confirm).
        if data.shape[0] != sci_chip.image_shape[0]:
            ltv2 = int(np.round(sci_chip.ltv2))
        else:
            ltv2 = 0
        size2 = sci_chip.image_shape[0] + ltv2

        if data.shape[1] != sci_chip.image_shape[1]:
            ltv1 = int(np.round(sci_chip.ltv1))
        else:
            ltv1 = 0
        size1 = sci_chip.image_shape[1] + ltv1

        flat = data[ltv2:size2, ltv1:size1]

    except FileNotFoundError:
        # Missing reference file: use a constant flat of 1.
        flat = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
        log.warning("Cannot find flat field file '{}'".format(filename))
        log.warning("Treating flatfield as a constant value of '1'.")

    finally:
        if hdulist is not None:
            hdulist.close()

    return flat
Method for retrieving a detector's flat field. Returns ------- flat: array This method will return an array the same shape as the image in **units of electrons**.
4.410247
4.325498
1.019593
def getReadNoiseImage(self, chip):
    """Method for returning the readnoise image of a detector
    (in electrons): a constant array with the same shape as the
    chip image."""
    sci_chip = self._image[self.scienceExt, chip]
    unit = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
    return unit * sci_chip._rdnoise
Notes ===== Method for returning the readnoise image of a detector (in electrons). The method will return an array of the same shape as the image. :units: electrons
8.701744
8.850444
0.983199
def getexptimeimg(self, chip):
    """Return an array representing the exposure time per pixel for the
    detector (constant over the chip; squared when the chip's weighting
    parameter is 'expsq').

    Returns
    -------
    exptimeimg : numpy array
        Array of the same shape as the chip image.
    """
    sci_chip = self._image[self.scienceExt, chip]
    weight = sci_chip._exptime
    if sci_chip._wtscl_par == 'expsq':
        weight = weight * weight
    return np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype) * weight
Notes ===== Return an array representing the exposure time per pixel for the detector. This method will be overloaded for IR detectors which have their own EXP arrays, namely, WFC3/IR and NICMOS images. :units: None Returns ======= exptimeimg : numpy array The method will return an array of the same shape as the image.
6.676717
7.577618
0.88111
def getdarkimg(self, chip):
    """Return an array representing the dark image for the detector
    (constant, in electrons), with the same shape as the chip image."""
    sci_chip = self._image[self.scienceExt, chip]
    unit = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
    return unit * sci_chip.darkcurrent
Notes ===== Return an array representing the dark image for the detector. The method will return an array of the same shape as the image. :units: electrons
11.054105
10.223405
1.081255
def getskyimg(self, chip):
    """Return an array representing the sky image for the detector
    (in electrons): the constant sky value actually subtracted from the
    exposure by the skysub step, with the same shape as the chip image."""
    sci_chip = self._image[self.scienceExt, chip]
    unit = np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
    return unit * sci_chip.subtractedSky
Notes ===== Return an array representing the sky image for the detector. The value of the sky is what would actually be subtracted from the exposure by the skysub step. :units: electrons
10.555392
10.198093
1.035036
def getExtensions(self, extname='SCI', section=None):
    """Return the list of EXTVER values for extensions with name
    specified in extname.

    When 'section' is given it is returned unchanged (wrapped in a list
    if needed) instead of scanning the file.
    """
    if section is None:
        # scan all HDUs for matching EXTNAMEs
        section = [hdu.header['extver'] for hdu in self._image
                   if 'extname' in hdu.header and hdu.header['extname'] == extname]
    elif not isinstance(section, list):
        section = [section]
    return section
Return the list of EXTVER values for extensions with name specified in extname.
2.99817
2.73849
1.094826
def _countEXT(self, extname="SCI"):
    """Count the number of extensions in the file with the given name
    (``EXTNAME``); ``extname=None`` counts all extensions.

    Side effect: records ``extnum``/``extname``/``extver`` attributes on
    each non-primary HDU as it scans.
    """
    count = 0  # simple fits image
    if (self._image['PRIMARY'].header["EXTEND"]):
        for i, hdu in enumerate(self._image):
            if i > 0:
                hduExtname = False
                if 'EXTNAME' in hdu.header:
                    self._image[i].extnum = i
                    self._image[i].extname = hdu.header["EXTNAME"]
                    hduExtname = True
                if 'EXTVER' in hdu.header:
                    self._image[i].extver = hdu.header["EXTVER"]
                else:
                    # default EXTVER to 1 when the header omits it
                    self._image[i].extver = 1

                if ((extname is not None) and \
                        (hduExtname and (hdu.header["EXTNAME"] == extname))) \
                        or extname is None:
                    count = count + 1
    return count
Count the number of extensions in the file with the given name (``EXTNAME``).
3.169939
3.291926
0.962943
def buildMask(self, chip, bits=0, write=False):
    """Build masks as specified in the user parameters found in the
    configObj object.

    We should overload this function in the instrument specific
    implementations so that we can add other stuff to the badpixel mask
    (e.g. vignetting areas and chip boundaries for NICMOS) which are not
    defined in the DQ masks.
    """
    # Extract the DQ array for this chip and convert it to a weight mask
    # honoring the requested bit values.
    dqarr = self.getData(exten=self.maskExt+','+str(chip))
    dqmask = buildmask.buildMask(dqarr, bits)

    if write:
        phdu = fits.PrimaryHDU(data=dqmask,
                               header=self._image[self.maskExt, chip].header)
        dqmask_name = self._image[self.scienceExt, chip].dqrootname+'_dqmask.fits'
        log.info('Writing out DQ/weight mask: %s' % dqmask_name)
        # overwrite any pre-existing mask file
        if os.path.exists(dqmask_name):
            os.remove(dqmask_name)
        phdu.writeto(dqmask_name)
        del phdu
        self._image[self.scienceExt, chip].dqmaskname = dqmask_name
        # record the name of this mask file that was created for later
        # removal by the 'clean()' method
        self._image[self.scienceExt, chip].outputNames['dqmask'] = dqmask_name

    del dqarr
    return dqmask
Build masks as specified in the user parameters found in the configObj object. We should overload this function in the instrument specific implementations so that we can add other stuff to the badpixel mask? Like vignetting areas and chip boundries in nicmos which are camera dependent? these are not defined in the DQ masks, but should be masked out to get the best results in multidrizzle.
4.134411
3.976192
1.039792
def buildEXPmask(self, chip, dqarr):
    """Builds a weight mask by scaling an input DQ array with the chip's
    (scalar) exposure time."""
    log.info("Applying EXPTIME weighting to DQ mask for chip %s" % chip)
    exptime = self._image[self.scienceExt, chip]._exptime
    weighted = exptime * dqarr
    return weighted.astype(np.float32)
Builds a weight mask from an input DQ array and the exposure time per pixel for this chip.
8.736843
7.391082
1.182079
def buildIVMmask(self, chip, dqarr, scale):
    """Builds a weight mask from an input DQ array and either an IVM
    array provided by the user or a self-generated IVM array derived
    from the flat-field reference file associated with the input image.
    """
    sci_chip = self._image[self.scienceExt, chip]
    ivmname = self.outputNames['ivmFile']

    if ivmname is not None:
        log.info("Applying user supplied IVM files for chip %s" % chip)
        # Parse the input file name to get the extension we are working on
        extn = "IVM,{}".format(chip)
        # Open the mask image for updating and the IVM image
        ivm = fileutil.openImage(ivmname, mode='readonly', memmap=False)
        ivmfile = fileutil.getExtn(ivm, extn)
        # Multiply the IVM file by the input mask in place.
        ivmarr = ivmfile.data * dqarr
        ivm.close()

    else:
        log.info("Automatically creating IVM files for chip %s" % chip)
        # If no IVM files were provided by the user we will
        # need to automatically generate them based upon
        # instrument specific information.
        flat = self.getflat(chip)
        RN = self.getReadNoiseImage(chip)
        darkimg = self.getdarkimg(chip)
        skyimg = self.getskyimg(chip)
        #exptime = self.getexptimeimg(chip)
        #exptime = sci_chip._exptime
        #ivm = (flat*exptime)**2/(darkimg+(skyimg*flat)+RN**2)
        ivm = (flat)**2 / (darkimg + (skyimg*flat) + RN**2)
        # Multiply the IVM file by the input mask in place.
        ivmarr = ivm * dqarr

    # Update 'wt_scl' parameter to match use of IVM file
    sci_chip._wtscl = pow(sci_chip._exptime, 2) / pow(scale, 4)
    #sci_chip._wtscl = 1.0/pow(scale,4)

    return ivmarr.astype(np.float32)
Builds a weight mask from an input DQ array and either an IVM array provided by the user or a self-generated IVM array derived from the flat-field reference file associated with the input image.
5.418044
5.166123
1.048764
def buildERRmask(self, chip, dqarr, scale):
    """Build a weight mask from an input DQ array and the ERR array
    associated with the input image.

    Falls back to the plain DQ mask (exposure-time weighting only) when no
    ERR extension is available.
    """
    sci_chip = self._image[self.scienceExt, chip]

    # Set default value in case of error, or lack of ERR array
    errmask = dqarr

    if self.errExt is not None:
        try:
            # Attempt to open the ERR image.
            err = self.getData(exten=self.errExt + ',' + str(chip))

            log.info("Applying ERR weighting to DQ mask for chip %s" % chip)

            # Multiply the scaled ERR file by the input mask in place.
            exptime = sci_chip._exptime
            errmask = (exptime / err) ** 2 * dqarr

            # Update 'wt_scl' parameter to match use of IVM file
            sci_chip._wtscl = 1.0 / pow(scale, 4)

            del err
        # Bug fix: narrowed bare 'except:' so KeyboardInterrupt/SystemExit
        # are not swallowed.
        except Exception:
            # We cannot find an 'ERR' extension and the data isn't WFPC2.
            # Print a generic warning message and continue on with the
            # final drizzle step.
            print(textutil.textbox(
                'WARNING: No ERR weighting will be applied to the mask '
                'used in the final drizzle step! Weighting will be only '
                'by exposure time.\n\nThe data provided as input does not '
                'contain an ERR extension'), file=sys.stderr)
            # Bug fix: 'sys.stderr' was passed as a positional argument and
            # got printed; it must be the 'file=' keyword.
            print('\n Continue with final drizzle step...', file=sys.stderr)
    else:
        # If we were unable to find an 'ERR' extension to apply, one
        # possible reason was that the input was a 'standard' WFPC2 data
        # file that does not actually contain an error array. Test for
        # this condition and issue a Warning to the user and continue on to
        # the final drizzle.
        print(textutil.textbox(
            "WARNING: No ERR weighting will be applied to the mask used "
            "in the final drizzle step! Weighting will be only by "
            "exposure time.\n\nThe WFPC2 data provided as input does not "
            "contain ERR arrays. WFPC2 data is not supported by this "
            "weighting type.\n\nA workaround would be to create inverse "
            "variance maps and use 'IVM' as the final_wht_type. See the "
            "HELP file for more details on using inverse variance maps."),
            file=sys.stderr)
        print("\n Continue with final drizzle step...", file=sys.stderr)

    return errmask.astype(np.float32)
6.076285
5.931678
1.024379
def set_mt_wcs(self, image):
    """Reset each chip's WCS to a copy of the corresponding chip's WCS
    from another imageObject.
    """
    for extver in range(1, self._numchips + 1):
        own_chip = self._image[self.scienceExt, extver]
        ref_chip = image._image[image.scienceExt, extver]
        # Do we want to keep track of original WCS or not? No reason now...
        own_chip.wcs = ref_chip.wcs.copy()
7.382182
6.517708
1.132635
def set_wtscl(self, chip, wtscl_par):
    """Set the value of the wt_scl parameter as needed for drizzling.

    Accepts None (defaults to exposure-time weighting), the strings
    'exptime'/'expsq', a numeric string, or a number.
    """
    sci_chip = self._image[self.scienceExt, chip]
    exptime = 1  # sci_chip._exptime

    parval = 'unity'
    if wtscl_par is None:
        # Default case: wt_scl = exptime
        wtscl = exptime
    elif isinstance(wtscl_par, str) and not wtscl_par.isdigit():
        # String that is not a plain integer: try a float first, then the
        # recognized keywords 'expsq' / anything-else-means-'exptime'.
        try:
            wtscl = float(wtscl_par)
        except ValueError:
            if wtscl_par == 'expsq':
                wtscl = exptime * exptime
                parval = 'expsq'
            else:
                # Default to the 'exptime' behavior when not 'expsq'
                wtscl = exptime
    else:
        # Numeric value, or an integer passed in as a string
        wtscl = float(wtscl_par)

    sci_chip._wtscl_par = parval
    sci_chip._wtscl = wtscl
3.613101
3.533178
1.022621
def getInstrParameter(self, value, header, keyword):
    """Get an instrument parameter from a (value, header keyword) pair.

    Behavior:
      - if both a value and a header keyword are given, raise ValueError;
      - if the value is given, use it;
      - if only the header keyword is given, read it from the header;
      - if both are blank, or the keyword is not found, return None.
    """
    # Normalize the common "blank" spellings to None
    if isinstance(value, str) and value in ['None', '', ' ', 'INDEF']:
        value = None
    # Bug fix: the ambiguity test used truthiness ('if value and ...'), so a
    # legitimate value of 0 together with a keyword silently skipped the
    # documented error. Test explicitly for None instead.
    if value is not None and (keyword is not None and keyword.strip() != ''):
        exceptionMessage = "ERROR: Your input is ambiguous! Please specify either a value or a keyword.\n You specified both " + str(value) + " and " + str(keyword)
        raise ValueError(exceptionMessage)
    elif value is not None and value != '':
        return self._averageFromList(value)
    elif keyword is not None and keyword.strip() != '':
        return self._averageFromHeader(header, keyword)
    else:
        return None
4.666164
4.598581
1.014696
_list = '' for _kw in keyword.split(','): if _kw in header: _list = _list + ',' + str(header[_kw]) else: return None return self._averageFromList(_list)
def _averageFromHeader(self, header, keyword)
Averages out values taken from header. The keywords where to read values from are passed as a comma-separated list.
4.093936
3.529355
1.159967
_result = 0.0 _count = 0 for _param in param.split(','): if _param != '' and float(_param) != 0.0: _result = _result + float(_param) _count += 1 if _count >= 1: _result = _result / _count return _result
def _averageFromList(self, param)
Averages out values passed as a comma-separated list, disregarding the zero-valued entries.
2.691044
2.533114
1.062346
def compute_wcslin(self, undistort=True):
    """Compute the undistorted WCS based solely on the known distortion
    model information associated with each chip's WCS.
    """
    for extver in range(1, self._numchips + 1):
        sci_chip = self._image[self.scienceExt, extver]
        chip_wcs = sci_chip.wcs.copy()

        # When no SIP model exists (or undistortion was not requested),
        # strip all distortion components and fall back to a linear WCS.
        if chip_wcs.sip is None or not undistort or chip_wcs.instrument == 'DEFAULT':
            chip_wcs.sip = None
            chip_wcs.cpdis1 = None
            chip_wcs.cpdis2 = None
            chip_wcs.det2im = None
            undistort = False

        # compute the undistorted 'natural' plate scale for this chip
        wcslin = distortion.utils.output_wcs([chip_wcs], undistort=undistort)
        sci_chip.wcslin_pscale = wcslin.pscale
5.77054
5.789257
0.996767
def set_units(self, chip):
    """Define brightness units for this chip from its BUNIT keyword.

    Any 'ergs/cm...' style BUNIT is replaced by 'ELECTRONS/S'; the derived
    in_units value is 'cps' for rate units, 'counts' otherwise.
    """
    sci_chip = self._image[self.scienceExt, chip]
    hdr = sci_chip.header

    # Determine output value of BUNITS, rejecting 'ergs...' style units
    if 'BUNIT' in hdr and hdr['BUNIT'].find('ergs') < 0:
        bunit = hdr['BUNIT']
    else:
        bunit = 'ELECTRONS/S'
    sci_chip._bunit = bunit

    # A rate unit ('.../s') maps to counts-per-second
    sci_chip.in_units = 'cps' if '/s' in bunit.lower() else 'counts'
5.720454
5.407879
1.0578
def getTemplates(fnames, blend=True):
    """Process all headers to produce combined headers (plus an optional
    table) following the rules defined by each instrument.
    """
    newtab = None
    if blend:
        # apply rules to create final version of headers, plus table
        newhdrs, newtab = blendheaders.get_blended_headers(inputs=fnames)
    else:
        newhdrs = blendheaders.getSingleTemplate(fnames[0])
    cleanTemplates(newhdrs[1], newhdrs[2], newhdrs[3])
    return newhdrs, newtab
8.870178
8.094201
1.095868
wname = wcs.wcs.name if not single: wname = 'DRZWCS' # Update WCS Keywords based on PyDrizzle product's value # since 'drizzle' itself doesn't update that keyword. hdr['WCSNAME'] = wname hdr.set('VAFACTOR', value=1.0, after=after) hdr.set('ORIENTAT', value=wcs.orientat, after=after) # Use of 'after' not needed if these keywords already exist in the header if after in WCS_KEYWORDS: after = None if 'CTYPE1' not in hdr: hdr.set('CTYPE2', value=wcs.wcs.ctype[1], after=after) hdr.set('CTYPE1', value=wcs.wcs.ctype[0], after=after) hdr.set('CRPIX2', value=wcs.wcs.crpix[1], after=after) hdr.set('CRPIX1', value=wcs.wcs.crpix[0], after=after) hdr.set('CRVAL2', value=wcs.wcs.crval[1], after=after) hdr.set('CRVAL1', value=wcs.wcs.crval[0], after=after) hdr.set('CD2_2', value=wcs.wcs.cd[1][1], after=after) hdr.set('CD2_1', value=wcs.wcs.cd[1][0], after=after) hdr.set('CD1_2', value=wcs.wcs.cd[0][1], after=after) hdr.set('CD1_1', value=wcs.wcs.cd[0][0], after=after) # delete distortion model related keywords deleteDistortionKeywords(hdr) if not blot: blendheaders.remove_distortion_keywords(hdr)
def addWCSKeywords(wcs,hdr,blot=False,single=False,after=None)
Update input header 'hdr' with WCS keywords.
2.732857
2.726318
1.002399
outname,outextn = fileutil.parseFilename(output) outextname,outextver = fileutil.parseExtn(outextn) if fileutil.findFile(outname): if clobber: log.info('Deleting previous output product: %s' % outname) fileutil.removeFile(outname) else: log.warning('Output file %s already exists and overwrite not ' 'specified!' % outname) log.error('Quitting... Please remove before resuming operations.') raise IOError # Now update WCS keywords with values from provided WCS if hasattr(wcs.sip,'a_order'): siphdr = True else: siphdr = False wcshdr = wcs.wcs2header(sip2hdr=siphdr) if template is not None: # Get default headers from multi-extension FITS file # If input data is not in MEF FITS format, it will return 'None' # NOTE: These are HEADER objects, not HDUs (prihdr,scihdr,errhdr,dqhdr),newtab = getTemplates(template,EXTLIST) if scihdr is None: scihdr = fits.Header() indx = 0 for c in prihdr.cards: if c.keyword not in ['INHERIT','EXPNAME']: indx += 1 else: break for i in range(indx,len(prihdr)): scihdr.append(prihdr.cards[i]) for i in range(indx, len(prihdr)): del prihdr[indx] else: scihdr = fits.Header() prihdr = fits.Header() # Start by updating PRIMARY header keywords... prihdr.set('EXTEND', value=True, after='NAXIS') prihdr['FILENAME'] = outname if outextname == '': outextname = 'sci' if outextver == 0: outextver = 1 scihdr['EXTNAME'] = outextname.upper() scihdr['EXTVER'] = outextver for card in wcshdr.cards: scihdr[card.keyword] = (card.value, card.comment) # Create PyFITS HDUList for all extensions outhdu = fits.HDUList() # Setup primary header as an HDU ready for appending to output FITS file prihdu = fits.PrimaryHDU(header=prihdr) scihdu = fits.ImageHDU(header=scihdr,data=data) outhdu.append(prihdu) outhdu.append(scihdu) outhdu.writeto(outname) if verbose: print('Created output image: %s' % outname)
def writeSingleFITS(data,wcs,output,template,clobber=True,verbose=True)
Write out a simple FITS file given a numpy array and the name of another FITS file to use as a template for the output image header.
4.078175
4.138993
0.985306
def writeDrizKeywords(hdr, imgnum, drizdict):
    """Write drizzle-related keywords to ``hdr`` as a record of the
    processing performed.

    Parameters
    ----------
    hdr : header or mapping
        Destination header; each entry is written as ``(value, comment)``.
    imgnum : int
        Image number used to build the ``D###`` keyword prefix.
    drizdict : dict
        Maps keyword suffix -> {'value': ..., 'comment': ...}.
    """
    prefix = 'D%03d' % imgnum
    for key in drizdict:
        val = drizdict[key]['value']
        if val is None:
            val = ""
        comment = drizdict[key]['comment']
        if comment is None:
            comment = ""
        # Bug fix: write the normalized 'comment' instead of the raw
        # (possibly None) dictionary entry.
        hdr[prefix + key] = (val, comment)
3.275932
3.744884
0.874775
def find_kwupdate_location(self, hdr, keyword):
    """Find the last keyword in ``hdr`` that precedes ``keyword`` in the
    original full input headers.

    Relies on the keyword ordering of the original input files so an
    updated keyword can be re-inserted at the right place even if it was
    removed from the output header.
    """
    # Locate the keyword in one of the full templates and collect every
    # keyword that comes before it there.
    preceding = None
    for extn in self.fullhdrs:
        if keyword in extn:
            pos = extn.index(keyword)
            preceding = list(extn.keys())[:pos]
            break

    last_kw = None
    if preceding:
        # Walk backwards to find the nearest of those that survives in hdr
        for candidate in preceding[::-1]:
            if candidate in hdr:
                last_kw = candidate
                break

    if last_kw is None:
        # Fall back to the last non-HISTORY keyword in the output header
        hdrkeys = list(hdr.keys())
        idx = -1
        last_kw = hdrkeys[idx]
        while last_kw == 'HISTORY':
            idx -= 1
            last_kw = hdrkeys[idx]

    return last_kw
4.987636
4.788076
1.041678
# Extract some global information for the keywords _geom = 'User parameters' _imgnum = 0 for pl in self.parlist: # Start by building up the keyword prefix based # on the image number for the chip #_keyprefix = 'D%03d'%_imgnum _imgnum += 1 drizdict = DRIZ_KEYWORDS.copy() # Update drizdict with current values drizdict['VER']['value'] = pl['driz_version'][:44] drizdict['DATA']['value'] = pl['data'][:64] drizdict['DEXP']['value'] = pl['exptime'] drizdict['OUDA']['value'] = pl['outFinal'][:64] drizdict['OUWE']['value'] = pl['outWeight'][:64] if pl['outContext'] is None: outcontext = "" else: outcontext = pl['outContext'][:64] drizdict['OUCO']['value'] = outcontext if self.single: drizdict['MASK']['value'] = pl['singleDrizMask'][:64] else: drizdict['MASK']['value'] = pl['finalMask'][:64] # Process the values of WT_SCL to be consistent with # what IRAF Drizzle would output if 'wt_scl_val' in pl: _wtscl = pl['wt_scl_val'] else: if pl['wt_scl'] == 'exptime': _wtscl = pl['exptime'] elif pl['wt_scl'] == 'expsq': _wtscl = pl['exptime']*pl['exptime'] else: _wtscl = pl['wt_scl'] drizdict['WTSC']['value'] = _wtscl drizdict['KERN']['value'] = pl['kernel'] drizdict['PIXF']['value'] = pl['pixfrac'] drizdict['OUUN']['value'] = self.units if pl['fillval'] is None: _fillval = 'INDEF' else: _fillval = pl['fillval'] drizdict['FVAL']['value'] = _fillval drizdict['WKEY']['value'] = pl['driz_wcskey'] drizdict['SCAL'] = {'value':pl['scale'],'comment':'Drizzle, pixel size (arcsec) of output image'} drizdict['ISCL'] = {'value':pl['idcscale'],'comment':'Drizzle, default IDCTAB pixel size(arcsec)'} # Now update header with values writeDrizKeywords(hdr,_imgnum,drizdict) del drizdict # Add version information as HISTORY cards to the header if versions is not None: ver_str = "AstroDrizzle processing performed using: " hdr.add_history(ver_str) for k in versions.keys(): ver_str = ' '+str(k)+' Version '+str(versions[k]) hdr.add_history(ver_str)
def addDrizKeywords(self,hdr,versions)
Add drizzle parameter keywords to header.
4.339401
4.277949
1.014365
def iter_fit_shifts(xy, uv, nclip=3, sigma=3.0):
    """Perform an iterative shift fit with ``nclip`` sigma-clipping
    iterations between matched position lists ``xy`` and ``uv``.
    """
    fit = fit_shifts(xy, uv)
    if nclip is None:
        nclip = 0
    for _ in range(nclip):
        resids = compute_resids(xy, uv, fit)
        dist = np.sqrt(resids[:, 0] ** 2 + resids[:, 1] ** 2)
        # keep only points within sigma * std of the residual distribution
        keep = dist < sigma * dist.std()
        xy = xy[keep]
        uv = uv[keep]
        fit = fit_shifts(xy, uv)
    fit['img_coords'] = xy
    fit['ref_coords'] = uv
    return fit
4.2011
4.139939
1.014773
def fit_all(xy, uv, mode='rscale', center=None, verbose=True):
    """Perform a linear fit ('shift', 'general' or 'rscale') between
    matched lists of pixel positions xy and uv.
    """
    if mode not in ['general', 'shift', 'rscale']:
        mode = 'rscale'
    # cast input lists as numpy ndarrays for fitting (no-op for arrays)
    xy = np.asarray(xy)
    uv = np.asarray(uv)

    logstr = 'Performing "%s" fit' % mode
    if verbose:
        print(logstr)
    else:
        log.info(logstr)

    if mode == 'shift':
        return fit_shifts(xy, uv)
    if mode == 'general':
        return fit_general(xy, uv)
    return geomap_rscale(xy, uv, center=center)
2.21262
2.177199
1.016269
def fit_shifts(xy, uv):
    """Fit only a shift between matched lists of positions 'xy' and 'uv'.

    Output: (same as for fit_arrays)
    DEVELOPMENT NOTE: checks are still needed to verify that enough
    objects are available for a fit.
    """
    deltas = xy - uv
    # P/Q coefficients for an identity transform plus the mean offset
    pcoef = np.array([1.0, 0.0, deltas[:, 0].mean(dtype=np.float64)])
    qcoef = np.array([0.0, 1.0, deltas[:, 1].mean(dtype=np.float64)])
    fit = build_fit(pcoef, qcoef, 'shift')
    resids = deltas - fit['offset']
    fit['resids'] = resids
    fit['rms'] = resids.std(axis=0)
    fit['rmse'] = float(np.sqrt(np.mean(2 * resids ** 2)))
    fit['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
    return fit
3.316549
3.383902
0.980096
def fit_general(xy, uv):
    """Fit translation, rotation and (possibly different X/Y) scale between
    matched lists of positions 'xy' and 'uv'.

    Output: (same as for fit_arrays)
    DEVELOPMENT NOTE: checks are still needed to verify that enough
    objects are available for a fit.
    """
    # Set up sums/products used for the normal equations
    pts = uv.astype(ndfloat128)
    ref = xy.astype(ndfloat128)
    x, y = pts[:, 0], pts[:, 1]
    u, v = ref[:, 0], ref[:, 1]

    Sx, Sy = x.sum(), y.sum()
    Su, Sv = u.sum(), v.sum()
    Sux, Svx = np.dot(u, x), np.dot(v, x)
    Suy, Svy = np.dot(u, y), np.dot(v, y)
    Sxx, Syy, Sxy = np.dot(x, x), np.dot(y, y), np.dot(x, y)
    n = len(xy[:, 0])

    M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])
    U = np.array([Su, Sux, Suy])
    V = np.array([Sv, Svx, Svy])

    # Solve u = P0 + P1*x + P2*y ;  v = Q0 + Q1*x + Q2*y
    try:
        invM = np.linalg.inv(M.astype(np.float64))
    except np.linalg.LinAlgError:
        raise SingularMatrixError(
            "Singular matrix: suspected colinear points."
        )
    P = np.dot(invM, U).astype(np.float64)
    Q = np.dot(invM, V).astype(np.float64)
    if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):
        raise ArithmeticError('Singular matrix.')

    # Package the shift, rotation and scale changes plus fit statistics
    result = build_fit(P, Q, 'general')
    resids = xy - np.dot(uv, result['fit_matrix']) - result['offset']
    result['rms'] = resids.std(axis=0)
    result['resids'] = resids
    result['rmse'] = float(np.sqrt(np.mean(2 * resids ** 2)))
    result['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
    return result
2.807845
2.84088
0.988372
def fit_arrays(uv, xy):
    """Generalized fit between matched position lists xy and uv, solving for
    translation, rotation and independent X/Y scale changes.

    Output: (Xo,Yo),Rot,(Scale,Sx,Sy).
    Algorithm and nomenclature provided by Colin Cox (11 Nov 2004).
    """
    if not isinstance(xy, np.ndarray):
        # cast input list as numpy ndarray for fitting
        xy = np.array(xy)
    if not isinstance(uv, np.ndarray):
        # cast input list as numpy ndarray for fitting
        uv = np.array(uv)

    # Sums/products for the normal equations
    x, y = xy[:, 0], xy[:, 1]
    u, v = uv[:, 0], uv[:, 1]
    Sx, Sy = x.sum(), y.sum()
    Su, Sv = u.sum(), v.sum()
    Sux, Svx = np.dot(u, x), np.dot(v, x)
    Suy, Svy = np.dot(u, y), np.dot(v, y)
    Sxx, Syy, Sxy = np.dot(x, x), np.dot(y, y), np.dot(x, y)
    n = len(x)

    M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])
    U = np.array([Su, Sux, Suy])
    V = np.array([Sv, Svx, Svy])

    # Solve u = P0 + P1*x + P2*y ;  v = Q0 + Q1*x + Q2*y
    try:
        invM = np.linalg.inv(M.astype(np.float64))
    except np.linalg.LinAlgError:
        raise SingularMatrixError(
            "Singular matrix: suspected colinear points."
        )
    P = np.dot(invM, U).astype(np.float64)
    Q = np.dot(invM, V).astype(np.float64)
    if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):
        raise ArithmeticError('Singular matrix.')

    # Return the shift, rotation, and scale changes
    return build_fit(P, Q, 'general')
2.695827
2.616877
1.03017
def apply_old_coeffs(xy, coeffs):
    """Apply the offset/rotation/scale values from an old-style linear fit
    to an array of x,y positions.

    coeffs = (offset, rotation_deg, (scale, ...)).
    """
    angle = np.deg2rad(coeffs[1])
    rot = np.array([[np.cos(angle), np.sin(angle)],
                    [-np.sin(angle), np.cos(angle)]], dtype=np.float64)
    return np.dot(xy, rot) / coeffs[2][0] + coeffs[0]
2.964115
3.157636
0.938713
def apply_fit(xy, coeffs):
    """Apply linear-fit coefficients (the 'coeffs' member of the
    fit_arrays() output) to an array of x,y positions.
    """
    px, py = coeffs[0], coeffs[1]
    x = xy[:, 0]
    y = xy[:, 1]
    return (px[2] + px[0] * x + px[1] * y,
            py[2] + py[0] * x + py[1] * y)
1.757097
2.103438
0.835345
def compute_resids(xy, uv, fit):
    """Compute the residuals of applying ``fit`` to ``uv`` relative to ``xy``."""
    print('FIT coeffs: ', fit['coeffs'])
    fitted_x, fitted_y = apply_fit(uv, fit['coeffs'])
    return xy - np.transpose([fitted_x, fitted_y])
5.619715
6.401985
0.877808
# Support input of filenames from command-line without a parameter name # then copy this into input_dict for merging with TEAL ConfigObj # parameters. # Load any user-specified configobj if isinstance(configobj, (str, bytes)): if configobj == 'defaults': # load "TEAL"-defaults (from ~/.teal/): configobj = teal.load(__taskname__) else: if not os.path.exists(configobj): raise RuntimeError('Cannot find .cfg file: '+configobj) configobj = teal.load(configobj, strict=False) elif configobj is None: # load 'astrodrizzle' parameter defaults as described in the docs: configobj = teal.load(__taskname__, defaults=True) if input and not util.is_blank(input): input_dict['input'] = input elif configobj is None: raise TypeError("AstroDrizzle() needs either 'input' or " "'configobj' arguments") if 'updatewcs' in input_dict: # user trying to explicitly turn on updatewcs configobj['updatewcs'] = input_dict['updatewcs'] del input_dict['updatewcs'] # If called from interactive user-interface, configObj will not be # defined yet, so get defaults using EPAR/TEAL. # # Also insure that the input_dict (user-specified values) are folded in # with a fully populated configObj instance. try: configObj = util.getDefaultConfigObj(__taskname__, configobj, input_dict, loadOnly=(not editpars)) log.debug('') log.debug("INPUT_DICT:") util.print_cfg(input_dict, log.debug) log.debug('') # If user specifies optional parameter for final_wcs specification in input_dict, # insure that the final_wcs step gets turned on util.applyUserPars_steps(configObj, input_dict, step='3a') util.applyUserPars_steps(configObj, input_dict, step='7a') except ValueError: print("Problem with input parameters. Quitting...", file=sys.stderr) return if not configObj: return configObj['mdriztab'] = mdriztab # If 'editpars' was set to True, util.getDefaultConfigObj() will have # already called 'run()'. if not editpars: run(configObj, wcsmap=wcsmap)
def AstroDrizzle(input=None, mdriztab=False, editpars=False, configobj=None, wcsmap=None, **input_dict)
AstroDrizzle command-line interface
6.409694
6.370564
1.006142
global _fidx tag = 'virtual' log.info((tag+' ')*7) for iii in imgObjList: log.info('-'*80) log.info(tag+' orig nm: '+iii._original_file_name) log.info(tag+' names.data: '+str(iii.outputNames["data"])) log.info(tag+' names.orig: '+str(iii.outputNames["origFilename"])) log.info(tag+' id: '+str(id(iii))) log.info(tag+' in.mem: '+str(iii.inmemory)) log.info(tag+' vo items...') for vok in sorted(iii.virtualOutputs.keys()): FITSOBJ = iii.virtualOutputs[vok] log.info(tag+': '+str(vok)+' = '+str(FITSOBJ)) if vok.endswith('.fits'): if not hasattr(FITSOBJ, 'data'): FITSOBJ = FITSOBJ[0] # list of PrimaryHDU ? if not hasattr(FITSOBJ, 'data'): FITSOBJ = FITSOBJ[0] # was list of HDUList ? dbgname = 'DEBUG_%02d_'%(_fidx,) dbgname+=os.path.basename(vok) _fidx+=1 FITSOBJ.writeto(dbgname) log.info(tag+' wrote: '+dbgname) log.info('\n'+vok) if hasattr(FITSOBJ, 'data'): log.info(str(FITSOBJ._summary())) log.info('min and max are: '+str( (FITSOBJ.data.min(), FITSOBJ.data.max()) )) log.info('avg and sum are: '+str( (FITSOBJ.data.mean(), FITSOBJ.data.sum()) )) # log.info(str(FITSOBJ.data)[:75]) else: log.info(vok+' has no .data attr') log.info(str(type(FITSOBJ))) log.info(vok+'\n') log.info('-'*80)
def _dbg_dump_virtual_outputs(imgObjList)
dump some helpful information. strictly for debugging
3.628543
3.643831
0.995805
def getdarkcurrent(self, chip):
    """Return the dark current for the WFC3 UVIS detector.

    Read from the instrument-specific MEANDARK keyword.

    Returns
    -------
    darkcurrent : float
        The dark current value with **units of electrons**.

    Raises
    ------
    ValueError
        If the MEANDARK keyword is missing from the chip header.
    """
    try:
        return self._image[self.scienceExt, chip].header['MEANDARK']
    # Bug fix: narrowed the bare 'except:' to KeyError so unrelated errors
    # (including KeyboardInterrupt) are not converted into ValueError.
    except KeyError:
        msg = "#############################################\n"
        msg += "# #\n"
        msg += "# Error: #\n"
        msg += "# Cannot find the value for 'MEANDARK' #\n"
        msg += "# in the image header. WFC3 input images #\n"
        msg += "# are expected to have this header #\n"
        msg += "# keyword. #\n"
        msg += "# #\n"
        msg += "# Error occured in WFC3UVISInputImage class #\n"
        msg += "# #\n"
        msg += "#############################################\n"
        raise ValueError(msg)
3.763229
3.715998
1.01271
def doUnitConversions(self):
    """Record unit-conversion factors for each chip.

    WF3 IR data come out in electrons, so no pixel manipulation is done;
    only the per-chip conversion factor and effective gain are set.
    """
    # Image information
    handle = fileutil.openImage(self._filename, mode='readonly', memmap=False)

    for chip in self.returnAllChips(extname=self.scienceExt):
        if '/S' in chip._bunit:
            # rate units: converting to electrons requires the exposure time
            factor = chip._exptime
        else:
            factor = 1.0
            print("Input %s[%s,%d] already in units of ELECTRONS"
                  % (self._filename, self.scienceExt, chip._chip))

        chip._effGain = 1.0  # chip._gain #1.
        chip._conversionFactor = factor  # 1.

    handle.close()
    self._effGain = 1.0
10.691883
9.321155
1.147056
def getdarkimg(self, chip):
    """Return an array representing the dark image for the detector.

    Returns
    -------
    dark : array
        Dark image array in the same shape as the input image with
        **units of cps**.
    """
    sci_chip = self._image[self.scienceExt, chip]

    # First attempt to get the dark image specified by the "DARKFILE"
    # keyword in the primary keyword of the science data.
    try:
        filename = self.header["DARKFILE"]
        handle = fileutil.openImage(filename, mode='readonly', memmap=False)
        hdu = fileutil.getExtn(handle, extn="sci,1")
        darkobj = hdu.data[sci_chip.ltv2:sci_chip.size2,
                           sci_chip.ltv1:sci_chip.size1]
    # If the darkfile cannot be located, create the dark image from
    # what we know about the detector dark current and assume a
    # constant dark current for the whole image.
    # Bug fix: bare 'except:' narrowed to Exception so KeyboardInterrupt
    # and SystemExit are not silently absorbed by the fallback.
    except Exception:
        darkobj = (np.ones(sci_chip.image_shape,
                           dtype=sci_chip.image_dtype) *
                   self.getdarkcurrent())
    return darkobj
6.231945
6.509984
0.95729
def getskyimg(self, chip):
    """Return an array representing the sky image for the detector.

    The value is what the skysub step would actually subtract from the
    exposure.

    :units: electrons
    """
    sci_chip = self._image[self.scienceExt, chip]
    sky = np.full(sci_chip.image_shape, sci_chip.subtractedSky,
                  dtype=sci_chip.image_dtype)
    if sci_chip._conversionFactor != 1.0:
        # Units are not already ELECTRONS: scale by the exposure time map
        sky *= self.getexptimeimg(chip)
    return sky
9.571353
8.949107
1.069532
def getdarkcurrent(self, extver):
    """Return the dark current for the ACS detector.

    Read from the instrument-specific MEANDARK keyword; the header value
    is in **units of electrons**.

    Returns
    -------
    darkcurrent : float
        Dark current value for the ACS detector in units of electrons.

    Raises
    ------
    ValueError
        If the MEANDARK keyword is missing from the chip header.
    """
    try:
        return self._image[self.scienceExt, extver].header['MEANDARK']
    # Bug fixes: narrowed the bare 'except:' to KeyError, and renamed the
    # message variable (it previously shadowed the builtin 'str').
    except KeyError:
        msg = "#############################################\n"
        msg += "# #\n"
        msg += "# Error: #\n"
        msg += "# Cannot find the value for 'MEANDARK' #\n"
        msg += "# in the image header. ACS input images #\n"
        msg += "# are expected to have this header #\n"
        msg += "# keyword. #\n"
        msg += "# #\n"
        msg += "# Error occured in the ACSInputImage class #\n"
        msg += "# #\n"
        msg += "#############################################\n"
        raise ValueError(msg)
3.807354
3.718319
1.023945
def setInstrumentParameters(self, instrpars):
    """Sets the instrument parameters."""
    pri_header = self._image[0].header

    # Invalid user-supplied (value, keyword) pairs fall back to defaults
    if self._isNotValid(instrpars['gain'], instrpars['gnkeyword']):
        instrpars['gnkeyword'] = None
    if self._isNotValid(instrpars['rdnoise'], instrpars['rnkeyword']):
        instrpars['rnkeyword'] = None
    if self._isNotValid(instrpars['exptime'], instrpars['expkeyword']):
        instrpars['expkeyword'] = 'EXPTIME'

    # Read Noise and Gain are a special case since they are not populated
    # in the SBC primary header for the MAMA
    for chip in self.returnAllChips(extname=self.scienceExt):
        chip._gain = 1.0
        chip._rdnoise = 0.0
        chip._exptime = self.getInstrParameter(
            instrpars['exptime'], pri_header, instrpars['expkeyword'])

        if chip._exptime is None:
            print('ERROR: invalid instrument task parameter')
            raise ValueError

    # Determine whether the user kept the default readnoise/gain; if not,
    # both values must have been supplied explicitly.
    usingDefaultGain = instrpars['gnkeyword'] is None
    usingDefaultReadnoise = instrpars['rnkeyword'] is None

    if usingDefaultReadnoise and usingDefaultGain:
        # Case 1: no gain or readnoise information supplied
        self._setSBCchippars()
    elif usingDefaultReadnoise:
        # Case 2: user supplied a gain value only
        self._setDefaultSBCReadnoise()
    elif usingDefaultGain:
        # Case 3: user supplied a readnoise value only
        self._setDefaultSBCGain()
    # Otherwise both were supplied; use them as-is.
4.326445
4.33398
0.998261
if numarrayObjectList in [None, []]: return None tsum = np.zeros(numarrayObjectList[0].shape, dtype=numarrayObjectList[0].dtype) for image in numarrayObjectList: tsum += image return tsum
def _sumImages(self,numarrayObjectList)
Sum a list of numarray objects.
2.963388
2.96224
1.000388
def gaussian1(height, x0, y0, a, b, c):
    """Return a 2D Gaussian function.

    height - the amplitude of the gaussian
    x0, y0 - center of the gaussian
    a, b, c - ellipse parameters (coefficients in the quadratic form)
    """
    def _gauss(x, y):
        dx = x - x0
        dy = y - y0
        return height * np.exp(-0.5 * (a * dx ** 2 + b * dx * dy + c * dy ** 2))
    return _gauss
2.660871
2.583029
1.030136
def gausspars(fwhm, nsigma=1.5, ratio=1, theta=0.):
    """Compute elliptical Gaussian kernel dimensions and quadratic-form
    parameters.

    fwhm   - full width at half maximum of the observation
    nsigma - cut the gaussian at nsigma
    ratio  - ratio of ysigma/xsigma
    theta  - position angle of the major axis, degrees counter-clockwise
             from the x axis

    Returns nx, ny (kernel dimensions) and the ellipse parameters
    a, b, c, f defining a*(x-x0)^2 + b*(x-x0)*(y-y0) + c*(y-y0)^2 <= 2*f.
    """
    xsigma = fwhm / FWHM2SIG
    ysigma = ratio * xsigma

    f = nsigma ** 2 / 2.
    theta_rad = np.deg2rad(theta)
    cost = np.cos(theta_rad)
    sint = np.sin(theta_rad)

    if ratio == 0:  # 1D Gaussian
        # Bug fix: the angle checks previously ran AFTER deg2rad, comparing
        # the radian value against 90/180 degrees, so theta=90 or 180 never
        # matched and raised spuriously. Compare in degrees instead.
        if theta == 0 or theta == 180:
            a = 1 / xsigma ** 2
            b = 0.0
            c = 0.0
        elif theta == 90:
            a = 0.0
            b = 0.0
            c = 1 / xsigma ** 2
        else:
            print('Unable to construct 1D Gaussian with these parameters\n')
            raise ValueError
        nx = 2 * int(max(2, (xsigma * nsigma * np.abs(cost)))) + 1
        ny = 2 * int(max(2, (xsigma * nsigma * np.abs(sint)))) + 1
    else:  # 2D gaussian
        xsigma2 = xsigma * xsigma
        ysigma2 = ysigma * ysigma
        a = cost ** 2 / xsigma2 + sint ** 2 / ysigma2
        b = 2 * cost * sint * (1.0 / xsigma2 - 1.0 / ysigma2)
        c = sint ** 2 / xsigma2 + cost ** 2 / ysigma2
        d = b ** 2 - 4 * a * c  # discriminant
        nx = 2 * int(2 * max(1, nsigma * math.sqrt(-c / d))) + 1
        ny = 2 * int(2 * max(1, nsigma * math.sqrt(-a / d))) + 1

    return nx, ny, a, b, c, f
2.653348
2.569721
1.032543
def moments(data, cntr):
    """Return (height, x, y, width_x, width_y): Gaussian parameters of a 2D
    distribution estimated from its moments around ``cntr``.

    Raises
    ------
    ValueError
        If ``cntr`` lies outside the bounds of ``data``.
    """
    x, y = cntr
    xi = int(x)
    yi = int(y)
    # Bug fix: the bare 'raise ValueError' carried no diagnostic message.
    if xi < 0 or xi >= data.shape[1] or yi < 0 or yi >= data.shape[0]:
        raise ValueError(
            "Center (%g, %g) lies outside data of shape %s" % (x, y, data.shape))
    # second moment of the column through the center gives the x-width
    col = data[:, xi]
    width_x = np.sqrt(abs(((np.arange(col.size) - y) ** 2 * col).sum() / col.sum()))
    # second moment of the row through the center gives the y-width
    row = data[yi, :]
    width_y = np.sqrt(abs(((np.arange(row.size) - x) ** 2 * row).sum() / row.sum()))
    height = data.max()
    return height, x, y, width_x, width_y
2.288437
1.933076
1.183832
def apply_nsigma_separation(fitind, fluxes, separation, niter=10):
    """Collapse detections that lie closer together than ``separation``.

    Sources are sorted by Y so near-duplicates become adjacent; when two
    consecutive entries are within ``separation`` pixels, the first of the
    pair is dropped.  The pass is repeated up to ``niter`` times or until
    no close pairs remain.  Only sequentially adjacent (in Y) duplicates
    are detected, which is sufficient to keep the final list manageable.

    Returns
    -------
    (fitind, fluxes) : tuple of lists
        Filtered positions (as [x, y] float pairs) and matching fluxes.
    """
    for _ in range(niter):
        if not fitind:
            break
        pos = np.array(fitind, np.float32)
        flx = np.array(fluxes, np.float32)
        order = np.argsort(pos[:, 1])  # group by Y position
        pos = pos[order]
        flx = flx[order]
        fitind = pos.tolist()
        fluxes = flx.tolist()
        deltas = pos[1:] - pos[:-1]
        dist = np.sqrt(deltas[:, 0]**2 + deltas[:, 1]**2)
        close = np.where(dist <= separation)[0]
        if close.shape[0] == 0:
            break
        # Delete from the end so earlier indices stay valid.
        for idx in close[::-1]:
            del fitind[idx]
            del fluxes[idx]
    return fitind, fluxes
2.979783
2.868359
1.038846
def xy_round(data, x0, y0, skymode, ker2d, xsigsq, ysigsq, datamin=None, datamax=None):
    """Compute the center of a source.

    Original code from IRAF.noao.digiphot.daofind.apfind ap_xy_round();
    the heavy lifting is delegated to the C implementation in cdriz.

    Returns
    -------
    (x, y, round) : tuple
        Fitted center and roundness, or (None, None, None) when the C
        routine could not produce a fit.
    """
    # Removed the unused 'nyk, nxk = ker2d.shape' unpack and the local
    # 'round' name that shadowed the builtin.
    if datamin is None:
        datamin = data.min()
    if datamax is None:
        datamax = data.max()
    # Call the C function for speed.
    xy_val = cdriz.arrxyround(data, x0, y0, skymode, ker2d, xsigsq, ysigsq,
                              datamin, datamax)
    if xy_val is None:
        return None, None, None
    return xy_val[0], xy_val[1], xy_val[2]
2.766935
2.703071
1.023627
def precompute_sharp_round(nxk, nyk, xc, yc):
    """Pre-compute the symmetry masks used by sharp_round().

    Returns
    -------
    (s2m, s4m) : tuple of ndarray
        int16 masks for the two-fold (s2m, +1/-1 halves) and four-fold
        (s4m, all +1) symmetry sums; the central pixel (yc, xc) is zeroed
        in both.
    """
    s4m = np.ones((nyk, nxk), dtype=np.int16)
    s4m[yc, xc] = 0

    s2m = np.ones((nyk, nxk), dtype=np.int16)
    s2m[yc, xc] = 0
    # The lower-left and upper-right regions carry weight -1.
    s2m[yc:, :xc] = -1
    s2m[:yc + 1, xc + 1:] = -1
    return s2m, s4m
3.003249
2.629695
1.142052
def sharp_round(data, density, kskip, xc, yc, s2m, s4m, nxk, nyk, datamin, datamax):
    """Compute first estimates of roundness and sharpness of a detection.

    Python translation of the AP_SHARP_ROUND IRAF/DAOFIND routine.

    Returns
    -------
    (satur, round, sharp) : tuple
        Saturation flag, roundness estimate (None when undefined) and
        sharpness estimate (None when it cannot be computed).
    """
    # Roundness from the two- and four-fold symmetry sums.
    two_fold = np.sum(s2m * density)
    four_fold = np.sum(s4m * abs(density))
    if two_fold == 0.0:
        round_est = 0.0
    elif four_fold <= 0.0:  # eps?
        round_est = None
    else:
        round_est = 2.0 * two_fold / four_fold

    center_data = data[yc, xc]
    center_dens = density[yc, xc]
    # Skip the sharpness test when the central pixel is out of range.
    if center_data > datamax:
        return True, round_est, None
    if center_data < datamin:
        return False, round_est, None

    # Sharpness statistics:
    satur = np.max(kskip * data) > datamax
    # Keep only pixels inside [datamin, datamax], minus the kernel skips
    # and the central pixel itself.
    good = ((data >= datamin) & (data <= datamax)).astype(int) * kskip
    good[yc, xc] = 0
    npix = np.sum(good)
    if npix < 1 or center_dens <= 0.0:
        return satur, round_est, None
    sharp = (center_data - np.sum(good * data) / npix) / center_dens
    return satur, round_est, sharp
3.871356
3.798002
1.019314
def roundness(im):
    """Return 4*pi*area/perimeter**2 for the image's rectangular footprint.

    The area is the number of pixels and the perimeter counts the border
    pixels (each corner once); values approach 1 as the footprint nears a
    circle's isoperimetric ratio.
    """
    ny, nx = im.shape
    border = 2 * ny + 2 * nx - 4
    return 4 * np.pi * im.size / border ** 2
4.019491
4.180829
0.96141
def immoments(im, p, q):
    """Compute the raw image moment M(p, q) = sum_ij i**p * j**q * im[i, j].

    Here i indexes rows and j indexes columns of ``im``; the result is a
    float64 scalar.
    """
    rows, cols = np.indices(im.shape)
    return np.sum((rows ** p) * (cols ** q) * im, dtype=np.float64)
4.36978
4.692201
0.931286
def centroid(im):
    """Compute the image centroid from moments: (m01/m00, m10/m00).

    The moments come from the C implementation in cdriz.arrmoments,
    equivalent to the Python immoments() in this module.
    """
    m00 = cdriz.arrmoments(im, 0, 0)
    m10 = cdriz.arrmoments(im, 1, 0)
    m01 = cdriz.arrmoments(im, 0, 1)
    return m01 / m00, m10 / m00
4.317641
2.592157
1.665656
def getMdriztabParameters(files):
    """Get the entry in the MDRIZTAB table that matches the input files.

    The table name is read from the MDRIZTAB keyword in the primary header
    of the first file (no consistency checks are performed); rows are
    selected by filter name (falling back to 'ANY') and then by the number
    of input images.

    Parameters
    ----------
    files : list of str
        Input image filenames; only the first is used for header lookups.

    Returns
    -------
    dict
        Interpreted task parameters from the selected row.

    Raises
    ------
    KeyError
        If the first file has no MDRIZTAB keyword.
    IOError
        If the MDRIZTAB table cannot be found or opened.
    """
    # Get the MDRIZTAB table file name from the primary header of the
    # first input file.
    _fileName = files[0]
    _header = fileutil.getHeader(_fileName)
    if 'MDRIZTAB' in _header:
        _tableName = _header['MDRIZTAB']
    else:
        raise KeyError("No MDRIZTAB found in file " + _fileName)
    _tableName = fileutil.osfn(_tableName)

    # Now get the filters from the primary header.
    _filters = fileutil.getFilterNames(_header)

    # Specifically check to see whether the MDRIZTAB file can be found.
    mtab_path = os.path.split(_tableName)[0]  # protect against no path given
    if mtab_path and not os.path.exists(mtab_path):  # check path first, if given
        raise IOError("Directory for MDRIZTAB '%s' could not be accessed!" % mtab_path)
    if not os.path.exists(_tableName):  # then check for the table itself
        raise IOError("MDRIZTAB table '%s' could not be found!" % _tableName)

    # Open MDRIZTAB file.
    # BUG FIX: the original used a bare 'except:', which also swallowed
    # KeyboardInterrupt/SystemExit; catch Exception instead.
    try:
        _mdriztab = fits.open(_tableName, memmap=False)
    except Exception:
        raise IOError("MDRIZTAB table '%s' not valid!" % _tableName)

    # Look for matching rows based on filter name.  If no match, pick up
    # rows for the default filter.
    _rows = _getRowsByFilter(_mdriztab, _filters)
    if _rows == []:
        _rows = _getRowsByFilter(_mdriztab, 'ANY')

    # Now look for the row that matches the number of images.  The logic
    # below assumes that rows for a given filter are arranged in ascending
    # order of the 'numimages' field.
    _nimages = len(files)
    _row = 0
    for i in _rows:
        _numimages = _mdriztab[1].data.field('numimages')[i]
        if _nimages >= _numimages:
            _row = i
    print('- MDRIZTAB: AstroDrizzle parameters read from row %s.' % (_row + 1))

    mpars = _mdriztab[1].data[_row]
    _mdriztab.close()

    interpreted = _interpretMdriztabPars(mpars)
    # 'staticfile' must not be taken from the table.
    if "staticfile" in interpreted:
        interpreted.pop("staticfile")
    return interpreted
4.323597
4.355784
0.992611
def _interpretMdriztabPars(rec):
    """Convert an MDRIZTAB record into a task-parameter dictionary.

    Values read from the record are cleaned up in the same way parameters
    read from the user interface are, and MDRIZTAB column names are
    translated to the input-parameter names found in the IRAF par file.

    Parameters
    ----------
    rec : FITS record
        A single row from the MDRIZTAB table.

    Returns
    -------
    dict
        Parameter-name -> cleaned value mapping.

    Raises
    ------
    ValueError
        If a column has an unrecognized format.
    """
    tabdict = {}
    # For each entry in the record...
    for indx in range(len(rec.array.names)):
        # ... get the name, format, and value.
        _name = rec.array.names[indx]
        _format = rec.array.formats[indx]
        _value = rec.field(_name)

        # These columns are never propagated to the parameter set.
        if _name in ['shiftfile', 'mdriztab']:
            continue

        drizstep_names = ['driz_sep_', 'final_']
        if _name in ['refimage', 'bits']:
            # These values apply to both the separate and final drizzle steps.
            for dnames in drizstep_names:
                tabdict[dnames + _name] = _value
            continue
        if _name in ['driz_sep_bits', 'final_bits']:
            tabdict[_name] = str(_value)
            continue
        if _name == 'coeffs':
            # Interpret 'INDEF'/blank-like values as "no coefficients file".
            _val = True
            if _value in ['INDEF', None, "None", '', ' ']:
                _val = False
            tabdict[_name] = _val
            continue

        # Columns whose names differ from the par-file parameter names.
        par_table = {'subsky': 'skysub', 'crbitval': 'crbit',
                     'readnoise': 'rdnoise'}
        if _name in par_table:
            _name = par_table[_name]

        # We do not care about the first two columns at this point as they
        # are only used for selecting the rows.
        if _name != 'filter' and _name != 'numimages':
            # Start by determining the format type of the parameter.
            _fmt = findFormat(_format)
            # Based on format type, apply the proper conversion/cleaning.
            if (_fmt == 'a') or (_fmt == 'A'):
                _val = cleanBlank(_value)
                if _val is None:
                    _val = ''
            elif (_format == 'i1') or (_format == '1L'):
                _val = toBoolean(_value)
            elif (_format == 'i4') or (_format == '1J'):
                _val = cleanInt(_value)
            elif ('E' in _format) or (_format == 'f4'):
                _val = cleanNaN(_value)
            else:
                print('MDRIZTAB column ', _name, ' has unrecognized format', _format)
                # BUG FIX: the original raised a bare ValueError; include a
                # message so callers can see which column failed.
                raise ValueError('MDRIZTAB column %s has unrecognized format %s'
                                 % (_name, _format))
            if _name in ['ra', 'dec']:
                # RA/Dec apply to both drizzle steps.
                for dnames in drizstep_names:
                    tabdict[dnames + _name] = _val
            else:
                tabdict[_name] = _val
    return tabdict
4.964106
4.888687
1.015427
def run(configObj, wcsmap=None):
    """TEAL/configObj entry point: build a new WCS from the user's settings.

    Parameters
    ----------
    configObj : dict-like
        Parameter settings as set interactively by the user.
    wcsmap : object, optional
        Unused; present for interface compatibility with TEAL.
    """
    distortion_pars = configObj['Distortion Model']
    # BUG FIX: build() takes the keyword 'applycoeffs'.  The original call
    # passed 'usecoeffs=', which fell through into **wcspars and left
    # applycoeffs at its default of False, so the distortion coefficients
    # were never applied.
    outwcs = build(configObj['outwcs'],
                   configObj['wcsname'],
                   configObj['refimage'],
                   undistort=configObj['undistort'],
                   applycoeffs=distortion_pars['applycoeffs'],
                   coeffsfile=distortion_pars['coeffsfile'],
                   **configObj['User WCS Parameters'])
7.658157
7.554023
1.013785
def build(outname, wcsname, refimage, undistort=False,
          applycoeffs=False, coeffsfile=None, **wcspars):
    """Create a WCS from a reference image, user parameters, or both.

    Parameters
    ----------
    outname : str
        Filename for the output headerlet.
    wcsname : str
        WCSNAME ID for the generated WCS (a default is created if blank).
    refimage : str
        Image whose WCS serves as the basis for the output WCS.
    undistort : bool
        Create an undistorted WCS?
    applycoeffs : bool
        Apply coefficients from ``coeffsfile`` to generate an undistorted WCS?
    coeffsfile : str
        Optional file with distortion coefficients.
    **wcspars
        User-supplied WCS parameters (userwcs, crval/crpix/naxis/...).

    Returns
    -------
    HSTWCS or None
        The final WCS, or None when nothing was specified.
    """
    # Make sure every user WCS parameter has a value, even if it is None.
    user_wcs_pars = convert_user_pars(wcspars)
    userwcs = wcspars['userwcs']

    # Nothing to work from: neither a reference image nor user parameters.
    if util.is_blank(refimage) and not userwcs:
        print('WARNING: No WCS specified... No WCS created!')
        return

    # Build a fully user-specified WCS when no reference image was given.
    customwcs = None
    if util.is_blank(refimage) and userwcs:
        incomplete = any(util.is_blank(user_wcs_pars[k]) for k in user_wcs_pars)
        if incomplete:
            print('WARNING: Not enough WCS information provided by user!')
            raise ValueError
        customwcs = wcs_functions.build_hstwcs(
            user_wcs_pars['crval1'], user_wcs_pars['crval2'],
            user_wcs_pars['crpix1'], user_wcs_pars['crpix2'],
            user_wcs_pars['naxis1'], user_wcs_pars['naxis2'],
            user_wcs_pars['pscale'], user_wcs_pars['orientat'])

    if util.is_blank(refimage):
        refwcs = customwcs
    else:
        refwcs = stwcs.wcsutil.HSTWCS(refimage)

    # Apply a distortion model (if any) to update the WCS.
    if applycoeffs and not util.is_blank(coeffsfile):
        if not util.is_blank(refimage):
            replace_model(refwcs, coeffsfile)
        elif not undistort:
            # Only working with a custom WCS from the user, no distortion:
            # attach the model and fold it into the CD matrix.
            add_model(refwcs, coeffsfile)
            apply_model(refwcs)

    # Create an undistorted WCS, if requested.
    outwcs = undistortWCS(refwcs) if undistort else refwcs

    if userwcs:
        # Replace (some/all?) WCS values from refimage with user WCS values.
        outwcs = mergewcs(outwcs, customwcs, user_wcs_pars)

    # Pick the headerlet template: reference image first, then the
    # coefficients file, otherwise none.
    if not util.is_blank(refimage):
        template = refimage
    elif not util.is_blank(coeffsfile):
        template = coeffsfile
    else:
        template = None

    # Create a default WCSNAME if none was given.
    wcsname = create_WCSname(wcsname)
    print('Creating final headerlet with name ', wcsname, ' using template ', template)
    # NOTE(review): generate_headerlet() as written in this module does not
    # return the HDUList it builds -- confirm it returns it before iterating.
    outhdr = generate_headerlet(outwcs, template, wcsname, outname=outname)

    # Synchronize this new WCS with the rest of the chips in the image.
    for ext in outhdr:
        if 'extname' in ext.header and ext.header['extname'] == 'SIPWCS':
            ext_wcs = wcsutil.HSTWCS(ext)
            stwcs.updatewcs.makewcs.MakeWCS.updateWCS(ext_wcs, outwcs)

    return outwcs
4.127549
4.111147
1.003989
def create_WCSname(wcsname):
    """Return a valid WCSNAME: the given one, or 'User_<date>' when blank."""
    if util.is_blank(wcsname):
        wcsname = "User_" + fileutil.getDate()
    return wcsname
12.336861
10.18452
1.211335
def convert_user_pars(wcspars):
    """Map configObj parameter values onto HSTWCS-style parameter names.

    Starts from the module-level ``default_user_wcs`` defaults and
    overwrites each entry named in ``user_hstwcs_pars`` with the
    corresponding configObj value.
    """
    pars = default_user_wcs.copy()
    for cfg_name, hst_name in user_hstwcs_pars.items():
        pars[hst_name] = wcspars[cfg_name]
    return pars
4.69199
4.558364
1.029314
def mergewcs(outwcs, customwcs, wcspars):
    """Merge user-specified WCS keywords into a full HSTWCS object.

    Follows the same algorithm as updatehdr, but calls the
    updatewcs.Makewcs-style helpers directly instead of running
    'updatewcs' as a whole.  ``outwcs`` is modified in place and returned.
    """
    # Work from a linearized version of the WCS when a SIP model is present.
    if outwcs.sip is not None:
        wcslin = stwcs.distortion.utils.undistortWCS(outwcs)
        outwcs.wcs.cd = wcslin.wcs.cd
        outwcs.wcs.set()
        outwcs.setOrient()
        outwcs.setPscale()
    else:
        wcslin = outwcs

    if customwcs is not None:
        # A new, fully described WCS was provided: copy it wholesale.
        outwcs.wcs.cd = customwcs.wcs.cd
        outwcs.wcs.crval = customwcs.wcs.crval
        outwcs.wcs.crpix = customwcs.wcs.crpix
        outwcs.pixel_shape = customwcs.pixel_shape
        return outwcs

    # Otherwise, update only the valid parameters from wcspars.
    if wcspars['crval1'] is not None:
        outwcs.wcs.crval = np.array([wcspars['crval1'], wcspars['crval2']])
    if wcspars['crpix1'] is not None:
        outwcs.wcs.crpix = np.array([wcspars['crpix1'], wcspars['crpix2']])
    if wcspars['naxis1'] is not None:
        outwcs.pixel_shape = (wcspars['naxis1'], wcspars['naxis2'])
        # Re-center the reference pixel for the new image size.
        outwcs.wcs.crpix = np.array(outwcs.pixel_shape) / 2.0

    pscale = wcspars['pscale']
    orient = wcspars['orientat']
    if pscale is not None or orient is not None:
        if pscale is None:
            pscale = wcslin.pscale
        if orient is None:
            orient = wcslin.orientat
        scale_ratio = pscale / wcslin.pscale
        rot_delta = wcslin.orientat - orient
        rot_mat = fileutil.buildRotMatrix(rot_delta)
        outwcs.wcs.cd = np.dot(outwcs.wcs.cd, rot_mat) * scale_ratio

    # Fold the distortion model back into the new linear CD matrix.
    apply_model(outwcs)
    return outwcs
2.76598
2.77174
0.997922
def add_model(refwcs, newcoeffs):
    """Add a (new?) distortion model to an existing HSTWCS object.

    Every model attribute listed in the module-level ``model_attrs`` that
    is not None on ``newcoeffs`` is copied onto ``refwcs``.
    """
    # BUG FIX: the loop variable is 'kw'; the original body referenced the
    # undefined name 'key', which raised NameError for any non-empty
    # model_attrs.
    for kw in model_attrs:
        if newcoeffs.__dict__[kw] is not None:
            refwcs.__dict__[kw] = newcoeffs.__dict__[kw]
6.018258
5.417564
1.110879
def apply_model(refwcs):
    """Fold the linear distortion terms into the WCS CD matrix.

    Uses the first-order IDC coefficients (ocx10/ocx11/ocy10/ocy11),
    normalized by idcscale, to modify ``refwcs.wcs.cd`` in place, then
    refreshes the derived orientation and plate-scale attributes.
    """
    if 'ocx10' not in refwcs.__dict__ or refwcs.ocx10 is None:
        return  # no linear distortion terms available
    linterms = np.array([[refwcs.ocx11, refwcs.ocx10],
                         [refwcs.ocy11, refwcs.ocy10]]) / refwcs.idcscale
    refwcs.wcs.cd = np.dot(refwcs.wcs.cd, linterms)
    refwcs.wcs.set()
    refwcs.setOrient()
    refwcs.setPscale()
5.056518
4.988747
1.013585
def replace_model(refwcs, newcoeffs):
    """Replace the distortion model in ``refwcs`` with a new model, in place.

    Linearizes the WCS first (so the new model is applied to an
    undistorted CD matrix), attaches the new coefficients with
    add_model(), then folds them into the CD matrix with apply_model().
    """
    print('WARNING:')
    print(' Replacing existing distortion model with one')
    print(' not necessarily matched to the observation!')
    # BUG FIX: the original worked on 'outwcs = refwcs.deepcopy()' and only
    # rebound the *local* name at the end ('refwcs = outwcs.deepcopy()'),
    # so the caller's WCS was never modified.  Operate on refwcs directly.
    wcslin = stwcs.distortion.utils.undistortWCS(refwcs)
    refwcs.wcs.cd = wcslin.wcs.cd
    refwcs.wcs.set()
    refwcs.setOrient()
    refwcs.setPscale()
    # Add the new model, then update the CD matrix with it.
    add_model(refwcs, newcoeffs)
    apply_model(refwcs)
6.82323
6.392217
1.067428
def undistortWCS(refwcs):
    """Generate an undistorted HSTWCS from a WCS carrying a distortion model.

    The linear WCS comes from stwcs.distortion.utils.output_wcs; the SIP
    model is dropped and instrument-specific keywords are carried over.
    """
    linear = stwcs.distortion.utils.output_wcs([refwcs])
    result = stwcs.wcsutil.HSTWCS()
    result.wcs = linear.wcs
    result.wcs.set()
    result.setPscale()
    result.setOrient()
    result.sip = None
    # Carry over the instrument-specific keywords.
    result.inst_kw = refwcs.inst_kw
    for kw in refwcs.inst_kw:
        result.__dict__[kw] = refwcs.__dict__[kw]
    result.pixel_shape = linear.pixel_shape
    return result
4.5539
4.544784
1.002006
def generate_headerlet(outwcs, template, wcsname, outname=None):
    """Create a headerlet based on the updated HSTWCS object.

    'template' is the basis for the headerlet: either the original
    wcspars['refimage'] or wcspars['coeffsfile'], in that order of
    preference.  If 'template' is None (or the WCS carries no SIP model),
    a simple headerlet is generated with a single SIPWCS extension and no
    distortion.

    Returns
    -------
    fits.HDUList
        The headerlet; also written to ``outname`` when given.
    """
    # Create header object from the HSTWCS object.
    siphdr = outwcs.sip is not None
    outwcs_hdr = outwcs.wcs2header(sip2hdr=siphdr)
    outwcs_hdr['NPIX1'] = outwcs.pixel_shape[0]
    outwcs_hdr['NPIX2'] = outwcs.pixel_shape[1]

    # Create headerlet object in memory; either from a file or from scratch.
    if template is not None and siphdr:
        print('Creating headerlet from template...')
        fname, extn = fileutil.parseFilename(template)
        extnum = fileutil.parseExtn(extn)
        extname = ('sipwcs', extnum[1])
        hdrlet = headerlet.createHeaderlet(fname, wcsname)
        # Update hdrlet with header values from outwcs.
        for kw in outwcs_hdr.items():
            hdrlet[extname].header[kw[0]] = kw[1]
        hdrlet[extname].header['WCSNAME'] = wcsname
    else:
        print('Creating headerlet from scratch...')
        hdrlet = fits.HDUList()
        hdrlet.append(fits.PrimaryHDU())
        siphdu = fits.ImageHDU(header=outwcs_hdr)
        siphdu.header['EXTNAME'] = 'SIPWCS'
        siphdu.header['WCSNAME'] = wcsname
        hdrlet.append(siphdu)

    # Write out the header to a file as the final product.
    if outname is not None:
        if outname.find('_hdr.fits') < 0:
            outname += '_hdr.fits'
        if os.path.exists(outname):
            print('Overwrite existing file "%s"' % outname)
            os.remove(outname)
        hdrlet.writeto(outname)
        print('Wrote out headerlet :', outname)

    # BUG FIX: the original never returned the headerlet, yet build()
    # iterates over the result of this call.
    return hdrlet
3.252537
3.189299
1.019828
def qderiv(array):
    """Take the absolute derivative of an image in memory.

    The input is shifted by one pixel in each of the four cardinal
    directions; after each shift the absolute difference against the
    original is accumulated via _absoluteSubtract().

    Parameters
    ----------
    array : ndarray
        2D input image.

    Returns
    -------
    ndarray
        float32 array of accumulated absolute differences.
    """
    # Scratch and output buffers with the same dimensions as the input.
    # NOTE: the scratch buffer is deliberately NOT cleared between shifts,
    # matching the original implementation's edge behavior.
    shifted = np.zeros(array.shape, dtype=np.float64)
    result = np.zeros(array.shape, dtype=np.float64)
    n1, n2 = array.shape

    # Shift the image +/- 1 pixel along the second axis.
    for step in (-1, 1):
        if step == -1:
            shifted[0:n1 - 1, 1:n2 - 1] = array[0:n1 - 1, 0:n2 - 2]
        else:
            shifted[0:n1 - 1, 0:n2 - 2] = array[0:n1 - 1, 1:n2 - 1]
        shifted, result = _absoluteSubtract(array, shifted, result)

    # Shift the image +/- 1 pixel along the first axis.
    for step in (-1, 1):
        if step == -1:
            shifted[1:n1 - 1, 0:n2 - 1] = array[0:n1 - 2, 0:n2 - 1]
        else:
            shifted[0:n1 - 2, 0:n2 - 1] = array[1:n1 - 1, 0:n2 - 1]
        shifted, result = _absoluteSubtract(array, shifted, result)

    return result.astype(np.float32)
1.781657
1.780073
1.00089