code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
# Initialize the random number generator seed(seedValue) # Get the contents of the table dataTable = Table.read(tableName, format='ascii.csv') numRows = len(dataTable) # Generate a sequence of integers the size of the table, and then # obtain a random subset of the sequence with no duplicate selections sequence = list(range(numRows)) subset = sample(sequence, numEntries) # Extract the subset rows... outputTable = dataTable[subset] #outputTable = dataTable[0:numEntries] # Returns the outputTable which is an Astropy Table object return(outputTable)
def randomSelectFromCSV(tableName, numEntries, seedValue)
Function to extract random entries (lines) from a CSV file Parameters ========== tableName: str Filename of the input master CSV file containing individual images or association names, as well as observational information regarding the images numEntries : int Number of entries/rows to extract from the master input CSV file seedValue : int Value used to initialize the random number generator for the selection of random entries Returns ======= outputTable : object Astropy Table object
5.15855
4.637916
1.112256
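A minimal usage sketch for randomSelectFromCSV, assuming the function lives in a module that imports seed and sample from random and Table from astropy.table (as its body requires), and that a file named master.csv (hypothetical) exists:

    # Draw 5 unique rows from the master CSV, reproducibly.
    subset = randomSelectFromCSV('master.csv', numEntries=5, seedValue=42)
    print(len(subset))  # 5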
hdrwcs = wcsutil.HSTWCS(hdulist,ext=extnum) hdrwcs.filename = filename hdrwcs.expname = hdulist[extnum].header['expname'] hdrwcs.extver = hdulist[extnum].header['extver'] return hdrwcs
def get_hstwcs(filename,hdulist,extnum)
Return the HSTWCS object for a given chip.
2.894021
3.12518
0.926033
rotmat = fileutil.buildRotMatrix(delta_rot)*delta_scale new_lincd = np.dot(cdmat,rotmat) cxymat = np.array([[cx[1],cx[0]],[cy[1],cy[0]]]) new_cd = np.dot(new_lincd,cxymat) return new_cd
def update_linCD(cdmat, delta_rot=0.0, delta_scale=1.0, cx=[0.0,1.0], cy=[1.0,0.0])
Modify an existing linear CD matrix with rotation and/or scale changes and return a new CD matrix. If 'cx' and 'cy' are specified, it will return a distorted CD matrix. Only those terms which are varying need to be specified on input.
3.492619
3.988126
0.875754
cxymat = np.array([[cx[1],cx[0]],[cy[1],cy[0]]]) rotmat = fileutil.buildRotMatrix(orient)*scale/3600. new_cd = np.dot(rotmat,cxymat) return new_cd
def create_CD(orient, scale, cx=None, cy=None)
Create a (un)distorted CD matrix from the basic inputs. The 'cx' and 'cy' parameters, if given, provide the X and Y coefficients of the distortion as returned by reading the IDCTAB. Only the first 2 elements are used and should correspond to the 'OC[X/Y]10' and 'OC[X/Y]11' terms in that order as read from the expanded SIP headers. The units of 'scale' should be 'arcseconds/pixel' of the reference pixel. The value of 'orient' should be the absolute orientation on the sky of the reference pixel.
5.592545
5.835594
0.958351
xskyh = xsky /15. xskym = (xskyh - np.floor(xskyh)) * 60. xskys = (xskym - np.floor(xskym)) * 60. yskym = (np.abs(ysky) - np.floor(np.abs(ysky))) * 60. yskys = (yskym - np.floor(yskym)) * 60. fmt = "%."+repr(precision)+"f" if isinstance(xskyh,np.ndarray): rah,dech = [],[] for i in range(len(xskyh)): rastr = repr(int(xskyh[i]))+':'+repr(int(xskym[i]))+':'+fmt%(xskys[i]) decstr = repr(int(ysky[i]))+':'+repr(int(yskym[i]))+':'+fmt%(yskys[i]) rah.append(rastr) dech.append(decstr) if verbose: print('RA = ',rastr,', Dec = ',decstr) else: rastr = repr(int(xskyh))+':'+repr(int(xskym))+':'+fmt%(xskys) decstr = repr(int(ysky))+':'+repr(int(yskym))+':'+fmt%(yskys) rah = rastr dech = decstr if verbose: print('RA = ',rastr,', Dec = ',decstr) return rah,dech
def ddtohms(xsky,ysky,verbose=False,precision=6)
Convert sky position(s) from decimal degrees to HMS format.
1.713357
1.68785
1.015112
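A quick scalar check of ddtohms (sketch; numpy assumed imported as np in the module):

    rah, dech = ddtohms(150.1125, -2.25, precision=2)
    print(rah, dech)  # '10:0:27.00' '-2:15:0.00'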
def_scale = (wcs.pscale) / 3600. def_orientat = np.deg2rad(wcs.orientat) perfect_cd = def_scale * np.array( [[-np.cos(def_orientat),np.sin(def_orientat)], [np.sin(def_orientat),np.cos(def_orientat)]] ) return perfect_cd
def make_perfect_cd(wcs)
Create a perfect (square, orthogonal, undistorted) CD matrix from the input WCS.
2.936706
3.063397
0.958644
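make_perfect_cd reads only two attributes from its argument, so a lightweight stand-in is enough to exercise it (a sketch; real callers pass an HSTWCS):

    from types import SimpleNamespace
    import numpy as np

    wcs = SimpleNamespace(pscale=0.05, orientat=0.0)  # arcsec/pixel, degrees
    cd = make_perfect_cd(wcs)
    # For orientat=0 this is [[-0.05/3600, 0], [0, 0.05/3600]]: square, orthogonal, undistorted.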
naxis1 = shape[1] naxis2 = shape[0] # build up arrays for pixel positions for the edges # These arrays need to be: array([(x,y),(x1,y1),...]) numpix = naxis1*2 + naxis2*2 border = np.zeros(shape=(numpix,2),dtype=np.float64) # Now determine the appropriate values for this array # We also need to account for any subarray offsets xmin = 1. xmax = naxis1 ymin = 1. ymax = naxis2 # Build range of pixel values for each side # Add 1 to make them consistent with pixel numbering in IRAF # Also include the LTV offsets to represent position in full chip # since the model works relative to full chip positions. xside = np.arange(naxis1) + xmin yside = np.arange(naxis2) + ymin #Now apply them to the array to generate the appropriate tuples #bottom _range0 = 0 _range1 = naxis1 border[_range0:_range1,0] = xside border[_range0:_range1,1] = ymin #top _range0 = _range1 _range1 = _range0 + naxis1 border[_range0:_range1,0] = xside border[_range0:_range1,1] = ymax #left _range0 = _range1 _range1 = _range0 + naxis2 border[_range0:_range1,0] = xmin border[_range0:_range1,1] = yside #right _range0 = _range1 _range1 = _range0 + naxis2 border[_range0:_range1,0] = xmax border[_range0:_range1,1] = yside edges = wcs.all_pix2world(border[:,0],border[:,1],1) return edges
def calcNewEdges(wcs, shape)
This method will compute sky coordinates for all the pixels around the edge of an image AFTER applying the geometry model. Parameters ---------- wcs : obj HSTWCS object for image shape : tuple numpy shape tuple for size of image Returns ------- edges : arr array which contains the sky positions (RA, Dec) for all pixels around the border of the image
3.410788
3.356396
1.016205
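calcNewEdges only needs an object with an all_pix2world method, so a plain astropy.wcs.WCS can stand in for an HSTWCS in a quick sketch (assumption: astropy is installed):

    from astropy.wcs import WCS
    import numpy as np

    w = WCS(naxis=2)
    w.wcs.crpix = [50.0, 50.0]
    w.wcs.cdelt = [-1e-4, 1e-4]
    w.wcs.crval = [150.0, -2.0]
    w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    edges = calcNewEdges(w, (100, 100))
    # edges is (ra, dec) for the 400 pixels along the 100x100 border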
from . import imageObject outwcs = imageObject.WCSObject(output) outwcs.default_wcs = default_wcs outwcs.wcs = default_wcs.copy() outwcs.final_wcs = default_wcs.copy() outwcs.single_wcs = default_wcs.copy() outwcs.updateContextImage(imageObjectList[0].createContext) # # Add exptime information for use with drizzle # outwcs._exptime,outwcs._expstart,outwcs._expend = util.compute_texptime(imageObjectList) outwcs.nimages = util.countImages(imageObjectList) return outwcs
def createWCSObject(output,default_wcs,imageObjectList)
Converts a PyWCS WCS object into a WCSObject(baseImageObject) instance.
4.799067
4.913929
0.976625
original_logging_level = log.level log.setLevel(logutil.logging.WARNING) try: hdr = hdulist[extlist[0]].header wkeys = altwcs.wcskeys(hdr) if ' ' in wkeys: wkeys.remove(' ') for extn in extlist: for wkey in wkeys: if wkey == 'O': continue altwcs.deleteWCS(hdulist,extn,wkey) # Forcibly remove OPUS WCS Keywords, since deleteWCS will not do it hwcs = readAltWCS(hdulist,extn,wcskey='O') if hwcs is None: continue for k in hwcs.keys(): if k not in ['DATE-OBS','MJD-OBS'] and k in hdr: try: del hdr[k] except KeyError: pass except: raise finally: log.setLevel(original_logging_level)
def removeAllAltWCS(hdulist,extlist)
Removes all alternate WCS solutions from the header
4.05561
4.061349
0.998587
if not isinstance(imageObjectList,list): imageObjectList = [imageObjectList] output_wcs.restoreWCS() updateImageWCS(imageObjectList, output_wcs)
def restoreDefaultWCS(imageObjectList, output_wcs)
Restore WCS information to default values, and update imageObject accordingly.
3.524223
3.663812
0.961901
if hasattr(x, '__iter__'): rx = np.empty_like(x) m = x >= 0.0 rx[m] = np.floor(x[m] + 0.5) m = np.logical_not(m) rx[m] = np.ceil(x[m] - 0.5) return rx else: if x >= 0.0: return np.floor(x + 0.5) else: return np.ceil(x - 0.5)
def _py2round(x)
This function rounds the argument half away from zero, replicating the behavior of Python 2's round().
1.799015
1.951771
0.921734
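The difference from Python 3's built-in round (banker's rounding) shows up at exact halves; _py2round rounds half away from zero, on scalars and arrays alike:

    import numpy as np

    print(_py2round(0.5), _py2round(-0.5))        # 1.0 -1.0
    print(round(0.5), round(-0.5))                # 0 0 (Python 3 rounds halves to even)
    print(_py2round(np.array([0.5, 1.5, -2.5])))  # [ 1.  2. -3.]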
drizwcs[0] = inwcs.crpix[0] drizwcs[1] = inwcs.crval[0] drizwcs[2] = inwcs.crpix[1] drizwcs[3] = inwcs.crval[1] drizwcs[4] = inwcs.cd[0][0] drizwcs[5] = inwcs.cd[1][0] drizwcs[6] = inwcs.cd[0][1] drizwcs[7] = inwcs.cd[1][1] return drizwcs
def convertWCS(inwcs,drizwcs)
Copy WCSObject WCS into Drizzle compatible array.
1.38794
1.354349
1.024802
crpix = np.array([drizwcs[0],drizwcs[2]], dtype=np.float64) crval = np.array([drizwcs[1],drizwcs[3]], dtype=np.float64) cd = np.array([[drizwcs[4],drizwcs[6]],[drizwcs[5],drizwcs[7]]], dtype=np.float64) inwcs.cd = cd inwcs.crval = crval inwcs.crpix = crpix inwcs.pscale = np.sqrt(np.power(inwcs.cd[0][0],2)+np.power(inwcs.cd[1][0],2)) * 3600. inwcs.orient = np.arctan2(inwcs.cd[0][1],inwcs.cd[1][1]) * 180./np.pi
def updateWCS(drizwcs,inwcs)
Copy output WCS array from Drizzle into WCSObject.
1.968554
1.965008
1.001805
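A round trip through the two helpers above, using attribute-settable stand-ins for the WCSObject (a sketch; only crpix, crval and cd are read or written, plus the derived pscale and orient):

    from types import SimpleNamespace
    import numpy as np

    src = SimpleNamespace(crpix=[512.0, 512.0], crval=[150.0, -2.0],
                          cd=np.array([[-1.4e-5, 0.0], [0.0, 1.4e-5]]))
    drizwcs = convertWCS(src, np.zeros(8))  # [crpix1, crval1, crpix2, crval2, cd11, cd21, cd12, cd22]
    dst = SimpleNamespace()
    updateWCS(drizwcs, dst)
    # dst.crpix, dst.crval, dst.cd match src; dst.pscale (arcsec/pix) and dst.orient (deg) are derived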
# Define objects that we need to use for the fit... #in_refpix = img_geom.model.refpix wmap = WCSMap(img_wcs,ref_wcs) cx, cy = coeff_converter.sip2idc(img_wcs) # Convert the RA/Dec positions back to X/Y in output product image #_cpix_xyref = np.zeros((4,2),dtype=np.float64) # Start by setting up an array of points +/-0.5 pixels around CRVAL1,2 # However, we must shift these positions by 1.0pix to match what # drizzle will use as its reference position for 'align=center'. _cpix = (img_wcs.wcs.crpix[0],img_wcs.wcs.crpix[1]) _cpix_arr = np.array([_cpix,(_cpix[0],_cpix[1]+1.), (_cpix[0]+1.,_cpix[1]+1.),(_cpix[0]+1.,_cpix[1])], dtype=np.float64) # Convert these positions to RA/Dec _cpix_rd = wmap.xy2rd(img_wcs,_cpix_arr[:,0],_cpix_arr[:,1]) #for pix in xrange(len(_cpix_rd[0])): _cpix_xref,_cpix_yref = wmap.rd2xy(ref_wcs,_cpix_rd[0],_cpix_rd[1]) _cpix_xyref = np.zeros((4,2),dtype=np.float64) _cpix_xyref[:,0] = _cpix_xref _cpix_xyref[:,1] = _cpix_yref offx, offy = (0.0,0.0) # Now, apply distortion model to input image XY positions #_cpix_xyc = np.zeros((4,2),dtype=np.float64) _cpix_xyc = utils.apply_idc(_cpix_arr, cx, cy, img_wcs.wcs.crpix, img_wcs.pscale, order=1) # Need to get the XDELTA,YDELTA values included here in order to get this # to work with MDTng. #if in_refpix: # _cpix_xyc += (in_refpix['XDELTA'], in_refpix['YDELTA']) # Perform a fit between: # - undistorted, input positions: _cpix_xyc # - X/Y positions in reference frame: _cpix_xyref abxt,cdyt = fitlin(_cpix_xyc,_cpix_xyref) # This correction affects the final fit when you are fitting # a WCS to itself (no distortion coeffs), so it needs to be # taken out in the coeffs file by modifying the zero-point value. # WJH 17-Mar-2005 abxt[2] -= ref_wcs.wcs.crpix[0] + offx cdyt[2] -= ref_wcs.wcs.crpix[1] + offy return abxt,cdyt
def wcsfit(img_wcs, ref_wcs)
Perform a linear fit between 2 WCS for shift, rotation and scale. Based on the WCSLIN function from 'drutil.f' (Drizzle V2.9) and modified to allow for differences in reference positions assumed by PyDrizzle's distortion model and the coeffs used by 'drizzle'. Parameters ---------- img_wcs : obj WCS instance for the input image ref_wcs : obj Undistorted WCSObject instance for output frame
5.055343
4.924044
1.026665
# Initialize variables _mat = np.zeros((3,3),dtype=np.float64) _xorg = imgarr[0][0] _yorg = imgarr[0][1] _xoorg = refarr[0][0] _yoorg = refarr[0][1] _sigxox = 0. _sigxoy = 0. _sigxo = 0. _sigyox = 0. _sigyoy = 0. _sigyo = 0. _npos = len(imgarr) # Populate matrices for i in range(_npos): _mat[0][0] += np.power((imgarr[i][0] - _xorg),2) _mat[0][1] += (imgarr[i][0] - _xorg) * (imgarr[i][1] - _yorg) _mat[0][2] += (imgarr[i][0] - _xorg) _mat[1][1] += np.power((imgarr[i][1] - _yorg),2) _mat[1][2] += imgarr[i][1] - _yorg _sigxox += (refarr[i][0] - _xoorg)*(imgarr[i][0] - _xorg) _sigxoy += (refarr[i][0] - _xoorg)*(imgarr[i][1] - _yorg) _sigxo += refarr[i][0] - _xoorg _sigyox += (refarr[i][1] - _yoorg)*(imgarr[i][0] -_xorg) _sigyoy += (refarr[i][1] - _yoorg)*(imgarr[i][1] - _yorg) _sigyo += refarr[i][1] - _yoorg _mat[2][2] = _npos _mat[1][0] = _mat[0][1] _mat[2][0] = _mat[0][2] _mat[2][1] = _mat[1][2] # Now invert this matrix _mat = linalg.inv(_mat) _a = _sigxox*_mat[0][0]+_sigxoy*_mat[0][1]+_sigxo*_mat[0][2] _b = -1*(_sigxox*_mat[1][0]+_sigxoy*_mat[1][1]+_sigxo*_mat[1][2]) #_x0 = _sigxox*_mat[2][0]+_sigxoy*_mat[2][1]+_sigxo*_mat[2][2] _c = _sigyox*_mat[1][0]+_sigyoy*_mat[1][1]+_sigyo*_mat[1][2] _d = _sigyox*_mat[0][0]+_sigyoy*_mat[0][1]+_sigyo*_mat[0][2] #_y0 = _sigyox*_mat[2][0]+_sigyoy*_mat[2][1]+_sigyo*_mat[2][2] _xt = _xoorg - _a*_xorg+_b*_yorg _yt = _yoorg - _d*_xorg-_c*_yorg return [_a,_b,_xt],[_c,_d,_yt]
def fitlin(imgarr,refarr)
Compute the least-squares fit between two arrays. A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9).
1.622593
1.627256
0.997135
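A synthetic check for fitlin: a pure shift between the two point lists should come back as an identity linear term plus the offset (sketch; assumes the module-level numpy and linalg imports that fitlin relies on):

    import numpy as np

    imgarr = np.array([[0., 0.], [10., 0.], [0., 10.], [5., 5.]])
    refarr = imgarr + np.array([3.0, -2.0])
    (a, b, xt), (c, d, yt) = fitlin(imgarr, refarr)
    # expect a ~ 1, b ~ 0, xt ~ 3 and c ~ 1, d ~ 0, yt ~ -2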
mu = uv[:,0].mean() mv = uv[:,1].mean() mx = xy[:,0].mean() my = xy[:,1].mean() u = uv[:,0] - mu v = uv[:,1] - mv x = xy[:,0] - mx y = xy[:,1] - my Sxx = np.dot(x,x) Syy = np.dot(y,y) Sux = np.dot(u,x) Suy = np.dot(u,y) Svx = np.dot(v,x) Svy = np.dot(v,y) # implement parity check if (np.dot(Sux,Svy) > 0): p = 1 else: p = -1 XX = p*Sux + Svy YY = Suy - p*Svx # derive output values theta_deg = np.rad2deg(np.arctan2(YY,XX))% 360.0 scale = np.sqrt(XX**2 + YY**2) / (Sxx+Syy) shift = (mu-mx,mv-my) if verbose: print('Linear RSCALE fit: rotation = ',theta_deg,' scale = ',scale,' offset = ',shift) coeffs = scale * fileutil.buildRotMatrix(-theta_deg) P = [coeffs[0,0],coeffs[0,1],shift[0]] Q = [coeffs[1,1],coeffs[1,0],shift[1]] return P,Q
def fitlin_rscale(xy,uv,verbose=False)
Performs a linear, orthogonal fit between matched lists of positions 'xy' (input) and 'uv' (output). Output: (same as for fit_arrays_general)
2.82526
2.885547
0.979107
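The same kind of check works for fitlin_rscale (sketch; assumes fileutil.buildRotMatrix is importable and returns the identity for a zero angle): a pure shift should fit with rotation 0 and scale 1.

    import numpy as np

    xy = np.array([[0., 0.], [10., 0.], [0., 10.], [7., 3.]])
    uv = xy + np.array([2.0, -1.0])
    P, Q = fitlin_rscale(xy, uv)
    # expect P ~ [1, 0, 2.0] and Q ~ [1, 0, -1.0]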
fitting_funcs = {'rscale':fitlin_rscale,'general':fitlin} # Get the fitting function to be used fit_func = fitting_funcs[mode.lower()] # Perform the initial fit P,Q = fit_func(xy,uv) xyc = apply_fitlin(xy,P,Q) # compute residuals from fit for input positions dx = uv[:,0] - xyc[0] dy = uv[:,1] - xyc[1] fit_rms = [dx.std(),dy.std()] if nclip > 0: data = xy.copy() outdata = uv.copy() numclipped = 0 for i in range(nclip): iterclipped = 0 xyc = apply_fitlin(data,P,Q) # compute residuals from fit for input positions dx = outdata[:,0] - xyc[0] dy = outdata[:,1] - xyc[1] # find indices of outliers in x and y xout = np.where(np.abs(dx - dx.mean()) > reject*dx.std()) yout = np.where(np.abs(dy - dy.mean()) > reject*dy.std()) # concatenate those indices and sort them outliers_indx = xout[0].tolist()+yout[0].tolist() outliers_indx.sort() # define the full range of indices for the data points left full_indx = list(range(data.shape[0])) # remove all unique indices specified in outliers from full range for o in outliers_indx: # only remove if it has not been removed already # accounts for the same point being an outlier in both x and y if full_indx.count(o) > 0: full_indx.remove(o) iterclipped += 1 if iterclipped == 0: break numclipped += iterclipped if verbose: print('Removed a total of ',numclipped,' points through iteration ',i+1) # create clipped data data_iter = np.zeros([len(full_indx),2],dtype=data.dtype) if verbose: print('Iter #',i+1,' data:',data.shape,data_iter.shape,len(full_indx)) data_iter[:,0] = data[:,0][full_indx] data_iter[:,1] = data[:,1][full_indx] outdata_iter = np.zeros([len(full_indx),2],dtype=data.dtype) outdata_iter[:,0] = outdata[:,0][full_indx] outdata_iter[:,1] = outdata[:,1][full_indx] # perform the fit again with the clipped data and go to the next iteration data = data_iter outdata = outdata_iter P,Q = fit_func(data,outdata) # compute residuals from fit for input positions xyc = apply_fitlin(data,P,Q) dx = outdata[:,0] - xyc[0] dy = outdata[:,1] - xyc[1] fit_rms = [dx.std(),dy.std()] if verbose: print('Fit clipped ',numclipped,' points over ',nclip,' iterations.') return P,Q,fit_rms
def fitlin_clipped(xy,uv,verbose=False,mode='rscale',nclip=3,reject=3)
Perform a clipped fit based on the number of iterations and rejection limit (in sigma) specified by the user. This will more closely replicate the results obtained by 'geomap' using 'maxiter' and 'reject' parameters.
2.617204
2.608327
1.003403
if isinstance(fobj, str): fobj = fits.open(fobj, memmap=False) hdr = altwcs._getheader(fobj, ext) try: original_logging_level = log.level log.setLevel(logutil.logging.WARNING) nwcs = pywcs.WCS(hdr, fobj=fobj, key=wcskey) except KeyError: if verbose: print('readAltWCS: Could not read WCS with key %s' % wcskey) print(' Skipping %s[%s]' % (fobj.filename(), str(ext))) return None finally: log.setLevel(original_logging_level) # restore original logging level hwcs = nwcs.to_header() if nwcs.wcs.has_cd(): hwcs = altwcs.pc2cd(hwcs, key=wcskey) return hwcs
def readAltWCS(fobj, ext, wcskey=' ', verbose=False)
Reads in alternate primary WCS from specified extension. Parameters ---------- fobj : str, `astropy.io.fits.HDUList` fits filename or fits file object containing alternate/primary WCS(s) to be converted ext : int fits extension number wcskey : str [" ",A-Z] key of the alternate/primary WCS to read Returns ------- hdr: fits.Header header object with ONLY the keywords for specified alternate WCS
3.657502
3.969568
0.921385
# This matches WTRAXY results to better than 1e-4 pixels. skyx,skyy = self.input.all_pix2world(pixx,pixy,self.origin) result= self.output.wcs_world2pix(skyx,skyy,self.origin) return result
def forward(self,pixx,pixy)
Transform the input pixx,pixy positions in the input frame to pixel positions in the output frame. This method gets passed to the drizzle algorithm.
11.209837
12.254428
0.914758
skyx,skyy = self.output.wcs_pix2world(pixx,pixy,self.origin) result = self.input.all_world2pix(skyx,skyy,self.origin) return result
def backward(self,pixx,pixy)
Transform pixx,pixy positions from the output frame back onto their original positions in the input frame.
5.615163
4.784395
1.173641
if input is not None: inputDict["static_sig"]=static_sig inputDict["group"]=group inputDict["updatewcs"]=False inputDict["input"]=input else: print("Please supply an input image\n", file=sys.stderr) raise ValueError("Please supply an input image") #this accounts for a user-called init where config is not defined yet configObj = util.getDefaultConfigObj(__taskname__,configObj,inputDict,loadOnly=(not editpars)) if configObj is None: return if not editpars: run(configObj)
def createMask(input=None, static_sig=4.0, group=None, editpars=False, configObj=None, **inputDict)
The user can input a list of images for which to create static masks, as well as optional values for static_sig and inputDict. The configObj.cfg file will set the defaults and then override them with the user options.
6.21915
7.024632
0.885335
suffix = buildSignatureKey(signature) filename = os.path.join('.', suffix) return filename
def constructFilename(signature)
Construct an output filename for the given signature:: signature=[instr+detector,(nx,ny),detnum] The signature is in the image object.
9.327806
16.356258
0.57029
numchips=imagePtr._numchips log.info("Computing static mask:\n") chips = imagePtr.group if chips is None: chips = imagePtr.getExtensions() #for chip in range(1,numchips+1,1): for chip in chips: chipid=imagePtr.scienceExt + ','+ str(chip) chipimage=imagePtr.getData(chipid) signature=imagePtr[chipid].signature # If this is a new signature, create a new Static Mask file which is empty # only create a new mask if one doesn't already exist if ((signature not in self.masklist) or (len(self.masklist) == 0)): self.masklist[signature] = self._buildMaskArray(signature) maskname = constructFilename(signature) self.masknames[signature] = maskname else: chip_sig = buildSignatureKey(signature) for s in self.masknames: if chip_sig in self.masknames[s]: maskname = self.masknames[s] break imagePtr[chipid].outputNames['staticMask'] = maskname stats = ImageStats(chipimage,nclip=3,fields='mode') mode = stats.mode rms = stats.stddev nbins = len(stats.histogram) del stats log.info(' mode = %9f; rms = %7f; static_sig = %0.2f' % (mode, rms, self.static_sig)) if nbins >= 2: # only combine data from new image if enough data to mask sky_rms_diff = mode - (self.static_sig*rms) np.bitwise_and(self.masklist[signature], np.logical_not(np.less(chipimage, sky_rms_diff)), self.masklist[signature]) del chipimage
def addMember(self, imagePtr=None)
Combines the input image with the static mask that has the same signature. Parameters ---------- imagePtr : object An imageObject reference Notes ----- The signature parameter consists of the tuple:: (instrument/detector, (nx,ny), chip_id) The signature is defined in the image object for each chip
6.532228
6.067286
1.076631
if signature in self.masklist: mask = self.masklist[signature] else: mask = None return mask
def getMaskArray(self, signature)
Returns the appropriate StaticMask array for the image.
4.085654
3.854501
1.05997
filename=constructFilename(signature) if(fileutil.checkFileExists(filename)): return filename else: print("\nMask file for ", str(signature), " does not exist on disk", file=sys.stderr) return None
def getFilename(self,signature)
Returns the name of the output mask file that should reside on disk for the given signature.
9.947441
8.6493
1.150086
for key in self.masklist.keys(): self.masklist[key] = None self.masklist = {}
def close(self)
Deletes all static mask objects.
6.956882
3.760504
1.849987
if signature in self.masklist: self.masklist[signature] = None else: log.warning("No matching mask")
def deleteMask(self,signature)
Delete just the mask that matches the signature given.
5.399329
5.18045
1.042251
virtual = imageObjectList[0].inmemory for key in self.masklist.keys(): #check to see if the file already exists on disk filename = self.masknames[key] #create a new fits image with the mask array and a standard header #open a new header and data unit newHDU = fits.PrimaryHDU() newHDU.data = self.masklist[key] if virtual: for img in imageObjectList: img.saveVirtualOutputs({filename:newHDU}) else: try: newHDU.writeto(filename, overwrite=True) log.info("Saving static mask to disk: %s" % filename) except IOError: log.error("Problem saving static mask file: %s to " "disk!\n" % filename) raise IOError
def saveToFile(self,imageObjectList)
Saves the static mask to a file. It uses the signatures associated with each mask to construct the filename for the output mask image.
6.138467
5.700214
1.076884
if (shape[0] % image.shape[0]) or (shape[1] % image.shape[1]): raise ValueError("Output shape must be an integer multiple of input " "image shape.") sx = shape[1] // image.shape[1] sy = shape[0] // image.shape[0] ox = (sx - 1.0) / (2.0 * sx) oy = (sy - 1.0) / (2.0 * sy) # generate output coordinates: y, x = np.indices(shape, dtype=float) x = x / sx - ox y = y / sy - oy # interpolate: return bilinear_interp(image, x, y)
def expand_image(image, shape)
Expand image from original shape to requested shape. Output shape must be an integer multiple of input image shape for each axis.
2.663307
2.434038
1.094193
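A minimal expand_image run, doubling a 2x2 image (requires bilinear_interp below; the output shape must be an integer multiple of the input shape on each axis):

    import numpy as np

    img = np.arange(4.0).reshape(2, 2)
    big = expand_image(img, (4, 4))
    print(big.shape)  # (4, 4)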
x = np.asarray(x) y = np.asarray(y) if x.shape != y.shape: raise ValueError("X- and Y-coordinates must have identical shapes.") out_shape = x.shape x = x.ravel() y = y.ravel() x0 = np.clip(x, 0, data.shape[1] - 2).astype(np.intp) y0 = np.clip(y, 0, data.shape[0] - 2).astype(np.intp) x1 = x0 + 1 y1 = y0 + 1 f00 = data[(y0, x0)] f10 = data[(y1, x0)] f01 = data[(y0, x1)] f11 = data[(y1, x1)] w00 = (x1 - x) * (y1 - y) w10 = (x1 - x) * (y - y0) w01 = (x - x0) * (y1 - y) w11 = (x - x0) * (y - y0) interp = w00 * f00 + w10 * f10 + w01 * f01 + w11 * f11 return interp.reshape(out_shape).astype(data.dtype.type)
def bilinear_interp(data, x, y)
Interpolate input ``data`` at "pixel" coordinates ``x`` and ``y``.
1.556594
1.534679
1.01428
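And a one-point check of bilinear_interp at the center of a 2x2 grid, where all four weights are 0.25:

    import numpy as np

    data = np.array([[0.0, 1.0], [2.0, 3.0]])
    print(bilinear_interp(data, np.array([0.5]), np.array([0.5])))  # [1.5]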
sci_chip = self._image[self.scienceExt,chip] exten = self.errExt+','+str(chip) # The keyword for STIS flat fields in the primary header of the flt lflatfile = fileutil.osfn(self._image["PRIMARY"].header['LFLTFILE']) pflatfile = fileutil.osfn(self._image["PRIMARY"].header['PFLTFILE']) # Try to open the file in the location specified by LFLTFILE. try: handle = fileutil.openImage(lflatfile, mode='readonly', memmap=False) hdu = fileutil.getExtn(handle,extn=exten) lfltdata = hdu.data if lfltdata.shape != self.full_shape: lfltdata = expand_image(lfltdata, self.full_shape) except IOError: lfltdata = np.ones(self.full_shape, dtype=sci_chip.data.dtype) print("Cannot find file '{:s}'. Treating flatfield as a constant value " "of '1'.\n".format(lflatfile)) # Try to open the file in the location specified by PFLTFILE. try: handle = fileutil.openImage(pflatfile, mode='readonly', memmap=False) hdu = fileutil.getExtn(handle,extn=exten) pfltdata = hdu.data except IOError: pfltdata = np.ones(self.full_shape, dtype=sci_chip.data.dtype) print("Cannot find file '{:s}'. Treating flatfield as a constant value " "of '1'.\n".format(pflatfile)) flat = lfltdata * pfltdata return flat
def getflat(self, chip)
Method for retrieving a detector's flat field. For STIS there are three. This method will return an array the same shape as the image.
3.322961
3.131629
1.061097
sci_chip = self._image[self.scienceExt,chip] ny=sci_chip._naxis1 nx=sci_chip._naxis2 detnum = sci_chip.detnum instr=self._instrument sig=(instr+self._detector,(nx,ny),int(detnum)) #signature is a tuple sci_chip.signature=sig
def _assignSignature(self, chip)
Assign a unique signature for the image based on the instrument, detector, chip, and size. This signature is used to uniquely identify the appropriate static mask for the image. This also records the filename for the static mask to the outputNames dictionary.
11.006943
8.609087
1.278526
if self.proc_unit == 'native': return self._rdnoise / self._gain() return self._rdnoise
def getReadNoise(self)
Method for returning the readnoise of a detector (in DN). :units: DN This is implemented per chip, to be consistent with other detector classes where different chips have different gains.
13.272764
12.244451
1.083982
pri_header = self._image[0].header if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']): instrpars['gnkeyword'] = 'ATODGAIN' if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']): instrpars['rnkeyword'] = 'READNSE' if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']): instrpars['expkeyword'] = 'EXPTIME' for chip in self.returnAllChips(extname=self.scienceExt): chip._gain = self.getInstrParameter(instrpars['gain'], pri_header, instrpars['gnkeyword']) chip._rdnoise = self.getInstrParameter(instrpars['rdnoise'], pri_header, instrpars['rnkeyword']) chip._exptime = self.getInstrParameter(instrpars['exptime'], chip.header, instrpars['expkeyword']) if chip._gain is None or chip._rdnoise is None or chip._exptime is None: print('ERROR: invalid instrument task parameter') raise ValueError chip._effGain = chip._gain self._assignSignature(chip._chip) #this is used in the static mask self.doUnitConversions()
def setInstrumentParameters(self, instrpars)
This method overrides the superclass to set default values into the parameter dictionary, in case empty entries are provided.
4.221634
4.192528
1.006942
for det in range(1,self._numchips+1,1): chip=self._image[self.scienceExt,det] conversionFactor = self.effGain chip._gain = self.effGain #1. chip.effGain = self.effGain chip._conversionFactor = conversionFactor
def doUnitConversions(self)
Convert the data to electrons. This converts all science data extensions and saves the results back to disk. We need to make sure the data inside the chips already in memory is altered as well.
14.682862
10.453593
1.404576
pri_header = self._image[0].header usingDefaultGain = False usingDefaultReadnoise = False if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']): instrpars['gnkeyword'] = None if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']): instrpars['rnkeyword'] = None if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']): instrpars['expkeyword'] = 'EXPTIME' for chip in self.returnAllChips(extname=self.scienceExt): #pri_header=chip.header #stis stores stuff in the science data header chip.cte_dir=0 chip._exptime = self.getInstrParameter( instrpars['exptime'], chip.header, instrpars['expkeyword'] ) if chip._exptime is None: print('ERROR: invalid instrument task parameter') raise ValueError if instrpars['rnkeyword'] is not None: chip._rdnoise = self.getInstrParameter( instrpars['rdnoise'], pri_header, instrpars['rnkeyword'] ) else: chip._rdnoise = None usingDefaultReadnoise = True if instrpars['gnkeyword'] is not None: chip._gain = self.getInstrParameter( instrpars['gain'], pri_header, instrpars['gnkeyword'] ) else: chip._gain = None usingDefaultGain = True if chip._exptime is None: print('ERROR: invalid instrument task parameter') raise ValueError # We need to determine if the user has used the default readnoise/gain value # since if not, they will need to supply a gain/readnoise value as well if usingDefaultReadnoise: chip._rdnoise= self._setMAMADefaultReadnoise() if usingDefaultGain: chip._gain = self._setMAMADefaultGain() self._assignSignature(chip._chip) #this is used in the static mask chip._effGain=chip._gain # Convert the science data to electrons if specified by the user. self.doUnitConversions()
def setInstrumentParameters(self, instrpars)
This method overrides the superclass to set default values into the parameter dictionary, in case empty entries are provided.
4.384194
4.369817
1.00329
log.info('Dataset ' + filename + ' has (keyword = value) of (' + key + ' = ' + str(value) + ').') if msg == Messages.NOPROC.value: log.info('Dataset cannot be aligned.') else: log.info('Dataset can be aligned, but the result may be compromised.')
def generate_msg(filename, msg, key, value)
Generate a message for the output log indicating the file/association will not be processed as the characteristics of the data are known to be inconsistent with alignment.
9.697117
8.123498
1.193712
log.debug(inputDict) inputDict["input"] = input configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict, loadOnly=(not editpars)) if configObj is None: return if not editpars: run(configObj)
def drizCR(input=None, configObj=None, editpars=False, **inputDict)
Look for cosmic rays.
6.224267
6.312708
0.98599
# Remove the existing cor file if it exists if os.path.isfile(outfile): os.remove(outfile) print("Removing old corr file: '{:s}'".format(outfile)) with fits.open(template, memmap=False) as ftemplate: for arr in arrlist: ftemplate[arr['sciext']].data = arr['corrFile'] if arr['dqext'][0] != arr['sciext'][0]: ftemplate[arr['dqext']].data = arr['dqMask'] ftemplate.writeto(outfile) print("Created CR corrected file: '{:s}'".format(outfile))
def createCorrFile(outfile, arrlist, template)
Create a _cor file with the same format as the original input image. The DQ array will be replaced with the mask array used to create the _cor file.
3.395052
3.415493
0.994015
paramDict = { 'gain': 7, # Detector gain, e-/ADU 'grow': 1, # Radius around CR pixel to mask [default=1 for # 3x3 for non-NICMOS] 'ctegrow': 0, # Length of CTE correction to be applied 'rn': 5, # Read noise in electrons 'snr': '4.0 3.0', # Signal-to-noise ratio 'scale': '0.5 0.4', # scaling factor applied to the derivative 'backg': 0, # Background value 'expkey': 'exptime' # exposure time keyword } if len(configObj) > 0: for key in configObj: paramDict[key] = configObj[key] return paramDict
def setDefaults(configObj={})
Return a dictionary of the default parameters which have also been updated with the user overrides.
7.417766
7.435336
0.997637
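setDefaults can be exercised directly with a plain dict of overrides (sketch):

    params = setDefaults({'gain': 2.0, 'snr': '5.0 4.0'})
    print(params['gain'], params['rn'])  # 2.0 5 -- user override plus an untouched default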
helpstr = getHelpAsString(docstring=True, show_ver=True) if file is None: print(helpstr) else: with open(file, mode='w') as f: f.write(helpstr)
def help(file=None)
Print out syntax help for running ``astrodrizzle`` Parameters ---------- file : str (Default = None) If given, write out help to the filename specified by this parameter Any previously existing file with this name will be deleted before writing out the help.
3.809956
4.624146
0.823926
install_dir = os.path.dirname(__file__) taskname = util.base_taskname(__taskname__, __package__) htmlfile = os.path.join(install_dir, 'htmlhelp', taskname + '.html') helpfile = os.path.join(install_dir, taskname + '.help') if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: helpString = "\n{:s} Version {:s} updated on {:s}\n\n".format( __taskname__, __version__, __version_date__ ) else: helpString = '' if os.path.exists(helpfile): helpString += teal.getHelpFileAsString(taskname, __file__) elif __doc__ is not None: helpString += __doc__ + os.linesep else: helpString = 'file://' + htmlfile return helpString
def getHelpAsString(docstring=False, show_ver=True)
Return useful help from a file in the script directory called ``__taskname__.help``
3.230413
3.002516
1.075902
from . import imageObject if outwcs is None: output_mem = 0 else: if isinstance(outwcs,imageObject.WCSObject): owcs = outwcs.final_wcs else: owcs = outwcs output_mem = np.prod(owcs.pixel_shape) * 4 * 3 # bytes used for output arrays img1 = imageObjectList[0] numchips = 0 input_mem = 0 for img in imageObjectList: numchips += img._nmembers # account for group parameter set by user # if we have the cpus and s/w, ok, but still allow user to set pool size pool_size = util.get_pool_size(num_cores, None) pool_size = pool_size if (numchips >= pool_size) else numchips inimg = 0 chip_mem = 0 for img in imageObjectList: for chip in range(1,img._numchips+1): cmem = img[chip].shape[0]*img[chip].shape[1]*4 inimg += 1 if inimg < pool_size: input_mem += cmem*2 if chip_mem == 0: chip_mem = cmem max_mem = (input_mem + output_mem*pool_size + chip_mem*2)//(1024*1024) print('*'*80) print('*') print('* Estimated memory usage: up to %d Mb.'%(max_mem)) print('* Output image size: {:d} X {:d} pixels. '.format(*owcs.pixel_shape)) print('* Output image file: ~ %d Mb. '%(output_mem//(1024*1024))) print('* Cores available: %d'%(pool_size)) print('*') print('*'*80) if interactive: print('Continue with processing?') while True: if sys.version_info[0] >= 3: k = input("(y)es or (n)o").strip()[0].lower() else: k = raw_input("(y)es or (n)o").strip()[0].lower() if k not in ['n', 'y']: continue if k == 'n': raise KeyboardInterrupt("Execution aborted")
def reportResourceUsage(imageObjectList, outwcs, num_cores, interactive=False)
Provide some information to the user on the estimated resource usage (primarily memory) for this run.
3.942587
3.957891
0.996133
filelist,output,ivmlist,oldasndict=processFilenames(input,None) try: mdrizdict = mdzhandler.getMdriztabParameters(filelist) except KeyError: print('No MDRIZTAB found for "%s". Parameters remain unchanged.'%(filelist[0])) mdrizdict = {} return mdrizdict
def getMdriztabPars(input)
High-level function for getting the parameters from MDRIZTAB Used primarily for TEAL interface.
12.143699
12.533627
0.968889
if ivmlist is None: return for img,ivmname in zip(imageObjectList,ivmlist): img.updateIVMName(ivmname)
def addIVMInputs(imageObjectList,ivmlist)
Add IVM filenames provided by user to outputNames dictionary for each input imageObject.
4.236021
3.72262
1.137914
f,i,o,a=buildFileList(input) return len(f) > 1
def checkMultipleFiles(input)
Evaluates the input to determine whether there is 1 or more than 1 valid input file.
15.141136
12.730499
1.189359
imageObjList = [] mtflag = False mt_refimg = None for img in files: image = _getInputImage(img,group=group) image.setInstrumentParameters(instrpars) image.compute_wcslin(undistort=undistort) if 'MTFLAG' in image._image['PRIMARY'].header: # check to see whether we are dealing with moving target observations... _keyval = image._image['PRIMARY'].header['MTFLAG'] if not util.is_blank(_keyval): if isinstance(_keyval,bool): mtflag = _keyval else: if 'T' in _keyval: mtflag = True else: mtflag = False else: mtflag = False if mtflag: print("#####\nProcessing Moving Target Observations using reference image as WCS for all inputs!\n#####\n") if mt_refimg is None: mt_refimg = image else: image.set_mt_wcs(mt_refimg) image.inmemory = inmemory # set flag for inmemory processing # Now add (possibly updated) image object to list imageObjList.append(image) return imageObjList
def createImageObjectList(files,instrpars,group=None, undistort=True, inmemory=False)
Returns a list of imageObject instances, 1 for each input image in the list of input filenames.
4.634935
4.541226
1.020635
# extract primary header and SCI,1 header from input image sci_ext = 'SCI' if group in [None,'']: exten = '[sci,1]' phdu = fits.getheader(input, memmap=False) else: # change to use fits more directly here? if group.find(',') > 0: grp = group.split(',') if grp[0].isalpha(): grp = (grp[0],int(grp[1])) else: grp = int(grp[0]) else: grp = int(group) phdu = fits.getheader(input, memmap=False) phdu.extend(fits.getheader(input, ext=grp, memmap=False)) # Extract the instrument name for the data that is being processed by Multidrizzle _instrument = phdu['INSTRUME'] # Determine the instrument detector in use. NICMOS is a special case because it does # not use the 'DETECTOR' keyword. It instead uses 'CAMERA' to identify which of its # 3 cameras is in use. All other instruments support the 'DETECTOR' keyword. if _instrument == 'NICMOS': _detector = phdu['CAMERA'] else: try: _detector = phdu['DETECTOR'] except KeyError: # using the phdu as set above (fits.getheader) is MUCH faster and # works for the majority of data; but fileutil handles waivered fits phdu = fileutil.getHeader(input+exten) _detector = phdu['DETECTOR'] # if this fails, let it throw del phdu # just to keep clean # Match up the instrument and detector with the right class # only importing the instrument modules as needed. try: if _instrument == 'ACS': from . import acsData if _detector == 'HRC': return acsData.HRCInputImage(input,group=group) if _detector == 'WFC': return acsData.WFCInputImage(input,group=group) if _detector == 'SBC': return acsData.SBCInputImage(input,group=group) if _instrument == 'NICMOS': from . import nicmosData if _detector == 1: return nicmosData.NIC1InputImage(input) if _detector == 2: return nicmosData.NIC2InputImage(input) if _detector == 3: return nicmosData.NIC3InputImage(input) if _instrument == 'WFPC2': from . import wfpc2Data return wfpc2Data.WFPC2InputImage(input,group=group) if _instrument == 'STIS': from . import stisData if _detector == 'CCD': return stisData.CCDInputImage(input,group=group) if _detector == 'FUV-MAMA': return stisData.FUVInputImage(input,group=group) if _detector == 'NUV-MAMA': return stisData.NUVInputImage(input,group=group) if _instrument == 'WFC3': from . import wfc3Data if _detector == 'UVIS': return wfc3Data.WFC3UVISInputImage(input,group=group) if _detector == 'IR': return wfc3Data.WFC3IRInputImage(input,group=group) except ImportError: msg = 'No module implemented for '+str(_instrument)+'!' raise ValueError(msg) # If a supported instrument is not detected, print the following error message # and raise an exception. msg = 'Instrument: ' + str(_instrument) + '/' + str(_detector) + ' not yet supported!' raise ValueError(msg)
def _getInputImage (input,group=None)
Factory function to return appropriate imageObject class instance
3.497454
3.480979
1.004733
ivmlist = None oldasndict = None if input is None: print("No input files provided to processInput") raise ValueError if not isinstance(input, list) and ('_asn' in input or '_asc' in input): # Input is an association table # Get the input files, and run makewcs on them oldasndict = asnutil.readASNTable(input, prodonly=infilesOnly) if not infilesOnly: if output in ["",None,"None"]: output = oldasndict['output'].lower() # ensure output name is lower case asnhdr = fits.getheader(input, memmap=False) # Only perform duplication check if not already completed... dupcheck = asnhdr.get('DUPCHECK',default="PERFORM") == "PERFORM" #filelist = [fileutil.buildRootname(fname) for fname in oldasndict['order']] filelist = buildASNList(oldasndict['order'],input,check_for_duplicates=dupcheck) elif (not isinstance(input, list)) and \ (input[0] == '@') : # input is an @ file f = open(input[1:]) # Read the first line in order to determine whether # IVM files have been specified in a second column... line = f.readline() f.close() # Parse the @-file with irafglob to extract the input filename filelist = irafglob.irafglob(input, atfile=util.atfile_sci) # If there is a second column... if len(line.split()) == 2: # ...parse out the names of the IVM files as well ivmlist = irafglob.irafglob(input, atfile=util.atfile_ivm) if output in ['',None,"None"]: if len(filelist) == 1: output = fileutil.buildNewRootname(filelist[0]) else: output = 'final' else: #input is a string or a python list try: filelist, output = parseinput.parseinput(input, outputname=output) if output in ['',None,"None"]: if len(filelist) == 1: output = fileutil.buildNewRootname(filelist[0]) else: output = 'final' if not isinstance(input, list): filelist.sort() except IOError: raise # sort the list of input files # this ensures the list of input files has the same order on all platforms # it can have different order because listdir() uses inode order, not unix type order #filelist.sort() return filelist, output, ivmlist, oldasndict
def processFilenames(input=None,output=None,infilesOnly=False)
Process the input string which contains the input file information and return a filelist, output name, ivmlist, and oldasndict.
5.938079
5.819006
1.020463
newfilelist, ivmlist, output, oldasndict, origflist = buildFileListOrig( input, output=output, ivmlist=ivmlist, wcskey=wcskey, updatewcs=updatewcs, **workinplace) if not newfilelist: buildEmptyDRZ(input, output) return None, None, output # run all WCS updating -- Now done in buildFileList #pydr_input = _process_input_wcs(newfilelist, wcskey, updatewcs) pydr_input = newfilelist # AsnTable will handle the case when output==None if not oldasndict:# and output is not None: oldasndict = asnutil.ASNTable(pydr_input, output=output) oldasndict.create() asndict = update_member_names(oldasndict, pydr_input) asndict['original_file_names'] = origflist # Build output filename drz_extn = '_drz.fits' for img in newfilelist: # special case logic to automatically recognize when _flc.fits files # are provided as input and produce a _drc.fits file instead if '_flc.fits' in img: drz_extn = '_drc.fits' break if output in [None,'']: output = fileutil.buildNewRootname(asndict['output'], extn=drz_extn) else: if '.fits' in output.lower(): pass elif drz_extn[:4] not in output.lower(): output = fileutil.buildNewRootname(output, extn=drz_extn) log.info('Setting up output name: %s' % output) return asndict, ivmlist, output
def process_input(input, output=None, ivmlist=None, updatewcs=True, prodonly=False, wcskey=None, **workinplace)
Create the full input list of filenames after verifying and converting files as needed.
5.478636
5.452275
1.004835
# Run parseinput though it's likely already been done in processFilenames outfiles = parseinput.parseinput(infiles)[0] # Disable parallel processing here for now until hardware I/O gets "wider". # Since this part is IO bound, parallelizing doesn't help more than a little # in most cases, and may actually slow this down on some desktop nodes. # cfgval_num_cores = None # get this from paramDict # pool_size = util.get_pool_size(cfgval_num_cores, len(outfiles)) pool_size = 1 # do the WCS updating if wcskey in ['', ' ', 'INDEF', None]: if updatewcs: log.info('Updating input WCS using "updatewcs"') else: log.info('Resetting input WCS to be based on WCS key = %s' % wcskey) if pool_size > 1: log.info('Executing %d parallel workers' % pool_size) subprocs = [] for fname in outfiles: p = multiprocessing.Process(target=_process_input_wcs_single, name='processInput._process_input_wcs()', # for err msgs args=(fname, wcskey, updatewcs) ) subprocs.append(p) mputil.launch_and_wait(subprocs, pool_size) # blocks till all done else: log.info('Executing serially') for fname in outfiles: _process_input_wcs_single(fname, wcskey, updatewcs) return outfiles
def _process_input_wcs(infiles, wcskey, updatewcs)
This is a subset of process_input(), for internal use only. This is the portion of input handling which sets/updates WCS data, and is a performance hit - a target for parallelization. Returns the expanded list of filenames.
6.042474
6.092726
0.991752
if wcskey in ['', ' ', 'INDEF', None]: if updatewcs: uw.updatewcs(fname, checkfiles=False) else: numext = fileutil.countExtn(fname) extlist = [] for extn in range(1, numext + 1): extlist.append(('SCI', extn)) if wcskey in string.ascii_uppercase: wkey = wcskey wname = ' ' else: wname = wcskey wkey = ' ' altwcs.restoreWCS(fname, extlist, wcskey=wkey, wcsname=wname) # make an asn table at the end # Make sure there is a WCSCORR table for each input image if wcskey not in ['', ' ', 'INDEF', None] or updatewcs: wcscorr.init_wcscorr(fname)
def _process_input_wcs_single(fname, wcskey, updatewcs)
See docs for _process_input_wcs. This is separated to be spawned in parallel.
5.007015
5.07453
0.986695
newfilelist, ivmlist, output, oldasndict, filelist = \ buildFileListOrig(input=input, output=output, ivmlist=ivmlist, wcskey=wcskey, updatewcs=updatewcs, **workinplace) return newfilelist, ivmlist, output, oldasndict
def buildFileList(input, output=None, ivmlist=None, wcskey=None, updatewcs=True, **workinplace)
Builds a file list which has undergone various instrument-specific checks for input to MultiDrizzle, including splitting STIS associations.
3.482675
3.720785
0.936005
# NOTE: original file name is required in order to correctly associate # user catalog files (e.g., user masks to be used with 'skymatch') with # corresponding imageObjects. filelist, output, ivmlist, oldasndict = processFilenames(input,output) # verify that all input images specified can be updated as needed filelist = util.verifyFilePermissions(filelist) if filelist is None or len(filelist) == 0: return None, None, None, None, None manageInputCopies(filelist,**workinplace) # to keep track of the original file names we do the following trick: # pack filelist with the ivmlist using zip and later unpack the zipped list. # # NOTE: this required a small modification of the checkStisFiles function # in stsci.tools.check_files to be able to handle ivmlists that are tuples. if ivmlist is None: ivmlist = len(filelist)*[None] else: assert(len(filelist) == len(ivmlist)) #TODO: remove after debugging ivmlist = list(zip(ivmlist,filelist)) # Check format of FITS files - convert Waiver/GEIS to MEF if necessary filelist, ivmlist = check_files.checkFITSFormat(filelist, ivmlist) # check for non-polynomial distortion correction if not updatewcs: # with updatewcs turned on, any problems will get resolved # so we do not need to be concerned about the state of the DGEOFILEs filelist = checkDGEOFile(filelist) # run all WCS updating updated_input = _process_input_wcs(filelist, wcskey, updatewcs) newfilelist, ivmlist = check_files.checkFiles(updated_input, ivmlist) if updatewcs: uw.updatewcs(','.join(set(newfilelist) - set(filelist))) if len(ivmlist) > 0: ivmlist, filelist = list(zip(*ivmlist)) else: filelist = [] # ensure that both filelist and ivmlist are defined as empty lists return newfilelist, ivmlist, output, oldasndict, filelist
def buildFileListOrig(input, output=None, ivmlist=None, wcskey=None, updatewcs=True, **workinplace)
Builds a file list which has undergone various instrument-specific checks for input to MultiDrizzle, including splitting STIS associations. Compared to buildFileList, this version returns the list of the original file names as specified by the user (e.g., before GEIS->MEF, or WAIVER FITS->MEF conversion).
7.60367
7.127487
1.066809
# Recognize when multiple valid inputs with the same rootname are present # this would happen when both CTE-corrected (_flc) and non-CTE-corrected (_flt) # products are in the same directory as an ASN table filelist, duplicates = checkForDuplicateInputs(rootnames) if check_for_duplicates and duplicates: # Build new ASN tables for each set of input files origasn = changeSuffixinASN(asnname, 'flt') dupasn = changeSuffixinASN(asnname, 'flc') errstr = 'ERROR:\nMultiple valid input files found:\n' for fname, dname in zip(filelist, duplicates): errstr += ' %s %s\n' % (fname, dname) errstr += ('\nNew association files have been generated for each ' 'version of these files.\n %s\n %s\n\nPlease ' 're-start astrodrizzle using one of these new ASN files or ' 'use wildcards for the input to only select one type of ' 'input file.' % (dupasn, origasn)) print(textutil.textbox(errstr), file=sys.stderr) # generate new ASN files for each case, # report this case of duplicate inputs to the user then quit raise ValueError return filelist
def buildASNList(rootnames, asnname, check_for_duplicates=True)
Return the list of filenames for a given set of rootnames
8.814432
8.706149
1.012438
# Start by creating a new name for the ASN table _new_asn = asnfile.replace('_asn.fits','_'+suffix+'_asn.fits') if os.path.exists(_new_asn): os.remove(_new_asn) # copy original ASN table to new table shutil.copy(asnfile,_new_asn) # Open up the new copy and convert all MEMNAME's to include suffix fasn = fits.open(_new_asn, mode='update', memmap=False) fasn[0].header['DUPCHECK'] = "COMPLETE" newdata = fasn[1].data.tolist() for i in range(len(newdata)): val = newdata[i][0].decode(encoding='UTF-8').strip() if 'prod' not in newdata[i][1].decode(encoding='UTF-8').lower(): val += '_'+suffix newdata[i] = (val,newdata[i][1].strip(),newdata[i][2]) # Redefine dtype to support longer strings for MEMNAME new_dtype = [] d = fasn[1].data.dtype msize = d.descr[0][1][1:] new_size = int(msize[1:])+8 mtype = msize[0] new_dtype.append((d.descr[0][0],d.descr[0][1].replace(msize,'{}{}'.format(mtype,new_size)))) new_dtype.append(d.descr[1]) new_dtype.append(d.descr[2]) # Assign newly created, reformatted array to extension newasn = np.array(newdata,dtype=new_dtype) fasn[1].data = newasn fasn.close() return _new_asn
def changeSuffixinASN(asnfile, suffix)
Create a copy of the original asn file and change the name of all members to include the suffix.
3.594502
3.541422
1.014988
flist = [] duplist = [] for fname in rootnames: # Look for any recognized CTE-corrected products f1 = fileutil.buildRootname(fname,ext=['_flc.fits']) f2 = fileutil.buildRootname(fname) flist.append(f2) if os.path.exists(f1) and f1 != f2: # More than 1 valid input found for this rootname duplist.append(f1) return flist,duplist
def checkForDuplicateInputs(rootnames)
Check input files specified in ASN table for duplicate versions with multiple valid suffixes (_flt and _flc, for example).
5.59954
5.529816
1.012609
if cr_bits_value > 0: for img in imageObjectList: for chip in range(1,img._numchips+1,1): sci_chip = img._image[img.scienceExt,chip] resetbits.reset_dq_bits(sci_chip.dqfile, cr_bits_value, extver=chip, extname=sci_chip.dq_extn)
def resetDQBits(imageObjectList, cr_bits_value=4096)
Reset the CR bit in each input image's DQ array
6.170682
6.416889
0.961631
omembers = oldasndict['members'].copy() nmembers = {} translated_names = [f.split('.fits')[0] for f in pydr_input] newkeys = [fileutil.buildNewRootname(file) for file in pydr_input] keys_map = list(zip(newkeys, pydr_input)) for okey, oval in list(omembers.items()): if okey in newkeys: nkey = pydr_input[newkeys.index(okey)] nmembers[nkey.split('.fits')[0]] = oval oldasndict.pop('members') # replace should be always True to cover the case when flt files were removed # and the case when names were translated oldasndict.update(members=nmembers, replace=True) oldasndict['order'] = translated_names return oldasndict
def update_member_names(oldasndict, pydr_input)
Update names in a member dictionary. Given an association dictionary with rootnames and a list of full file names, it will update the names in the member dictionary to contain '_*' extension. For example a rootname of 'u9600201m' will be replaced by 'u9600201m_c0h' making sure that a MEF file is passed as an input and not the corresponding GEIS file.
5.604814
5.460886
1.026356
# Find out what directory is being used for processing workingdir = os.getcwd() # Only create sub-directory for copies of inputs, if copies are requested # Create name of sub-directory for copies origdir = os.path.join(workingdir,'OrIg_files') if workinplace['overwrite'] or workinplace['preserve']: # if sub-directory does not exist yet, create it if not os.path.exists(origdir): os.mkdir(origdir) printMsg = True # check to see if copies already exist for each file for fname in filelist: copymade = False # If a copy is made, no need to restore copyname = os.path.join(origdir,fname) short_copyname = os.path.join('OrIg_files',fname) if workinplace['overwrite']: print('Forcibly archiving original of: ',fname, 'as ',short_copyname) # make a copy of the file in the sub-directory if os.path.exists(copyname): os.chmod(copyname, 438) # octal 666 shutil.copy(fname,copyname) os.chmod(copyname,292) # octal 444 makes files read-only if printMsg: print('\nTurning OFF "preserve" and "restore" actions...\n') printMsg = False # We only need to print this one time... copymade = True if (workinplace['preserve'] and not os.path.exists(copyname)) \ and not workinplace['overwrite']: # Preserving a copy of the input, but only if not already archived print('Preserving original of: ',fname, 'as ',short_copyname) # make a copy of the file in the sub-directory shutil.copy(fname,copyname) os.chmod(copyname,292) # octal 444 makes files read-only copymade = True if 'restore' in workinplace and not copymade: if (os.path.exists(copyname) and workinplace['restore']) and not workinplace['overwrite']: print('Restoring original input for ',fname,' from ',short_copyname) # replace current files with original version os.chmod(fname, 438) # octal 666 shutil.copy(copyname, fname) os.chmod(fname, 438)
def manageInputCopies(filelist, **workinplace)
Creates copies of all input images in a sub-directory. The copies are made prior to any processing being done to the images at all, including updating the WCS keywords. If there are already copies present, they will NOT be overwritten, but instead will be used to over-write the current working copies.
3.763831
3.676375
1.023788
msg = ("A DGEOFILE is specified in the header of {:s}, but no NPOLFILE is present; the NPOLFILE is required to apply the non-polynomial distortion correction. Quit now to update the headers, or continue without that correction?") # (message text reconstructed from the docstring; originals lost) short_msg = "Quit (q) to update the headers, or continue (c) without the correction?" for inputfile in filenames: try: dgeofile = fits.getval(inputfile, 'DGEOFILE', memmap=False) except KeyError: continue if dgeofile not in ["N/A", "n/a", ""]: message = msg.format(inputfile) try: npolfile = fits.getval(inputfile, 'NPOLFILE', memmap=False) except KeyError: ustop = userStop(message) while ustop is None: ustop = userStop(short_msg) if ustop: return None return filenames
def checkDGEOFile(filenames)
Verify that input file has been updated with NPOLFILE This function checks for the presence of 'NPOLFILE' kw in the primary header when 'DGEOFILE' kw is present and valid (i.e. 'DGEOFILE' is not blank or 'N/A'). It handles the case of science files downloaded from the archive before the new software was installed there. If 'DGEOFILE' is present and 'NPOLFILE' is missing, print a message and let the user choose whether to (q)uit and update the headers or (c)ontinue and run astrodrizzle without the non-polynomial correction. 'NPOLFILE' will be populated in the pipeline before astrodrizzle is run. In the case of WFPC2 the old style dgeo files are used to create detector to image correction at runtime. Parameters ---------- filenames : list of str file names of all images to be checked
5.45089
4.612837
1.181678
paramDict = { 'input':'*flt.fits', 'output':None, 'mdriztab':None, 'refimage':None, 'runfile':None, 'workinplace':False, 'updatewcs':True, 'proc_unit':'native', 'coeffs':True, 'context':False, 'clean':True, 'group':None, 'ra':None, 'dec':None, 'build':True, 'gain':None, 'gnkeyword':None, 'readnoise':None, 'rnkeyword':None, 'exptime':None, 'expkeyword':None, 'crbitval':4096, 'static':True, 'static_sig':4.0, 'skysub':True, 'skymethod':"globalmin+match", 'skystat':"median", 'skywidth':0.1, 'skylower':None, 'skyupper':None, 'skyclip':5, 'skylsigma':4.0, 'skyusigma':4.0, "skymask_cat":"", "use_static":True, "sky_bits":0, "skyuser":"", "skyfile":"", 'driz_separate':True, 'driz_sep_outnx':None, 'driz_sep_outny':None, 'driz_sep_crpix1':None, 'driz_sep_crpix2':None, 'driz_sep_kernel':'turbo', 'driz_sep_scale':None, 'driz_sep_pixfrac':1.0, 'driz_sep_rot':None, 'driz_sep_fillval':None, 'driz_sep_bits':0, 'median':True, 'median_newmasks':True, 'combine_type':"minmed", 'combine_nsigma':"4 3", 'combine_nlow':0, 'combine_nhigh':1, 'combine_lthresh':None, 'combine_hthresh':None, 'combine_grow':1, 'blot':True, 'blot_interp':'poly5', 'blot_sinscl':1.0, 'driz_cr':True, 'driz_cr_corr':False, 'driz_cr_snr':"3.5 3.0", 'driz_cr_scale':"1.2 0.7", 'driz_cr_cteg':0, 'driz_cr_grow':1, 'driz_combine':True, 'final_wht_type':"EXP", 'final_outnx':None, 'final_outny':None, 'final_crpix1':None, 'final_crpix2':None, 'final_kernel':'square', 'final_scale':None, 'final_pixfrac':1.0, 'final_rot':None, 'final_fillval':None, 'final_bits':0} paramDict.update(input_dict) print('\nUser Input Parameters for Init Step:') util.printParams(paramDict) return paramDict
def _setDefaults(input_dict={})
Define full set of default values for unit-testing this module. [OBSOLETE]
3.577529
3.56105
1.004628
# Read the temperature-dependent dark file. The name of the file is taken from # the TEMPFILE keyword in the primary header. tddobj = readTDD.fromcalfile(self.name) if tddobj is None: return np.ones(self.full_shape, dtype=self.image_dtype) * self.getdarkcurrent() else: # Create Dark Object from AMPGLOW and Linear Dark components darkobj = tddobj.getampglow() + tddobj.getlindark() # Return the dark image, taking into account any subarray information available return darkobj[self.ltv2:self.size2,self.ltv1:self.size1]
def getdarkimg(self,chip)
Return an array representing the dark image for the detector. Returns ------- dark : array The dark array in the same shape as the image with **units of cps**.
14.658596
14.474668
1.012707
has_bunit = 'BUNIT' in self._image['sci', 1].header

countrate = False
if (self._image[0].header['UNITCORR'].strip() == 'PERFORM') or \
   (has_bunit and self._image['sci', 1].header['bunit'].find('/') != -1):
    countrate = True
return countrate
def isCountRate(self)
isCountRate: Method of the IRInputObject class used to indicate whether the science data is in units of counts or count rate. This method assumes that the keyword 'BUNIT' is in the header of the input FITS file.
6.070418
4.801787
1.2642
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']

if self._isNotValid(instrpars['gain'], instrpars['gnkeyword']):
    instrpars['gnkeyword'] = 'ADCGAIN'  # gain has been hardcoded below
if self._isNotValid(instrpars['rdnoise'], instrpars['rnkeyword']):
    instrpars['rnkeyword'] = None
if self._isNotValid(instrpars['exptime'], instrpars['expkeyword']):
    instrpars['expkeyword'] = 'EXPTIME'

for chip in self.returnAllChips(extname=self.scienceExt):
    chip._gain = 5.4  # measured gain
    chip._rdnoise = self.getInstrParameter(
        instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
    )
    chip._exptime = self.getInstrParameter(
        instrpars['exptime'], pri_header, instrpars['expkeyword']
    )

    if chip._gain is None or chip._exptime is None:
        print('ERROR: invalid instrument task parameter')
        raise ValueError

    # We need to treat read noise as a special case since it is
    # not populated in the NICMOS primary header
    if chip._rdnoise is None:
        chip._rdnoise = self._getDefaultReadnoise()

    chip._darkrate = self._getDarkRate()
    chip.darkcurrent = self.getdarkcurrent()

    chip._effGain = chip._gain

    # This is used in the static mask; the static mask name is also defined
    # here and must be done after outputNames
    self._assignSignature(chip._chip)

# Convert the science data to electrons if specified by the user.
self.doUnitConversions()
def setInstrumentParameters(self, instrpars)
This method overrides the superclass to set default values into the parameter dictionary, in case empty entries are provided.
6.953135
6.915119
1.005497
# Merge all configobj instances into a single object
configobj[section_name] = {}

# Load the default full set of configuration parameters for the PSET:
iparsobj_cfg = teal.load(task_name)

# Identify optional parameters in input_dict that belong to this
# PSET and add them to iparsobj_cfg:
if input_dict is not None:
    for key in list(input_dict.keys()):
        if key in iparsobj_cfg:
            if iparsobj is not None and key in iparsobj:
                raise DuplicateKeyError("Duplicate parameter '{:s}' "
                                        "provided for task {:s}".format(key, task_name))

            iparsobj_cfg[key] = input_dict[key]
            del input_dict[key]

if iparsobj is not None:
    iparsobj_cfg.update(iparsobj)

del iparsobj_cfg['_task_name_']

# merge these parameters into the full set
configobj[section_name].merge(iparsobj_cfg)
def _managePsets(configobj, section_name, task_name, iparsobj=None, input_dict=None)
Read in parameter values from PSET-like configobj tasks defined for source-finding algorithms, and any other PSET-like tasks under this task, and merge those values into the input configobj dictionary.
4.080118
4.102593
0.994522
teal.teal(imagefindpars.__taskname__, returnAs=None, autoClose=True, loadOnly=False, canExecute=False)
def edit_imagefindpars()
Allows the user to edit the imagefindpars configObj in a TEAL GUI
49.304905
40.100311
1.229539
teal.teal(refimagefindpars.__taskname__, returnAs=None, autoClose=True, loadOnly=False, canExecute=False)
def edit_refimagefindpars()
Allows the user to edit the refimagefindpars configObj in a TEAL GUI
43.990952
35.096867
1.253415
start_path = os.getcwd()
try:
    log.info("Local repository path: {}".format(localRepoPath))
    os.chdir(localRepoPath)
    log.info("\n== Remote URL")
    os.system('git remote -v')
    # log.info("\n== Remote Branches")
    # os.system("git branch -r")
    log.info("\n== Local Branches")
    os.system("git branch")
    log.info("\n== Most Recent Commit")
    os.system("git log | head -1")
    rv = 0
except Exception:
    rv = 111
    log.info("WARNING! get_git_rev_info.print_rev_id() encountered a problem and cannot continue.")
finally:
    os.chdir(start_path)
    if rv != 0:
        sys.exit(rv)
def print_rev_id(localRepoPath)
prints information about the specified local repository to STDOUT. Expected method of execution: command-line or shell script call Parameters ---------- localRepoPath: string Local repository path. Returns ======= Nothing as such. subroutine will exit with a state of 0 if everything ran OK, and a value of '111' if something went wrong.
3.587277
3.36021
1.067575
rv = "FAILURE: git revision info not found"
start_path = os.getcwd()
try:
    os.chdir(localRepoPath)
    instream = os.popen("git --no-pager log --max-count=1 | head -1")
    for streamline in instream.readlines():
        streamline = streamline.strip()
        if streamline.startswith("commit "):
            rv = streamline.replace("commit ", "")
        else:
            raise ValueError(streamline)
except Exception:
    rv = "FAILURE: git revision info not found"
finally:
    os.chdir(start_path)
return rv
def get_rev_id(localRepoPath)
returns the current full git revision id of the specified local repository. Expected method of execution: python subroutine call Parameters ---------- localRepoPath: string Local repository path. Returns ======= full git revision ID of the specified repository if everything ran OK, and "FAILURE" if something went wrong.
3.472937
3.414477
1.017121
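A usage sketch for the two revision helpers above; the repository path is a placeholder.

rev = get_rev_id('/path/to/local/clone')
if not rev.startswith('FAILURE'):
    print('HEAD commit:', rev)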
d2ifile = None for f in flist: fdet = fits.getval(f, 'detector', memmap=False) if fdet == detector: d2ifile = f return d2ifile
def find_d2ifile(flist,detector)
Search a list of files for one that matches the detector specified.
2.702171
2.708108
0.997808
npolfile = None for f in flist: fdet = fits.getval(f, 'detector', memmap=False) if fdet == detector: filt1 = fits.getval(f, 'filter1', memmap=False) filt2 = fits.getval(f, 'filter2', memmap=False) fdate = fits.getval(f, 'date', memmap=False) if filt1 == 'ANY' or \ (filt1 == filters[0] and filt2 == filters[1]): npolfile = f return npolfile
def find_npolfile(flist,detector,filters)
Search a list of files for one that matches the configuration of detector and filters used.
2.163857
2.260139
0.9574
if configobj is None:
    configobj = teal.teal(__taskname__, loadOnly=(not editpars))

update(configobj['input'], configobj['refdir'],
       local=configobj['local'], interactive=configobj['interactive'],
       wcsupdate=configobj['wcsupdate'])
def run(configobj=None,editpars=False)
Teal interface for running this code.
12.871816
14.027697
0.9176
# Interpret bits value bits = interpret_bit_flags(bits) flist, fcol = parseinput.parseinput(input) for filename in flist: # open input file in write mode to allow updating the DQ array in-place p = fits.open(filename, mode='update', memmap=False) # Identify the DQ array to be updated # If no extver is specified, build a list of all DQ arrays in the file if extver is None: extver = [] for hdu in p: # find only those extensions which match the input extname # using case-insensitive name comparisons for 'extname' if 'extver' in hdu.header and \ hdu.header['extname'].lower() == extname.lower(): extver.append(int(hdu.header['extver'])) else: # Otherwise, insure that input extver values are a list if not isinstance(extver, list): extver = [extver] # for each DQ array identified in the file... for extn in extver: dqarr = p[extname,extn].data dqdtype = dqarr.dtype # reset the desired bits p[extname,extn].data = (dqarr & ~bits).astype(dqdtype) # preserve original dtype log.info('Reset bit values of %s to a value of 0 in %s[%s,%s]' % (bits, filename, extname, extn)) # close the file with the updated DQ array(s) p.close()
def reset_dq_bits(input,bits,extver=None,extname='dq')
This function resets bits in the integer array(s) of a FITS file. Parameters ---------- input : str full filename with path bits : str sum or list of integers corresponding to all the bits to be reset extver : int, optional List of version numbers of the DQ arrays to be corrected [Default Value: None, will do all] extname : str, optional EXTNAME of the DQ arrays in the FITS file [Default Value: 'dq'] Notes ----- The default value of None for the 'extver' parameter specifies that all extensions with EXTNAME matching 'dq' (as specified by the 'extname' parameter) will have their bits reset. Examples -------- 1. The following command will reset the 4096 bits in all the DQ arrays of the file input_file_flt.fits:: reset_dq_bits("input_file_flt.fits", 4096) 2. To reset the 2,32,64 and 4096 bits in the second DQ array, specified as 'dq,2', in the file input_file_flt.fits:: reset_dq_bits("input_file_flt.fits", "2,32,64,4096", extver=2)
4.387871
4.344067
1.010084
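The heart of reset_dq_bits is a single bitwise AND-NOT; a minimal, self-contained sketch of that operation, where the interpret_bit_flags import path is an assumption based on stsci.tools conventions.

import numpy as np
from stsci.tools.bitmask import interpret_bit_flags  # assumed import path

dq = np.array([0, 4096, 4100, 32], dtype=np.int16)
bits = interpret_bit_flags("4096")           # -> 4096
cleaned = (dq & ~bits).astype(dq.dtype)      # -> [0, 0, 4, 32]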
pixvalue = pars.get('pixvalue', np.nan)
if pixvalue is None:
    pixvalue = np.nan  # insure that None == np.nan
newvalue = pars.get('newvalue', 0.0)
ext = pars.get('ext', None)
if ext in ['', ' ', 'None', None]:
    ext = None

files = parseinput.parseinput(input)[0]
for f in files:
    fimg = fits.open(f, mode='update', memmap=False)

    if ext is None:
        # replace pixels in ALL extensions
        extn = [i for i in fimg]
    else:
        if isinstance(ext, list):
            extn = [fimg[e] for e in ext]
        else:
            extn = [fimg[ext]]

    for e in extn:
        if e.data is not None and e.is_image:  # ignore empty Primary HDUs
            print("Converting {}[{},{}] value of {} to {}".format(
                  f, e.name, e.ver, pixvalue, newvalue))
            if np.isnan(pixvalue):
                e.data[np.isnan(e.data)] = newvalue
            else:
                e.data[np.where(e.data == pixvalue)] = newvalue

    fimg.close()
def replace(input, **pars)
Replace pixels in `input` that have a value of `pixvalue` with a value given by `newvalue`.
3.437825
3.225021
1.065985
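A hedged usage sketch: replace NaN pixels with 0.0 in every image extension of a placeholder file name.

import numpy as np

replace('input_flt.fits', pixvalue=np.nan, newvalue=0.0)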
data_kws = fits.getval(drzfile, 'd*data', ext=0, memmap=False) if len(data_kws) == 0: return None fnames = [] for kw in data_kws.cards: f = kw.value.split('[')[0] if f not in fnames: fnames.append(f) return fnames
def extract_input_filenames(drzfile)
Generate a list of filenames from a drizzled image's header
4.055209
4.05129
1.000967
orig_wcsname = None orig_key = None if orig_wcsname is None: for k,w in wnames.items(): if w[:4] == 'IDC_': orig_wcsname = w orig_key = k break if orig_wcsname is None: # No IDC_ wcsname found... revert to second to last if available if len(wnames) > 1: orig_key = wkeys[-2] orig_wcsname = wnames[orig_key] return orig_wcsname,orig_key
def determine_orig_wcsname(header, wnames, wkeys)
Determine the name of the original, unmodified WCS solution
2.973716
3.01557
0.986121
with open(input[1:]) as f: catlist = [] catdict = {} for line in f.readlines(): if line[0] == '#' or not line.strip(): continue lspl = line.split() if len(lspl) > 1: catdict[lspl[0]] = lspl[1:] catlist.append(lspl[1:]) else: catdict[lspl[0]] = None catlist.append(None) return catlist, catdict
def parse_atfile_cat(input)
Return the list of catalog filenames specified as part of the input @-file
2.288469
2.146485
1.066147
rval = make_val_float(ra) dval = make_val_float(dec) if rval is None: rval, dval = radec_hmstodd(ra, dec) return rval, dval
def parse_skypos(ra, dec)
Function to parse RA and Dec input values and turn them into decimal degrees Input formats could be: ["nn","nn","nn.nn"] "nn nn nn.nnn" "nn:nn:nn.nn" "nnH nnM nn.nnS" or "nnD nnM nn.nnS" nn.nnnnnnnn "nn.nnnnnnn"
5.871977
5.81256
1.010222
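An illustration of the two accepted input styles (coordinates are arbitrary examples); per the fallback above, make_val_float is assumed to return None for non-numeric strings, which routes sexagesimal input to radec_hmstodd.

ra_deg, dec_deg = parse_skypos('00:42:44.3', '+41:16:09')  # sexagesimal
ra2, dec2 = parse_skypos('10.684708', '41.26917')          # already decimal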
hmstrans = str.maketrans(string.ascii_letters, ' ' * len(string.ascii_letters))

if isinstance(ra, list):
    rastr = ':'.join(ra)
elif isinstance(ra, float):
    rastr = None
    pos_ra = ra
elif ra.find(':') < 0:
    # convert any non-numeric characters to spaces
    # (we already know the units), then join the fields
    # into the final 'nn:nn:nn.nn' string
    rastr = ra.translate(hmstrans).strip()
    rastr = ':'.join(rastr.split())
else:
    rastr = ra

if isinstance(dec, list):
    decstr = ':'.join(dec)
elif isinstance(dec, float):
    decstr = None
    pos_dec = dec
elif dec.find(':') < 0:
    decstr = dec.translate(hmstrans).strip()
    decstr = ':'.join(decstr.split())
else:
    decstr = dec

if rastr is None:
    pos = (pos_ra, pos_dec)
else:
    pos_coord = coords.SkyCoord(rastr + ' ' + decstr,
                                unit=(u.hourangle, u.deg))
    pos = (pos_coord.ra.deg, pos_coord.dec.deg)
return pos
def radec_hmstodd(ra, dec)
Function to convert HMS values into decimal degrees. This function relies on the astropy.coordinates package to perform the conversion to decimal degrees. Parameters ---------- ra : list or array List or array of input RA positions dec : list or array List or array of input Dec positions Returns ------- pos : arr Array of RA,Dec positions in decimal degrees Notes ----- This function supports any specification of RA and Dec as HMS or DMS; specifically, the formats:: ["nn","nn","nn.nn"] "nn nn nn.nnn" "nn:nn:nn.nn" "nnH nnM nn.nnS" or "nnD nnM nn.nnS" See Also -------- astropy.coordinates
2.379785
2.424126
0.981708
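What radec_hmstodd does under the hood, as a standalone astropy call (coordinates are arbitrary examples):

from astropy import units as u
from astropy.coordinates import SkyCoord

pos = SkyCoord('00:42:44.3 +41:16:09', unit=(u.hourangle, u.deg))
print(pos.ra.deg, pos.dec.deg)  # -> 10.684..., 41.269...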
fname = fileutil.osfn(exclusions) if os.path.exists(fname): with open(fname) as f: flines = f.readlines() else: print('No valid exclusions file "', fname, '" could be found!') print('Skipping application of exclusions files to source catalogs.') return None # Parse out lines which can be interpreted as positions and distances exclusion_list = [] units = None for line in flines: if line[0] == '#' or 'global' in line[:6]: continue # Only interpret the part of the line prior to the comment # if a comment has been attached to the line if '#' in line: line = line.split('#')[0].rstrip() if units is None: units = 'pixels' if line[:3] in ['fk4', 'fk5', 'sky']: units = 'sky' if line[:5] in ['image', 'physi', 'pixel']: units = 'pixels' continue if 'circle(' in line: nline = line.replace('circle(', '') nline = nline.replace(')', '') nline = nline.replace('"', '') vals = nline.split(',') if ':' in vals[0]: posval = vals[0] + ' ' + vals[1] else: posval = (float(vals[0]), float(vals[1])) else: # Try to interpret unformatted line if ',' in line: split_tok = ',' else: split_tok = ' ' vals = line.split(split_tok) if len(vals) == 3: if ':' in vals[0]: posval = vals[0] + ' ' + vals[1] else: posval = (float(vals[0]), float(vals[1])) else: continue exclusion_list.append( {'pos': posval, 'distance': float(vals[2]), 'units': units} ) return exclusion_list
def parse_exclusions(exclusions)
Read in exclusion definitions from file named by 'exclusions' and return a list of positions and distances
3.402678
3.247277
1.047856
if isinstance(colname, list): cname = '' for c in colname: cname += str(c) + ',' cname = cname.rstrip(',') elif isinstance(colname, int) or colname.isdigit(): cname = str(colname) else: cname = colname if 'c' in cname[0]: cname = cname.replace('c', '') ctok = None cols = None if '-' in cname: ctok = '-' if ':' in cname: ctok = ':' if ctok is not None: cnums = cname.split(ctok) c = list(range(int(cnums[0]), int(cnums[1]) + 1)) cols = [str(i) for i in c] if cols is None: ctok = ',' if ',' in cname else ' ' cols = cname.split(ctok) return cols
def parse_colname(colname)
Common function to interpret input column names provided by the user. This function translates column specification provided by the user into a column number. Notes ----- This function will understand the following inputs:: '1,2,3' or 'c1,c2,c3' or ['c1','c2','c3'] '1-3' or 'c1-c3' '1:3' or 'c1:c3' '1 2 3' or 'c1 c2 c3' '1' or 'c1' 1 Parameters ---------- colname : Column name or names to be interpreted Returns ------- cols : list The return value will be a list of strings.
2.384884
2.236319
1.066433
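The equivalent column specifications the parser accepts, per the notes above; each form reduces to the same list of column-number strings.

assert parse_colname('1,2,3') == ['1', '2', '3']
assert parse_colname('c1-c3') == ['1', '2', '3']
assert parse_colname(['c1', 'c2', 'c3']) == ['1', '2', '3']
assert parse_colname(1) == ['1']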
if _is_str_none(infile) is None: return None if infile.endswith('.fits'): outarr = read_FITS_cols(infile, cols=cols) else: outarr = read_ASCII_cols(infile, cols=cols) return outarr
def readcols(infile, cols=None)
Function which reads specified columns from either FITS tables or ASCII files This function reads in the columns specified by the user into numpy arrays regardless of the format of the input table (ASCII or FITS table). Parameters ---------- infile : string Filename of the input file cols : string or list of strings Columns to be read into arrays Returns ------- outarr : array Numpy array or arrays of columns from the table
4.405979
4.230569
1.041462
with fits.open(infile, memmap=False) as ftab:
    extnum = 0
    extfound = False
    for extn in ftab:
        if 'tfields' in extn.header:
            extfound = True
            break
        extnum += 1
    if not extfound:
        print('ERROR: No catalog table found in ', infile)
        raise ValueError
    # Now, read columns from the table in this extension. If no column
    # names were provided by the user, simply read in all columns from
    # the table.
    if cols is None or _is_str_none(cols[0]) is None:
        cols = ftab[extnum].data.names
    # Define the output
    outarr = [ftab[extnum].data.field(c) for c in cols]
return outarr

Read columns from FITS table
6.032477
5.959029
1.012326
# build a dictionary representing the format of each row
# Format of dictionary: {'colname': col_number, ...}
# This provides the mapping between column name and column number
coldict = {}
with open(infile, 'r') as f:
    flines = f.readlines()

for l in flines:  # interpret each line from catalog file
    lstrip = l.strip()
    if not lstrip or lstrip.startswith('#'):
        continue
    else:
        # convert first row of data into column definitions using indices
        coldict = {str(i + 1): i for i, _ in enumerate(lstrip.split())}
        break

numcols = len(cols)
outarr = [[] for _ in range(numcols)]
convert_radec = False

# Now, map specified columns to columns in file and populate output arrays
for l in flines:  # interpret each line from catalog file
    l = l.strip()
    lspl = l.split()
    # skip blank lines, comment lines, or lines with
    # fewer columns than requested by user
    if not l or len(lspl) < numcols or l[0] == '#' or "INDEF" in l:
        continue

    # For each 'column' requested by user, pull data from row
    for c, i in zip(cols, list(range(numcols))):
        cnames = parse_colname(c)
        if len(cnames) > 1:
            # interpret multi-column specification as one value
            outval = ''
            for cn in cnames:
                cnum = coldict[cn]
                cval = lspl[cnum]
                outval += cval + ' '
            outarr[i].append(outval)
            convert_radec = True
        else:
            # pull single value from row for this column
            cnum = coldict[cnames[0]]
            if isfloat(lspl[cnum]):
                cval = float(lspl[cnum])
            else:
                cval = lspl[cnum]
                # Check for multi-column values given as "nn:nn:nn.s"
                if ':' in cval:
                    cval = cval.replace(':', ' ')
                    convert_radec = True
            outarr[i].append(cval)

# convert multi-column RA/Dec specifications
if convert_radec:
    outra = []
    outdec = []
    for ra, dec in zip(outarr[0], outarr[1]):
        radd, decdd = radec_hmstodd(ra, dec)
        outra.append(radd)
        outdec.append(decdd)
    outarr[0] = outra
    outarr[1] = outdec

# convert all lists to numpy arrays
for c in range(len(outarr)):
    outarr[c] = np.array(outarr[c])

return outarr
def read_ASCII_cols(infile, cols=[1, 2, 3])  # noqa: N802
Interpret input ASCII file to return arrays for specified columns. Notes ----- The specification of the columns is expected to be a list with one entry per output 'column'; each entry may itself combine several input columns. For example:: cols = ['1,2,3','4,5,6',7] where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec values and 7 represents the flux value for a total of 3 requested columns of data to be returned. Returns ------- outarr : list of arrays The return value will be a list of numpy arrays, one for each 'column'.
3.285439
3.25209
1.010255
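A usage sketch matching the docstring's example; 'catalog.txt' is a placeholder file name. Columns 1-3 are combined as an RA triple, 4-6 as a Dec triple, and 7 is read as the flux.

xcol, ycol, fluxcol = read_ASCII_cols('catalog.txt', cols=['1,2,3', '4,5,6', 7])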
rows = '' nrows = 0 for img in image_list: row = img.get_shiftfile_row() if row is not None: rows += row nrows += 1 if nrows == 0: # If there are no fits to report, do not write out a file return # write out reference WCS now if os.path.exists(outwcs): os.remove(outwcs) p = fits.HDUList() p.append(fits.PrimaryHDU()) p.append(createWcsHDU(image_list[0].refWCS)) p.writeto(outwcs) # Write out shiftfile to go with reference WCS with open(filename, 'w') as f: f.write('# frame: output\n') f.write('# refimage: %s[wcs]\n' % outwcs) f.write('# form: delta\n') f.write('# units: pixels\n') f.write(rows) print('Writing out shiftfile :', filename)
def write_shiftfile(image_list, filename, outwcs='tweak_wcs.fits')
Write out a shiftfile for a given list of input Image class objects
3.599049
3.493934
1.030085
header = wcs.to_header()
header['EXTNAME'] = 'WCS'
header['EXTVER'] = 1
# Now, update original image size information
header['NPIX1'] = (wcs.pixel_shape[0], "Length of array axis 1")
header['NPIX2'] = (wcs.pixel_shape[1], "Length of array axis 2")
header['PIXVALUE'] = (0.0, "values of pixels in array")

if hasattr(wcs, 'orientat'):
    orientat = wcs.orientat
else:
    # find orientat from the CD or PC matrix
    if wcs.wcs.has_cd():
        cd12 = wcs.wcs.cd[0][1]
        cd22 = wcs.wcs.cd[1][1]
    elif wcs.wcs.has_pc():
        cd12 = wcs.wcs.cdelt[0] * wcs.wcs.pc[0][1]
        cd22 = wcs.wcs.cdelt[1] * wcs.wcs.pc[1][1]
    else:
        raise ValueError("Invalid WCS: WCS contains neither "
                         "a CD nor a PC matrix.")
    orientat = np.rad2deg(np.arctan2(cd12, cd22))

header['ORIENTAT'] = (orientat, "position angle of "
                      "image y axis (deg. e of n)")

return fits.ImageHDU(None, header)
def createWcsHDU(wcs)  # noqa: N802
Generate a WCS header object that can be used to populate a reference WCS HDU. For most applications, stwcs.wcsutil.HSTWCS.wcs2header() will work just as well.
2.61492
2.668705
0.979846
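The ORIENTAT fallback above, isolated: the position angle of the image y axis in degrees east of north, computed from a CD matrix (values are made up).

import numpy as np

cd12, cd22 = -1.06e-6, 1.25e-5
orientat = np.rad2deg(np.arctan2(cd12, cd22))  # ~ -4.85 degrees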
if ny is None:
    ny = nx

if sigma_x is None:
    if fwhm is None:
        raise ValueError('A value for either "fwhm" or "sigma_x" '
                         'needs to be specified!')
    # Convert input FWHM into sigma
    sigma_x = fwhm / (2 * np.sqrt(2 * np.log(2)))
if sigma_y is None:
    sigma_y = sigma_x

xradius = nx // 2
yradius = ny // 2

# Create grids of distance from center in X and Y
xarr = np.abs(np.arange(-xradius, xradius + 1))
yarr = np.abs(np.arange(-yradius, yradius + 1))
hnx = gauss(xarr, sigma_x)
hny = gauss(yarr, sigma_y)
hny = hny.reshape((ny, 1))
h = hnx * hny

# Normalize gaussian kernel to a sum of 1
h = h / np.abs(h).sum()
if zero_norm:
    h -= h.mean()

return h
def gauss_array(nx, ny=None, fwhm=1.0, sigma_x=None, sigma_y=None, zero_norm=False)
Computes the 2D Gaussian with size nx*ny. Parameters ---------- nx : int ny : int [Default: None] Size of output array for the generated Gaussian. If ny == None, output will be an array nx X nx pixels. fwhm : float [Default: 1.0] Full-width, half-maximum of the Gaussian to be generated sigma_x : float [Default: None] sigma_y : float [Default: None] Sigma_x and sigma_y are the stddev of the Gaussian functions. zero_norm : bool [Default: False] The kernel will be normalized to a sum of 1 when True. Returns ------- gauss_arr : array A numpy array with the generated gaussian function
2.531483
2.62782
0.96334
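A quick self-check sketch: a 7x7 kernel with a FWHM of 2 pixels; by construction the kernel sums to 1.

import numpy as np

kernel = gauss_array(7, fwhm=2.0)
assert kernel.shape == (7, 7)
assert np.isclose(kernel.sum(), 1.0)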
return (np.exp(-np.power(x, 2) / (2 * np.power(sigma, 2))) / (sigma * np.sqrt(2 * np.pi)))
def gauss(x, sigma)
Compute 1-D value of gaussian at position x relative to center.
1.949486
2.060942
0.945919
# find level of noise in histogram
istats = imagestats.ImageStats(img.astype(np.float32), nclip=1,
                               fields='stddev,mode,mean,max,min')
if istats.stddev == 0.0:
    istats = imagestats.ImageStats(img.astype(np.float32),
                                   fields='stddev,mode,mean,max,min')
imgsum = img.sum()

# clip out all values below mean+3*sigma from histogram
imgc = img[:, :].copy()
imgc[imgc < istats.mode + istats.stddev * sigma] = 0.0

# identify position of peak
yp0, xp0 = np.where(imgc == imgc.max())

# Perform bounds checking on slice from img
ymin = max(0, int(yp0[0]) - 3)
ymax = min(img.shape[0], int(yp0[0]) + 4)
xmin = max(0, int(xp0[0]) - 3)
xmax = min(img.shape[1], int(xp0[0]) + 4)

# take sum of at most a 7x7 pixel box around peak
xp_slice = (slice(ymin, ymax), slice(xmin, xmax))
yp, xp = ndimage.center_of_mass(img[xp_slice])
if np.isnan(xp) or np.isnan(yp):
    xp = 0.0
    yp = 0.0
    flux = 0.0
    zpqual = None
else:
    xp += xp_slice[1].start
    yp += xp_slice[0].start

    # compute S/N criteria for this peak: flux/sqrt(mean of rest of array)
    flux = imgc[xp_slice].sum()
    delta_size = float(img.size - imgc[xp_slice].size)
    if delta_size == 0:
        delta_size = 1
    delta_flux = float(imgsum - flux)
    if flux > imgc[xp_slice].max():
        delta_flux = flux - imgc[xp_slice].max()
    else:
        delta_flux = flux
    zpqual = flux / np.sqrt(delta_flux / delta_size)
    if np.isnan(zpqual) or np.isinf(zpqual):
        zpqual = None

    if center is not None:
        xp -= center[0]
        yp -= center[1]
    flux = imgc[xp_slice].max()

del imgc
return xp, yp, flux, zpqual
def find_xy_peak(img, center=None, sigma=3.0)
Find the center of the peak of offsets
3.322655
3.339182
0.995051
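A self-check sketch with synthetic data, assuming the module's imagestats/scipy imports are available: plant a single bright pixel and recover its offset from an assumed search center of (10, 10).

import numpy as np

img = np.zeros((21, 21), dtype=np.float32)
img[12, 8] = 100.0
xp, yp, flux, zpqual = find_xy_peak(img, center=(10, 10))
# xp, yp should come out near (-2.0, +2.0)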
from matplotlib import pyplot as plt

xp = pars['xp']
yp = pars['yp']
searchrad = int(pars['searchrad'] + 0.5)

plt.figure(num=pars['figure_id'])
plt.clf()

if pars['interactive']:
    plt.ion()
else:
    plt.ioff()

plt.imshow(pars['data'], vmin=0, vmax=pars['vmax'],
           interpolation='nearest')
plt.viridis()
plt.colorbar()
plt.title(pars['title_str'])
plt.plot(xp + searchrad, yp + searchrad, color='red', marker='+',
         markersize=24)
plt.plot(searchrad, searchrad, color='yellow', marker='+', markersize=120)
plt.text(searchrad, searchrad, "Offset=0,0", verticalalignment='bottom',
         color='yellow')
plt.xlabel("Offset in X (pixels)")
plt.ylabel("Offset in Y (pixels)")

if pars['interactive']:
    plt.show()

if pars['plotname']:
    suffix = pars['plotname'][-4:]
    output = pars['plotname']
    if '.' not in suffix:
        output += '.png'
        format = 'png'
    elif suffix[1:] in ['png', 'pdf', 'ps', 'eps', 'svg']:
        format = suffix[1:]
    else:
        format = 'png'  # fall back for unrecognized extensions
    plt.savefig(output, format=format)
def plot_zeropoint(pars)
Plot 2d histogram. Pars will be a dictionary containing: data, figure_id, vmax, title_str, xp,yp, searchrad
2.961052
2.657535
1.11421
print('Computing initial guess for X and Y shifts...') # run C function to create ZP matrix zpmat = cdriz.arrxyzero(imgxy.astype(np.float32), refxy.astype(np.float32), searchrad) xp, yp, flux, zpqual = find_xy_peak(zpmat, center=(searchrad, searchrad)) if zpqual is not None: print('Found initial X and Y shifts of ', xp, yp) print(' with significance of ', zpqual, 'and ', flux, ' matches') else: # try with a lower sigma to detect a peak in a sparse set of sources xp, yp, flux, zpqual = find_xy_peak( zpmat, center=(searchrad, searchrad), sigma=1.0 ) if zpqual: print('Found initial X and Y shifts of ', xp, yp) print(' with significance of ', zpqual, 'and ', flux, ' matches') else: print('!' * 80) print('!') print('! WARNING: No valid shift found within a search radius of ', searchrad, ' pixels.') print('!') print('!' * 80) if histplot: zpstd = flux // 5 if zpstd < 10: zpstd = 10 if zpqual is None: zpstd = 10 title_str = ("Histogram of offsets: Peak has %d matches at " "(%0.4g, %0.4g)" % (flux, xp, yp)) plot_pars = {'data': zpmat, 'figure_id': figure_id, 'vmax': zpstd, 'xp': xp, 'yp': yp, 'searchrad': searchrad, 'title_str': title_str, 'plotname': plotname, 'interactive': interactive} plot_zeropoint(plot_pars) return xp, yp, flux, zpqual
def build_xy_zeropoint(imgxy, refxy, searchrad=3.0, histplot=False, figure_id=1, plotname=None, interactive=True)
Create a matrix which contains the delta between each XY position and each UV position.
3.828872
3.844245
0.996001
# Build X and Y arrays dx = end[0] - start[0] if dx < 0: nstart = end end = start start = nstart dx = -dx stepx = dx / nstep # Perform linear fit to find exact line that connects start and end xarr = np.arange(start[0], end[0] + stepx / 2.0, stepx) yarr = np.interp(xarr, [start[0], end[0]], [start[1], end[1]]) # create grid of positions if mesh: xa, ya = np.meshgrid(xarr, yarr) xarr = xa.ravel() yarr = ya.ravel() return xarr, yarr
def build_pos_grid(start, end, nstep, mesh=False)
Return a grid of positions starting at X,Y given by 'start', and ending at X,Y given by 'end'. The grid will be completely filled in X and Y using 'nstep' intervals.
3.059148
3.104922
0.985257
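An example: five steps along a diagonal, first as points on the line and then as a filled 2-D mesh of every X/Y combination.

xline, yline = build_pos_grid((0.0, 0.0), (10.0, 10.0), 5)
xmesh, ymesh = build_pos_grid((0.0, 0.0), (10.0, 10.0), 5, mesh=True)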
dqfile = None
# Look for additional file with DQ array, primarily for WFPC2 data
indx = self._filename.find('.fits')
if indx > 3:
    suffix = self._filename[indx-4:indx]
    dqfile = self._filename.replace(suffix[:3], '_c1')
elif indx < 0 and len(self._filename) > 3 and \
        self._filename[-4] == os.extsep and \
        self._filename[-1].lower() == 'h':
    # assume we've got a GEIS file
    dqfile = self._filename[:-2] + '1' + self._filename[-1]
    hdulist = readgeis.readgeis(dqfile)
    prih = hdulist[0].header
    if 'FILETYPE' in prih:
        dq_suffix = prih['FILETYPE'].strip().upper()
    else:
        # assume extension name is 'SDQ' for WFPC2 GEIS files
        dq_suffix = 'SDQ'
    hdulist.close()
    return dqfile, dq_suffix
else:
    raise ValueError("Input file {} appears to be neither "
                     "a FITS file nor a GEIS file.".format(self._filename))

if os.path.exists(dqfile):
    dq_suffix = fits.getval(dqfile, "EXTNAME", ext=1, memmap=False)
else:
    dq_suffix = "SCI"

return dqfile, dq_suffix
def find_DQ_extension(self)
Return the suffix for the data quality extension and the name of the file which that DQ extension should be read from.
4.273942
4.317579
0.989893
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']

instrpars['gnkeyword'] = 'ATODGAIN'  # hard-code for WFPC2 data
instrpars['rnkeyword'] = None

if self._isNotValid(instrpars['exptime'], instrpars['expkeyword']):
    instrpars['expkeyword'] = 'EXPTIME'

for chip in self.returnAllChips(extname=self.scienceExt):
    chip._headergain = self.getInstrParameter(
        instrpars['gain'], pri_header, instrpars['gnkeyword']
    )
    chip._exptime = self.getInstrParameter(
        instrpars['exptime'], pri_header, instrpars['expkeyword']
    )
    # We need to treat read noise as a special case since it is
    # not populated in the WFPC2 primary header
    if instrpars['rnkeyword'] is None:
        chip._rdnoise = None
    else:
        chip._rdnoise = self.getInstrParameter(
            instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
        )

    if chip._headergain is None or chip._exptime is None:
        print('ERROR: invalid instrument task parameter')
        raise ValueError

# We need to determine if the user has used the default readnoise/gain value,
# since if not, they will need to supply a gain/readnoise value as well
usingDefaultGain = instrpars['gnkeyword'] == 'ATODGAIN'
usingDefaultReadnoise = instrpars['rnkeyword'] in [None, 'None']

# If the user has specified either the readnoise or the gain, we need to make
# sure that they have actually specified both values. In the default case,
# the readnoise of the system depends on what gain is in use.
if usingDefaultReadnoise and usingDefaultGain:
    self._setchippars()
elif usingDefaultReadnoise and not usingDefaultGain:
    raise ValueError("ERROR: You need to supply readnoise information\n when not using the default gain for WFPC2.")
elif not usingDefaultReadnoise and usingDefaultGain:
    raise ValueError("ERROR: You need to supply gain information when\n not using the default readnoise for WFPC2.")
else:
    # In this case, the user has specified both gain and readnoise values.
    # Just use them as is.
    for chip in self.returnAllChips(extname=self.scienceExt):
        chip._gain = chip._headergain
    print("Using user defined values for gain and readnoise")

# Convert the science data to electrons
self.doUnitConversions()
def setInstrumentParameters(self, instrpars)
This method overrides the superclass to set default values into the parameter dictionary, in case empty entries are provided.
4.542056
4.517882
1.005351
# Image information
_handle = fileutil.openImage(self._filename, mode='readonly', memmap=False)

# Now convert the SCI array(s) units
for det in range(1, self._numchips + 1):
    chip = self._image[self.scienceExt, det]
    conversionFactor = 1.0
    # add D2IMFILE to outputNames for removal by 'clean()' method later
    if 'D2IMFILE' in _handle[0].header and \
            _handle[0].header['D2IMFILE'] not in ["", "N/A"]:
        chip.outputNames['d2imfile'] = _handle[0].header['D2IMFILE']

    if chip._gain is not None:
        conversionFactor = chip._gain
        chip._effGain = chip._gain
        chip._conversionFactor = conversionFactor
    else:
        msg = "Invalid gain value for data, no conversion done"
        print(msg)
        raise ValueError(msg)

# Close the files and clean-up
_handle.close()
self._effGain = conversionFactor
def doUnitConversions(self)
Apply unit conversions to all the chips, ignoring the group parameter. This insures that all the chips get the same conversions when this gets done, even if only 1 chip was specified to be processed.
8.771903
8.559925
1.024764
darkrate = 0.005  # electrons / s
if self.proc_unit == 'native':
    darkrate = darkrate / self.getGain(exten)  # counts / s

try:
    chip = self._image[0]
    darkcurrent = chip.header['DARKTIME'] * darkrate
except KeyError:
    msg = "##############################################\n"
    msg += "#                                            #\n"
    msg += "# Error:                                     #\n"
    msg += "#   Cannot find the value for 'DARKTIME'     #\n"
    msg += "#   in the image header. WFPC2 input         #\n"
    msg += "#   images are expected to have this header  #\n"
    msg += "#   keyword.                                 #\n"
    msg += "#                                            #\n"
    msg += "# Error occurred in the WFPC2InputImage class#\n"
    msg += "#                                            #\n"
    msg += "##############################################\n"
    raise ValueError(msg)

return darkcurrent
def getdarkcurrent(self,exten)
Return the dark current for the WFPC2 detector. This value will be contained within an instrument specific keyword. The value in the image header will be converted to units of electrons. Returns ------- darkcurrent : float Dark current for the WFPC2 detector in **units of counts/electrons**.
4.441569
4.191189
1.05974
rn = self._image[exten]._rdnoise
if self.proc_unit == 'native':
    rn = rn / self.getGain(exten)
return rn
def getReadNoise(self, exten)
Method for returning the readnoise of a detector (in counts). Returns ------- readnoise : float The readnoise of the detector in **units of counts/electrons**.
8.850035
11.301612
0.783077
sci_chip = self._image[self.scienceExt,chip] ### For WFPC2 Data, build mask files using: maskname = sci_chip.dqrootname+'_dqmask.fits' dqmask_name = buildmask.buildShadowMaskImage(sci_chip.dqfile,sci_chip.detnum,sci_chip.extnum,maskname,bitvalue=bits,binned=sci_chip.binned) sci_chip.dqmaskname = dqmask_name sci_chip.outputNames['dqmask'] = dqmask_name sci_chip.outputNames['tmpmask'] = 'wfpc2_inmask%d.fits'%(sci_chip.detnum) dqmask = fits.getdata(dqmask_name, ext=0, memmap=False) return dqmask
def buildMask(self, chip, bits=0, write=False)
Build masks as specified in the user parameters found in the configObj object.
6.419304
6.41363
1.000885
if not isinstance(catalog, Catalog):
    if mode == 'automatic':  # an image array was provided as the source
        # Create a new catalog directly from the image
        catalog = ImageCatalog(wcs, catalog, src_find_filters, **kwargs)
    else:  # a catalog file was provided as the catalog source
        catalog = UserCatalog(wcs, catalog, **kwargs)
return catalog
def generateCatalog(wcs, mode='automatic', catalog=None, src_find_filters=None, **kwargs)
Function which determines what type of catalog object needs to be instantiated based on what type of source selection algorithm the user specified. Parameters ---------- wcs : obj WCS object generated by STWCS or PyWCS catalog : str or ndarray Filename of existing catalog or ndarray of image for generation of source catalog. kwargs : dict Parameters needed to interpret source catalog from input catalog with `findmode` being required. Returns ------- catalog : obj A Catalog-based class instance for keeping track of WCS and associated source catalog
6.490111
7.197229
0.901751