| sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (1 class) |
|---|---|---|
def gauss(x, sigma):
""" Compute 1-D value of gaussian at position x relative to center."""
return (np.exp(-np.power(x, 2) / (2 * np.power(sigma, 2))) /
(sigma * np.sqrt(2 * np.pi)))
|
Compute 1-D value of gaussian at position x relative to center.
|
entailment
|
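As a quick sanity check of the formula above, the 1/(sigma*sqrt(2*pi)) prefactor makes the curve integrate to roughly 1 over a wide enough grid. A minimal standalone check (numpy only, redefining the same gauss helper):

import numpy as np

def gauss(x, sigma):
    """Same 1-D gaussian as in the snippet above."""
    return (np.exp(-np.power(x, 2) / (2 * np.power(sigma, 2))) /
            (sigma * np.sqrt(2 * np.pi)))

x = np.linspace(-10.0, 10.0, 2001)
print(np.trapz(gauss(x, sigma=2.0), x))   # ~1.0 for any sigma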
def make_vector_plot(coordfile, columns=[1, 2, 3, 4], data=None,
figure_id=None, title=None, axes=None, every=1,
labelsize=8, ylimit=None, limit=None, xlower=None,
ylower=None, output=None, headl=4, headw=3,
xsh=0.0, ysh=0.0, fit=None, scale=1.0, vector=True,
textscale=5, append=False, linfit=False, rms=True,
plotname=None):
""" Convert a XYXYMATCH file into a vector plot or set of residuals plots.
This function provides a single interface for generating either a
vector plot of residuals or a set of 4 plots showing residuals.
The data being plotted can also be adjusted for a linear fit
on-the-fly.
Parameters
----------
coordfile : string
Name of file with matched sets of coordinates. This input file can
be a file compatible for use with IRAF's geomap.
columns : list [Default: [1, 2, 3, 4]]
Column numbers for the X,Y positions from each image
data : list of arrays
If specified, this can be used to input matched data directly
title : string
Title to be used for the generated plot
axes : list
List of X and Y min/max values to customize the plot axes
every : int [Default: 1]
Slice value for the data to be plotted
limit : float
Radial offset limit for selecting which sources are included in
the plot
labelsize : int [Default: 8] or str
Font size to use for tick labels, either in font points or as a
string understood by tick_params().
ylimit : float
Limit to use for Y range of plots.
xlower : float
ylower : float
Limit in X and/or Y offset for selecting which sources are included
in the plot
output : string
Filename of output file for generated plot
headl : int [Default: 4]
Length of arrow head to be used in vector plot
headw : int [Default: 3]
Width of arrow head to be used in vector plot
xsh : float
ysh : float
Shift in X and Y from linear fit to be applied to source positions
from the first image
scale : float
Scale from linear fit to be applied to source positions from the
first image
fit : array
Array of linear coefficients for rotation (and scale?) in X and Y
from a linear fit to be applied to source positions from the
first image
vector : bool [Default: True]
Specifies whether or not to generate a vector plot. If False, task
will generate a set of 4 residuals plots instead
textscale : int [Default: 5]
Scale factor for text used for labelling the generated plot
append : bool [Default: False]
If True, will overplot new plot on any pre-existing plot
linfit : bool [Default: False]
If True, a linear fit to the residuals will be generated and
added to the generated residuals plots
rms : bool [Default: True]
Specifies whether or not to report the RMS of the residuals as a
label on the generated plot(s).
plotname : str [Default: None]
Write out plot to a file with this name if specified.
"""
from matplotlib import pyplot as plt
if data is None:
data = readcols(coordfile, cols=columns)
xy1x = data[0]
xy1y = data[1]
xy2x = data[2]
xy2y = data[3]
numpts = xy1x.shape[0]
if fit is not None:
xy1x, xy1y = apply_db_fit(data, fit, xsh=xsh, ysh=ysh)
dx = xy2x - xy1x
dy = xy2y - xy1y
else:
dx = xy2x - xy1x - xsh
dy = xy2y - xy1y - ysh
# apply scaling factor to deltas
dx *= scale
dy *= scale
print('Total # points: {:d}'.format(len(dx)))
if limit is not None:
indx = np.sqrt(dx**2 + dy**2) <= limit
dx = dx[indx].copy()
dy = dy[indx].copy()
xy1x = xy1x[indx].copy()
xy1y = xy1y[indx].copy()
if xlower is not None:
xindx = np.abs(dx) >= xlower
dx = dx[xindx].copy()
dy = dy[xindx].copy()
xy1x = xy1x[xindx].copy()
xy1y = xy1y[xindx].copy()
print('# of points after clipping: {:d}'.format(len(dx)))
dr = np.sqrt(dx**2 + dy**2)
max_vector = dr.max()
if output is not None:
write_xy_file(output, [xy1x, xy1y, dx, dy])
fig = plt.figure(num=figure_id)
if not append:
plt.clf()
if vector:
dxs = imagestats.ImageStats(dx.astype(np.float32))
dys = imagestats.ImageStats(dy.astype(np.float32))
minx = xy1x.min()
maxx = xy1x.max()
miny = xy1y.min()
maxy = xy1y.max()
plt_xrange = maxx - minx
plt_yrange = maxy - miny
qplot = plt.quiver(xy1x[::every], xy1y[::every], dx[::every],
dy[::every], units='y', headwidth=headw,
headlength=headl)
key_dx = 0.01 * plt_xrange
key_dy = 0.005 * plt_yrange * textscale
maxvec = max_vector / 2.
key_len = round(maxvec + 0.005, 2)
plt.xlabel('DX: %.4f to %.4f +/- %.4f' % (dxs.min, dxs.max,
dxs.stddev))
plt.ylabel('DY: %.4f to %.4f +/- %.4f' % (dys.min, dys.max,
dys.stddev))
plt.title(r"$Vector\ plot\ of\ %d/%d\ residuals:\ %s$" %
(xy1x.shape[0], numpts, title))
plt.quiverkey(qplot, minx + key_dx, miny - key_dy, key_len,
"%0.2f pixels" % (key_len),
coordinates='data', labelpos='E', labelcolor='Maroon',
color='Maroon')
else:
plot_defs = [[xy1x, dx, "X (pixels)", "DX (pixels)"],
[xy1y, dx, "Y (pixels)", "DX (pixels)"],
[xy1x, dy, "X (pixels)", "DY (pixels)"],
[xy1y, dy, "Y (pixels)", "DY (pixels)"]]
if axes is None:
# Compute a global set of axis limits for all plots
minx = min(xy1x.min(), xy1y.min())
maxx = max(xy1x.max(), xy1y.max())
miny = min(dx.min(), dy.min())
maxy = max(dx.max(), dy.max())
else:
minx = axes[0][0]
maxx = axes[0][1]
miny = axes[1][0]
maxy = axes[1][1]
if ylimit is not None:
miny = -ylimit
maxy = ylimit
rms_labelled = False
if title is None:
fig.suptitle("Residuals [%d/%d]" % (xy1x.shape[0], numpts),
ha='center', fontsize=labelsize + 6)
else:
# This definition of the title supports math symbols in the title
fig.suptitle(r"$" + title + "$", ha='center',
fontsize=labelsize + 6)
for pnum, p in enumerate(plot_defs):
pn = pnum + 1
ax = fig.add_subplot(2, 2, pn)
plt.plot(
p[0], p[1], 'b.',
label='RMS(X) = %.4f, RMS(Y) = %.4f' % (dx.std(), dy.std())
)
lx = [int((p[0].min() - 500) / 500) * 500,
int((p[0].max() + 500) / 500) * 500]
plt.plot(lx, [0.0, 0.0], 'k', linewidth=3)
plt.axis([minx, maxx, miny, maxy])
if rms and not rms_labelled:
leg_handles, leg_labels = ax.get_legend_handles_labels()
fig.legend(leg_handles, leg_labels, loc='center left',
fontsize='small', frameon=False,
bbox_to_anchor=(0.33, 0.51), borderaxespad=0)
rms_labelled = True
ax.tick_params(labelsize=labelsize)
# Fine-tune figure; hide x ticks for top plots and y ticks for
# right plots
if pn <= 2:
plt.setp(ax.get_xticklabels(), visible=False)
else:
ax.set_xlabel(plot_defs[pnum][2])
if pn % 2 == 0:
plt.setp(ax.get_yticklabels(), visible=False)
else:
ax.set_ylabel(plot_defs[pnum][3])
if linfit:
lxr = int((lx[-1] - lx[0]) / 100)
lyr = int((p[1].max() - p[1].min()) / 100)
a = np.vstack([p[0], np.ones(len(p[0]))]).T
m, c = np.linalg.lstsq(a, p[1])[0]
yr = [m * lx[0] + c, lx[-1] * m + c]
plt.plot([lx[0], lx[-1]], yr, 'r')
plt.text(
lx[0] + lxr, p[1].max() + lyr,
"%0.5g*x + %0.5g [%0.5g,%0.5g]" % (m, c, yr[0], yr[1]),
color='r'
)
plt.draw()
if plotname:
suffix = plotname[-4:]
if '.' not in suffix:
plotname += '.png'
format = 'png'
elif suffix[1:] in ['png', 'pdf', 'ps', 'eps', 'svg']:
format = suffix[1:]
else:
format = 'png'
plt.savefig(plotname, format=format)
|
Convert a XYXYMATCH file into a vector plot or set of residuals plots.
This function provides a single interface for generating either a
vector plot of residuals or a set of 4 plots showing residuals.
The data being plotted can also be adjusted for a linear fit
on-the-fly.
Parameters
----------
coordfile : string
Name of file with matched sets of coordinates. This input file can
be a file compatible for use with IRAF's geomap.
columns : list [Default: [1, 2, 3, 4]]
Column numbers for the X,Y positions from each image
data : list of arrays
If specified, this can be used to input matched data directly
title : string
Title to be used for the generated plot
axes : list
List of X and Y min/max values to customize the plot axes
every : int [Default: 1]
Slice value for the data to be plotted
limit : float
Radial offset limit for selecting which sources are included in
the plot
labelsize : int [Default: 8] or str
Font size to use for tick labels, either in font points or as a
string understood by tick_params().
ylimit : float
Limit to use for Y range of plots.
xlower : float
ylower : float
Limit in X and/or Y offset for selecting which sources are included
in the plot
output : string
Filename of output file for generated plot
headl : int [Default: 4]
Length of arrow head to be used in vector plot
headw : int [Default: 3]
Width of arrow head to be used in vector plot
xsh : float
ysh : float
Shift in X and Y from linear fit to be applied to source positions
from the first image
scale : float
Scale from linear fit to be applied to source positions from the
first image
fit : array
Array of linear coefficients for rotation (and scale?) in X and Y
from a linear fit to be applied to source positions from the
first image
vector : bool [Default: True]
Specifies whether or not to generate a vector plot. If False, task
will generate a set of 4 residuals plots instead
textscale : int [Default: 5]
Scale factor for text used for labelling the generated plot
append : bool [Default: False]
If True, will overplot new plot on any pre-existing plot
linfit : bool [Default: False]
If True, a linear fit to the residuals will be generated and
added to the generated residuals plots
rms : bool [Default: True]
Specifies whether or not to report the RMS of the residuals as a
label on the generated plot(s).
plotname : str [Default: None]
Write out plot to a file with this name if specified.
|
entailment
|
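A hedged usage sketch for the function documented above, assuming it is importable from drizzlepac.tweakutils as in the drizzlepac package; 'matched.coo' is an illustrative xyxymatch-style 4-column file name, not a real product:

from drizzlepac import tweakutils

# Vector plot of residuals from an illustrative matched-coordinate file.
tweakutils.make_vector_plot('matched.coo', columns=[1, 2, 3, 4],
                            vector=True, title='image vs reference',
                            plotname='residuals_vec.png')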
def find_xy_peak(img, center=None, sigma=3.0):
""" Find the center of the peak of offsets """
# find level of noise in histogram
istats = imagestats.ImageStats(img.astype(np.float32), nclip=1,
fields='stddev,mode,mean,max,min')
if istats.stddev == 0.0:
istats = imagestats.ImageStats(img.astype(np.float32),
fields='stddev,mode,mean,max,min')
imgsum = img.sum()
# clip out all values below mean+3*sigma from histogram
imgc = img[:, :].copy()
imgc[imgc < istats.mode + istats.stddev * sigma] = 0.0
# identify position of peak
yp0, xp0 = np.where(imgc == imgc.max())
# Perform bounds checking on slice from img
ymin = max(0, int(yp0[0]) - 3)
ymax = min(img.shape[0], int(yp0[0]) + 4)
xmin = max(0, int(xp0[0]) - 3)
xmax = min(img.shape[1], int(xp0[0]) + 4)
# take sum of at most a 7x7 pixel box around peak
xp_slice = (slice(ymin, ymax),
slice(xmin, xmax))
yp, xp = ndimage.measurements.center_of_mass(img[xp_slice])
if np.isnan(xp) or np.isnan(yp):
xp = 0.0
yp = 0.0
flux = 0.0
zpqual = None
else:
xp += xp_slice[1].start
yp += xp_slice[0].start
# compute S/N criteria for this peak: flux/sqrt(mean of rest of array)
flux = imgc[xp_slice].sum()
delta_size = float(img.size - imgc[xp_slice].size)
if delta_size == 0:
delta_size = 1
delta_flux = float(imgsum - flux)
if flux > imgc[xp_slice].max():
delta_flux = flux - imgc[xp_slice].max()
else:
delta_flux = flux
zpqual = flux / np.sqrt(delta_flux / delta_size)
if np.isnan(zpqual) or np.isinf(zpqual):
zpqual = None
if center is not None:
xp -= center[0]
yp -= center[1]
flux = imgc[xp_slice].max()
del imgc
return xp, yp, flux, zpqual
|
Find the center of the peak of offsets
|
entailment
|
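The peak-location step above reduces to finding the maximum pixel and taking a center of mass over at most a 7x7 box around it. A standalone sketch of just that step on a synthetic offset histogram (numpy and scipy only; the S/N estimate is omitted):

import numpy as np
from scipy import ndimage

img = np.zeros((21, 21))
img[12, 8] = 50.0                              # single strong peak
yp0, xp0 = np.where(img == img.max())
sl = (slice(max(0, int(yp0[0]) - 3), int(yp0[0]) + 4),
      slice(max(0, int(xp0[0]) - 3), int(xp0[0]) + 4))
yp, xp = ndimage.center_of_mass(img[sl])
print(xp + sl[1].start, yp + sl[0].start)      # -> 8.0 12.0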
def plot_zeropoint(pars):
""" Plot 2d histogram.
Pars will be a dictionary containing:
data, figure_id, vmax, title_str, xp, yp, searchrad, interactive, plotname
"""
from matplotlib import pyplot as plt
xp = pars['xp']
yp = pars['yp']
searchrad = int(pars['searchrad'] + 0.5)
plt.figure(num=pars['figure_id'])
plt.clf()
if pars['interactive']:
plt.ion()
else:
plt.ioff()
plt.imshow(pars['data'], vmin=0, vmax=pars['vmax'],
interpolation='nearest')
plt.viridis()
plt.colorbar()
plt.title(pars['title_str'])
plt.plot(xp + searchrad, yp + searchrad, color='red', marker='+',
markersize=24)
plt.plot(searchrad, searchrad, color='yellow', marker='+', markersize=120)
plt.text(searchrad, searchrad, "Offset=0,0", verticalalignment='bottom',
color='yellow')
plt.xlabel("Offset in X (pixels)")
plt.ylabel("Offset in Y (pixels)")
if pars['interactive']:
plt.show()
if pars['plotname']:
suffix = pars['plotname'][-4:]
output = pars['plotname']
if '.' not in suffix:
output += '.png'
format = 'png'
elif suffix[1:] in ['png', 'pdf', 'ps', 'eps', 'svg']:
format = suffix[1:]
else:
format = 'png'
plt.savefig(output, format=format)
|
Plot 2d histogram.
Pars will be a dictionary containing:
data, figure_id, vmax, title_str, xp, yp, searchrad, interactive, plotname
|
entailment
|
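An illustrative pars dictionary for the plotting helper above; the keys mirror those read in the function body, and the 'data' array here is just a random 2-D histogram standing in for the offset matrix:

import numpy as np

pars = {'data': np.random.poisson(1.0, (21, 21)),
        'figure_id': 1, 'vmax': 10,
        'title_str': 'Histogram of offsets',
        'xp': 1.5, 'yp': -0.75, 'searchrad': 10,
        'plotname': None, 'interactive': False}
# plot_zeropoint(pars)   # requires matplotlib and the function above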
def build_xy_zeropoint(imgxy, refxy, searchrad=3.0, histplot=False,
figure_id=1, plotname=None, interactive=True):
""" Create a matrix which contains the delta between each XY position and
each UV position.
"""
print('Computing initial guess for X and Y shifts...')
# run C function to create ZP matrix
zpmat = cdriz.arrxyzero(imgxy.astype(np.float32), refxy.astype(np.float32),
searchrad)
xp, yp, flux, zpqual = find_xy_peak(zpmat, center=(searchrad, searchrad))
if zpqual is not None:
print('Found initial X and Y shifts of ', xp, yp)
print(' with significance of ', zpqual, 'and ', flux, ' matches')
else:
# try with a lower sigma to detect a peak in a sparse set of sources
xp, yp, flux, zpqual = find_xy_peak(
zpmat, center=(searchrad, searchrad), sigma=1.0
)
if zpqual:
print('Found initial X and Y shifts of ', xp, yp)
print(' with significance of ', zpqual, 'and ',
flux, ' matches')
else:
print('!' * 80)
print('!')
print('! WARNING: No valid shift found within a search radius of ',
searchrad, ' pixels.')
print('!')
print('!' * 80)
if histplot:
zpstd = flux // 5
if zpstd < 10:
zpstd = 10
if zpqual is None:
zpstd = 10
title_str = ("Histogram of offsets: Peak has %d matches at "
"(%0.4g, %0.4g)" % (flux, xp, yp))
plot_pars = {'data': zpmat, 'figure_id': figure_id, 'vmax': zpstd,
'xp': xp, 'yp': yp, 'searchrad': searchrad,
'title_str': title_str, 'plotname': plotname,
'interactive': interactive}
plot_zeropoint(plot_pars)
return xp, yp, flux, zpqual
|
Create a matrix which contains the delta between each XY position and
each UV position.
|
entailment
|
def build_pos_grid(start, end, nstep, mesh=False):
"""
Return a grid of positions starting at X,Y given by 'start', and ending
at X,Y given by 'end'. The line between them is sampled at 'nstep' equal
intervals; if 'mesh' is True, the X and Y samples are expanded into a
completely filled 2-D grid.
"""
# Build X and Y arrays
dx = end[0] - start[0]
if dx < 0:
nstart = end
end = start
start = nstart
dx = -dx
stepx = dx / nstep
# Perform linear fit to find exact line that connects start and end
xarr = np.arange(start[0], end[0] + stepx / 2.0, stepx)
yarr = np.interp(xarr, [start[0], end[0]], [start[1], end[1]])
# create grid of positions
if mesh:
xa, ya = np.meshgrid(xarr, yarr)
xarr = xa.ravel()
yarr = ya.ravel()
return xarr, yarr
|
Return a grid of positions starting at X,Y given by 'start', and ending
at X,Y given by 'end'. The line between them is sampled at 'nstep' equal
intervals; if 'mesh' is True, the X and Y samples are expanded into a
completely filled 2-D grid.
|
entailment
|
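A quick example of the function defined above (numpy only, with build_pos_grid in scope): a 5-interval grid from (0, 0) to (100, 50), expanded into a filled mesh:

import numpy as np   # build_pos_grid as defined above

xarr, yarr = build_pos_grid((0.0, 0.0), (100.0, 50.0), nstep=5, mesh=True)
print(xarr.shape)          # (36,): a 6 x 6 mesh built from the sampled line
print(xarr[:6], yarr[:6])  # [0. 20. 40. 60. 80. 100.] [0. 0. 0. 0. 0. 0.]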
def find_DQ_extension(self):
""" Return the suffix for the data quality extension and the name of
the file which that DQ extension should be read from.
"""
dqfile = None
# Look for additional file with DQ array, primarily for WFPC2 data
indx = self._filename.find('.fits')
if indx > 3:
suffix = self._filename[indx-4:indx]
dqfile = self._filename.replace(suffix[:3],'_c1')
elif indx < 0 and len(self._filename) > 3 and \
self._filename[-4] == os.extsep and \
self._filename[-1].lower() == 'h':
# assume we've got a GEIS file
dqfile = self._filename[:-2]+'1'+self._filename[-1]
hdulist = readgeis.readgeis(dqfile)
prih = hdulist[0].header
if 'FILETYPE' in prih:
dq_suffix = prih['FILETYPE'].strip().upper()
else:
# assume extension name is 'SDQ' for WFPC2 GEIS files
dq_suffix = 'SDQ'
hdulist.close()
return dqfile,dq_suffix
else:
raise ValueError("Input file {} does not appear to be neither " \
"a FITS file nor a GEIS file.".format(self._filename))
if os.path.exists(dqfile):
dq_suffix = fits.getval(dqfile, "EXTNAME", ext=1, memmap=False)
else:
dq_suffix = "SCI"
return dqfile, dq_suffix
|
Return the suffix for the data quality extension and the name of
the file which that DQ extension should be read from.
|
entailment
|
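The FITS branch above relies on the WFPC2 file-naming convention in which the science suffix (e.g. '_c0m') maps to a '_c1m' data-quality companion file. A tiny standalone illustration with a hypothetical rootname:

fname = 'u40x0102m_c0m.fits'             # hypothetical WFPC2 science file
indx = fname.find('.fits')
suffix = fname[indx - 4:indx]            # '_c0m'
print(fname.replace(suffix[:3], '_c1'))  # 'u40x0102m_c1m.fits'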
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']
instrpars['gnkeyword'] = 'ATODGAIN' # hard-code for WFPC2 data
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._headergain = self.getInstrParameter(
instrpars['gain'], pri_header, instrpars['gnkeyword']
)
chip._exptime = self.getInstrParameter(
instrpars['exptime'], pri_header, instrpars['expkeyword']
)
# We need to treat Read Noise as a special case since it is
# not populated in the WFPC2 primary header
if instrpars['rnkeyword'] is None:
chip._rdnoise = None
else:
chip._rdnoise = self.getInstrParameter(
instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
)
if chip._headergain is None or chip._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
# We need to determine if the user has used the default readnoise/gain value
# since if not, they will need to supply a gain/readnoise value as well
usingDefaultGain = instrpars['gnkeyword'] == 'ATODGAIN'
usingDefaultReadnoise = instrpars['rnkeyword'] in [None, 'None']
# If the user has specified either the readnoise or the gain, we need to make sure
# that they have actually specified both values. In the default case, the readnoise
# of the system depends on what the gain is set to.
if usingDefaultReadnoise and usingDefaultGain:
self._setchippars()
elif usingDefaultReadnoise and not usingDefaultGain:
raise ValueError("ERROR: You need to supply readnoise information\n when not using the default gain for WFPC2.")
elif not usingDefaultReadnoise and usingDefaultGain:
raise ValueError("ERROR: You need to supply gain information when\n not using the default readnoise for WFPC2.")
else:
# In this case, the user has specified both gain and readnoise values. Just use them as is.
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain = chip._headergain
print("Using user defined values for gain and readnoise")
# Convert the science data to electrons
self.doUnitConversions()
|
This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
|
entailment
|
def doUnitConversions(self):
""" Apply unit conversions to all the chips, ignoring the group parameter.
This ensures that all the chips get the same conversions when this
gets done, even if only 1 chip was specified to be processed.
"""
# Image information
_handle = fileutil.openImage(self._filename, mode='readonly', memmap=False)
# Now convert the SCI array(s) units
for det in range(1,self._numchips+1):
chip=self._image[self.scienceExt,det]
conversionFactor = 1.0
# add D2IMFILE to outputNames for removal by 'clean()' method later
if 'D2IMFILE' in _handle[0].header and _handle[0].header['D2IMFILE'] not in ["","N/A"]:
chip.outputNames['d2imfile'] = _handle[0].header['D2IMFILE']
if chip._gain is not None:
"""
# Multiply the values of the sci extension pixels by the gain.
print "Converting %s[%d] from COUNTS to ELECTRONS"%(self._filename,det)
# If the exptime is 0 the science image will be zeroed out.
np.multiply(_handle[self.scienceExt,det].data,chip._gain,_handle[self.scienceExt,det].data)
chip.data=_handle[self.scienceExt,det].data
# Set the BUNIT keyword to 'electrons'
chip._bunit = 'ELECTRONS'
chip.header.update('BUNIT','ELECTRONS')
_handle[self.scienceExt,det].header.update('BUNIT','ELECTRONS')
# Update the PHOTFLAM value
photflam = _handle[self.scienceExt,det].header['PHOTFLAM']
_handle[self.scienceExt,det].header.update('PHOTFLAM',(photflam/chip._gain))
"""
conversionFactor = chip._gain
chip._effGain = chip._gain #1.
chip._conversionFactor = conversionFactor #1.
else:
msg = "Invalid gain value for data, no conversion done"
print(msg)
raise ValueError(msg)
# Close the files and clean-up
_handle.close()
self._effGain = conversionFactor
|
Apply unit conversions to all the chips, ignoring the group parameter.
This ensures that all the chips get the same conversions when this
gets done, even if only 1 chip was specified to be processed.
|
entailment
|
def getdarkcurrent(self,exten):
"""
Return the dark current for the WFPC2 detector. This value
will be contained within an instrument specific keyword.
The value in the image header will be converted to units
of electrons.
Returns
-------
darkcurrent : float
Dark current for the WFPC2 detector in **units of counts/electrons**.
"""
darkrate = 0.005 # electrons / s
if self.proc_unit == 'native':
darkrate = darkrate / self.getGain(exten) #count/s
try:
chip = self._image[0]
darkcurrent = chip.header['DARKTIME'] * darkrate
except KeyError:
msg = "#############################################\n"
msg += "# #\n"
msg += "# Error: #\n"
msg += "# Cannot find the value for 'DARKTIME' #\n"
msg += "# in the image header. WFPC2 input #\n"
msg += "# images are expected to have this header #\n"
msg += "# keyword. #\n"
msg += "# #\n"
msg += "# Error occured in the WFPC2InputImage class#\n"
msg += "# #\n"
msg += "#############################################\n"
raise ValueError(msg)
return darkcurrent
|
Return the dark current for the WFPC2 detector. This value
will be contained within an instrument specific keyword.
The value in the image header will be converted to units
of electrons.
Returns
-------
darkcurrent : float
Dark current for the WFPC2 detector in **units of counts/electrons**.
|
entailment
|
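A worked example of the arithmetic above: a DARKTIME of 1000 s at the fixed 0.005 e-/s rate gives 5 electrons of dark signal, or that value divided by the gain when working in native counts (the gain value here is only illustrative):

darkrate = 0.005                     # electrons / s
darktime = 1000.0                    # seconds, from the DARKTIME keyword
gain = 7.0                           # illustrative WFPC2 ATODGAIN value
print(darktime * darkrate)           # 5.0 electrons
print(darktime * darkrate / gain)    # ~0.71 counts in 'native' units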
def getReadNoise(self, exten):
"""
Method for returning the readnoise of a detector (in counts).
Returns
-------
readnoise : float
The readnoise of the detector in **units of counts/electrons**.
"""
rn = self._image[exten]._rdnoise
if self.proc_unit == 'native':
rn = rn / self.getGain(exten)
return rn
|
Method for returning the readnoise of a detector (in counts).
Returns
-------
readnoise : float
The readnoise of the detector in **units of counts/electrons**.
|
entailment
|
def buildMask(self, chip, bits=0, write=False):
""" Build masks as specified in the user parameters found in the
configObj object.
"""
sci_chip = self._image[self.scienceExt,chip]
### For WFPC2 Data, build mask files using:
maskname = sci_chip.dqrootname+'_dqmask.fits'
dqmask_name = buildmask.buildShadowMaskImage(sci_chip.dqfile,sci_chip.detnum,sci_chip.extnum,maskname,bitvalue=bits,binned=sci_chip.binned)
sci_chip.dqmaskname = dqmask_name
sci_chip.outputNames['dqmask'] = dqmask_name
sci_chip.outputNames['tmpmask'] = 'wfpc2_inmask%d.fits'%(sci_chip.detnum)
dqmask = fits.getdata(dqmask_name, ext=0, memmap=False)
return dqmask
|
Build masks as specified in the user parameters found in the
configObj object.
|
entailment
|
def generateCatalog(wcs, mode='automatic', catalog=None,
src_find_filters=None, **kwargs):
""" Function which determines what type of catalog object needs to be
instantiated based on what type of source selection algorithm the user
specified.
Parameters
----------
wcs : obj
WCS object generated by STWCS or PyWCS
catalog : str or ndarray
Filename of existing catalog or ndarray of image for generation of
source catalog.
kwargs : dict
Parameters needed to interpret source catalog from input catalog
with `findmode` being required.
Returns
-------
catalog : obj
A Catalog-based class instance for keeping track of WCS and
associated source catalog
"""
if not isinstance(catalog,Catalog):
if mode == 'automatic': # if an array is provided as the source
# Create a new catalog directly from the image
catalog = ImageCatalog(wcs,catalog,src_find_filters,**kwargs)
else: # a catalog file was provided as the catalog source
catalog = UserCatalog(wcs,catalog,**kwargs)
return catalog
|
Function which determines what type of catalog object needs to be
instantiated based on what type of source selection algorithm the user
specified.
Parameters
----------
wcs : obj
WCS object generated by STWCS or PyWCS
catalog : str or ndarray
Filename of existing catalog or ndarray of image for generation of
source catalog.
kwargs : dict
Parameters needed to interpret source catalog from input catalog
with `findmode` being required.
Returns
-------
catalog : obj
A Catalog-based class instance for keeping track of WCS and
associated source catalog
|
entailment
|
def generateRaDec(self):
""" Convert XY positions into sky coordinates using STWCS methods. """
self.prefix = self.PAR_PREFIX
if not isinstance(self.wcs,pywcs.WCS):
print(
textutil.textbox(
'WCS not a valid PyWCS object. '
'Conversion of RA/Dec not possible...'
),
file=sys.stderr
)
raise ValueError
if self.xypos is None or len(self.xypos[0]) == 0:
self.xypos = None
warnstr = textutil.textbox(
'WARNING: \n'
'No objects found for this image...'
)
for line in warnstr.split('\n'):
log.warning(line)
print(warnstr)
return
if self.radec is None:
print(' Found {:d} objects.'.format(len(self.xypos[0])))
if self.wcs is not None:
ra, dec = self.wcs.all_pix2world(self.xypos[0], self.xypos[1], self.origin)
self.radec = [ra, dec] + copy.deepcopy(self.xypos[2:])
else:
# If we have no WCS, simply pass along the XY input positions
# under the assumption they were already sky positions.
self.radec = copy.deepcopy(self.xypos)
|
Convert XY positions into sky coordinates using STWCS methods.
|
entailment
|
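A standalone sketch of the pixel-to-sky step using a bare astropy WCS; the class above uses an STWCS/HSTWCS object instead, but the all_pix2world call is the same. All WCS values here are made up:

import numpy as np
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.crpix = [512.0, 512.0]
w.wcs.crval = [150.0, 2.0]                 # made-up tangent point (deg)
w.wcs.cdelt = [-1.4e-5, 1.4e-5]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']

x = np.array([100.0, 512.0, 900.0])
y = np.array([200.0, 512.0, 700.0])
ra, dec = w.all_pix2world(x, y, 1)         # origin=1, FITS convention
print(ra, dec)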
def apply_exclusions(self,exclusions):
""" Trim sky catalog to remove any sources within regions specified by
exclusions file.
"""
# parse exclusion file into list of positions and distances
exclusion_coords = tweakutils.parse_exclusions(exclusions)
if exclusion_coords is None:
return
excluded_list = []
radec_indx = list(range(len(self.radec[0])))
for ra,dec,indx in zip(self.radec[0],self.radec[1],radec_indx):
src_pos = coords.SkyCoord(ra=ra,dec=dec,unit=(u.hourangle,u.deg))
# check to see whether this source is within an exclusion region
for reg in exclusion_coords:
if reg['units'] == 'sky':
regpos = reg['pos']
regdist = reg['distance'] # units: arcsec
else:
regradec = self.wcs.all_pix2world([reg['pos']],1)[0]
regpos = (regradec[0],regradec[1])
regdist = reg['distance']*self.wcs.pscale # units: arcsec
epos = coords.SkyCoord(ra=regpos[0],dec=regpos[1],unit=(u.hourangle,u.deg))
if float(epos.separation(src_pos).to_string(unit=u.arcsec,decimal=True)) <= regdist:
excluded_list.append(indx)
break
# create a list of all 'good' sources outside all exclusion regions
for e in excluded_list: radec_indx.remove(e)
radec_indx = np.array(radec_indx,dtype=int)
num_excluded = len(excluded_list)
if num_excluded > 0:
radec_trimmed = []
xypos_trimmed = []
for arr in self.radec:
radec_trimmed.append(arr[radec_indx])
for arr in self.xypos:
xypos_trimmed.append(arr[radec_indx])
xypos_trimmed[-1] = np.arange(len(xypos_trimmed[0]))
self.radec = radec_trimmed
self.xypos = xypos_trimmed
log.info('Excluded %d sources from catalog.'%num_excluded)
|
Trim sky catalog to remove any sources within regions specified by
exclusions file.
|
entailment
|
def apply_flux_limits(self):
""" Apply any user-specified limits on source selection
Limits based on fluxes.
"""
if not self._apply_flux_limits:
return
# only if limits are set should they be applied
if ((self.maxflux is None and self.minflux is None) or
self.fluxunits is None):
return
print("\n Applying flux limits...")
print(" minflux = {}".format(self.minflux))
print(" maxflux = {}".format(self.maxflux))
print(" fluxunits = '{:s}'".format(self.fluxunits))
print(" nbright = {}".format(self.nbright))
# start by checking to see whether fluxes were read in to use for
# applying the limits
if not self.flux_col:
print(" WARNING: Catalog did not contain fluxes for use in trimming...")
return
if self.xypos is not None and self.radec is not None:
if len(self.xypos) < len(self.radec):
src_cat = self.radec
else:
src_cat = self.xypos
else:
src_cat = self.radec if self.xypos is None else self.xypos
if src_cat is None:
raise RuntimeError("No catalogs available for filtering")
if len(src_cat) < 3:
print(" WARNING: No fluxes read in for catalog for use in trimming...")
return
fluxes = copy.deepcopy(src_cat[2])
# apply limits equally to all .radec and .xypos entries
# Start by clipping by any specified flux range
if self.fluxunits == 'mag':
if self.minflux is None:
flux_mask = fluxes >= self.maxflux
elif self.maxflux is None:
flux_mask = fluxes <= self.minflux
else:
flux_mask = (fluxes <= self.minflux) & (fluxes >= self.maxflux)
else:
if self.minflux is None:
flux_mask = fluxes <= self.maxflux
elif self.maxflux is None:
flux_mask = fluxes >= self.minflux
else:
flux_mask = (fluxes >= self.minflux) & (fluxes <= self.maxflux)
if self.radec is None:
all_radec = None
else:
all_radec = [rd[flux_mask].copy() for rd in self.radec]
if self.xypos is None:
all_xypos = None
else:
all_xypos = [xy[flux_mask].copy() for xy in self.xypos]
nrem = flux_mask.size - np.count_nonzero(flux_mask)
print(" Removed {:d} sources based on flux limits.".format(nrem))
if self.nbright is not None:
print("Selecting catalog based on {} brightest sources".format(self.nbright))
fluxes = fluxes[flux_mask]
# find indices of brightest sources
idx = np.argsort(fluxes)
if self.fluxunits == 'mag':
idx = idx[:self.nbright]
else:
idx = (idx[::-1])[:self.nbright]
# pick out only the brightest 'nbright' sources
if all_radec is not None:
all_radec = [rd[idx] for rd in all_radec]
if all_xypos is not None:
all_xypos = [xy[idx] for xy in all_xypos]
self.radec = all_radec
self.xypos = all_xypos
if len(self.radec[0]) == 0:
print("Trimming of catalog resulted in NO valid sources! ")
raise ValueError
|
Apply any user-specified flux-based limits on source selection.
|
entailment
|
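A minimal numpy illustration of the masking logic above; note the inverted comparisons in the 'mag' branch, since brighter sources have smaller magnitudes:

import numpy as np

fluxes = np.array([18.0, 20.5, 22.0, 24.0])   # magnitudes
minflux, maxflux = 24.0, 19.0                 # keep 19 <= mag <= 24
flux_mask = (fluxes <= minflux) & (fluxes >= maxflux)
print(flux_mask)                              # [False  True  True  True]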
def buildCatalogs(self, exclusions=None, **kwargs):
""" Primary interface to build catalogs based on user inputs.
"""
self.generateXY(**kwargs)
self.generateRaDec()
if exclusions:
self.apply_exclusions(exclusions)
# apply selection limits as specified by the user:
self.apply_flux_limits()
|
Primary interface to build catalogs based on user inputs.
|
entailment
|
def plotXYCatalog(self, **kwargs):
"""
Method which displays the original image and overlays the positions
of the detected sources from this image's catalog.
Plotting `kwargs` that can be provided are:
vmin, vmax, cmap, marker
Default colormap is `summer`.
"""
try:
from matplotlib import pyplot as pl
except ImportError:
pl = None
if pl is not None: # If the pyplot package could be loaded...
pl.clf()
pars = kwargs.copy()
if 'marker' not in pars:
pars['marker'] = 'b+'
if 'cmap' in pars:
pl_cmap = pars['cmap']
del pars['cmap']
else:
pl_cmap = 'summer'
pl_vmin = None
pl_vmax = None
if 'vmin' in pars:
pl_vmin = pars['vmin']
del pars['vmin']
if 'vmax' in pars:
pl_vmax = pars['vmax']
del pars['vmax']
pl.imshow(self.source,cmap=pl_cmap,vmin=pl_vmin,vmax=pl_vmax)
pl.plot(self.xypos[0]-1,self.xypos[1]-1,pars['marker'])
|
Method which displays the original image and overlays the positions
of the detected sources from this image's catalog.
Plotting `kwargs` that can be provided are:
vmin, vmax, cmap, marker
Default colormap is `summer`.
|
entailment
|
def writeXYCatalog(self,filename):
""" Write out the X,Y catalog to a file
"""
if self.xypos is None:
warnstr = textutil.textbox(
'WARNING: \n No X,Y source catalog to write to file. ')
for line in warnstr.split('\n'):
log.warning(line)
print(warnstr)
return
f = open(filename,'w')
f.write("# Source catalog derived for %s\n"%self.wcs.filename)
f.write("# Columns: \n")
if self.use_sharp_round:
f.write('# X Y Flux ID Sharp Round1 Round2\n')
else:
f.write('# X Y Flux ID\n')
f.write('# (%s) (%s)\n'%(self.in_units,self.in_units))
for row in range(len(self.xypos[0])):
for i in range(len(self.xypos)):
f.write("%g "%(self.xypos[i][row]))
f.write("\n")
f.close()
|
Write out the X,Y catalog to a file
|
entailment
|
def generateXY(self, **kwargs):
""" Generate source catalog from input image using DAOFIND-style algorithm
"""
#x,y,flux,sharp,round = idlphot.find(array,self.pars['hmin'],self.pars['fwhm'],
# roundlim=self.pars['roundlim'], sharplim=self.pars['sharplim'])
print(" # Source finding for '{}', EXT={} started at: {}"
.format(self.fnamenoext, self.wcs.extname, util._ptime()[0]))
if self.pars['computesig']:
# compute sigma for this image
sigma = self._compute_sigma()
else:
sigma = self.pars['skysigma']
skymode = sigma**2
log.info(' Finding sources using sky sigma = %f'%sigma)
if self.pars['threshold'] in [None,"INDEF",""," "]:
hmin = skymode
else:
hmin = sigma*self.pars['threshold']
if 'mask' in kwargs and kwargs['mask'] is not None:
dqmask = np.asarray(kwargs['mask'], dtype=bool)
else:
dqmask = None
# get the mask for source finding:
mask = self._combine_exclude_mask(dqmask)
x, y, flux, src_id, sharp, round1, round2 = tweakutils.ndfind(
self.source,
hmin,
self.pars['conv_width'],
skymode,
sharplim=[self.pars['sharplo'],self.pars['sharphi']],
roundlim=[self.pars['roundlo'],self.pars['roundhi']],
peakmin=self.pars['peakmin'],
peakmax=self.pars['peakmax'],
fluxmin=self.pars['fluxmin'],
fluxmax=self.pars['fluxmax'],
nsigma=self.pars['nsigma'],
ratio=self.pars['ratio'],
theta=self.pars['theta'],
mask=mask,
use_sharp_round=self.use_sharp_round,
nbright=self.nbright
)
if len(x) == 0:
if not self.pars['computesig']:
sigma = self._compute_sigma()
hmin = sigma * self.pars['threshold']
log.info('No sources found with original thresholds. Trying automatic settings.')
x, y, flux, src_id, sharp, round1, round2 = tweakutils.ndfind(
self.source,
hmin,
self.pars['conv_width'],
skymode,
sharplim=[self.pars['sharplo'],self.pars['sharphi']],
roundlim=[self.pars['roundlo'],self.pars['roundhi']],
peakmin=self.pars['peakmin'],
peakmax=self.pars['peakmax'],
fluxmin=self.pars['fluxmin'],
fluxmax=self.pars['fluxmax'],
nsigma=self.pars['nsigma'],
ratio=self.pars['ratio'],
theta=self.pars['theta'],
mask = mask,
use_sharp_round = self.use_sharp_round,
nbright=self.nbright
)
if len(x) == 0:
xypostypes = 3*[float]+[int]+(3 if self.use_sharp_round else 0)*[float]
self.xypos = [np.empty(0, dtype=i) for i in xypostypes]
warnstr = textutil.textbox('WARNING: \n'+
'No valid sources found with the current parameter values!')
for line in warnstr.split('\n'):
log.warning(line)
print(warnstr)
else:
# convert the positions from numpy 0-based to FITS 1-based
if self.use_sharp_round:
self.xypos = [x+1, y+1, flux, src_id+self.start_id, sharp, round1, round2]
else:
self.xypos = [x+1, y+1, flux, src_id+self.start_id]
log.info('###Source finding finished at: %s'%(util._ptime()[0]))
self.in_units = 'pixels' # Not strictly necessary, but documents units when determined
self.sharp = sharp
self.round1 = round1
self.round2 = round2
self.numcols = 7 if self.use_sharp_round else 4
self.num_objects = len(x)
self._apply_flux_limits = False
|
Generate source catalog from input image using DAOFIND-style algorithm
|
entailment
|
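A small sketch of the detection-threshold arithmetic used above: the sky sigma (measured or user supplied) sets both the sky mode estimate and the detection threshold hmin passed to the source finder:

sigma = 4.2                  # illustrative sky sigma in counts
threshold = 3.0              # pars['threshold']
skymode = sigma ** 2         # 17.64; also used as hmin when threshold is None/INDEF
hmin = sigma * threshold     # 12.6
print(skymode, hmin)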
def generateXY(self, **kwargs):
"""
Method to interpret input catalog file as columns of positions and fluxes.
"""
self.num_objects = 0
xycols = self._readCatalog()
if xycols is not None:
# convert the catalog into attribute
self.xypos = xycols[:3]
# convert optional columns if they are present
if self.numcols > 3:
self.xypos.append(np.asarray(xycols[3], dtype=int)) # source ID
if self.numcols > 4:
self.sharp = xycols[4]
if self.numcols > 5:
self.round1 = xycols[5]
if self.numcols > 6:
self.round2 = xycols[6]
self.num_objects = len(xycols[0])
if self.numcols < 3: # account for flux column
self.xypos.append(np.zeros(self.num_objects, dtype=float))
self.flux_col = False
if self.numcols < 4: # add source ID column
self.xypos.append(np.arange(self.num_objects)+self.start_id)
if self.use_sharp_round:
for i in range(len(self.xypos), 7):
self.xypos.append(np.zeros(self.num_objects, dtype=float))
self.sharp_col = False
if self.pars['xyunits'] == 'degrees':
self.radec = [x.copy() for x in self.xypos]
if self.wcs is not None:
self.xypos[:2] = list(self.wcs.all_world2pix(np.array(self.xypos[:2]).T, self.origin).T)
|
Method to interpret input catalog file as columns of positions and fluxes.
|
entailment
|
def plotXYCatalog(self, **kwargs):
"""
Plots the source catalog positions using matplotlib's `pyplot.plot()`
Plotting `kwargs` that can also be passed include any keywords understood
by matplotlib's `pyplot.plot()` function such as::
vmin, vmax, cmap, marker
"""
try:
from matplotlib import pyplot as pl
except ImportError:
pl = None
if pl is not None:
pl.clf()
pl.plot(self.xypos[0],self.xypos[1],**kwargs)
|
Plots the source catalog positions using matplotlib's `pyplot.plot()`
Plotting `kwargs` that can also be passed include any keywords understood
by matplotlib's `pyplot.plot()` function such as::
vmin, vmax, cmap, marker
|
entailment
|
def run(configObj,wcsmap=None):
"""
Run the blot task based on parameters provided interactively by the user.
"""
# Ensure all output filenames specified have .fits extensions
if configObj['outdata'][-5:] != '.fits': configObj['outdata'] += '.fits'
scale_pars = configObj['Data Scaling Parameters']
user_wcs_pars = configObj['User WCS Parameters']
# PyFITS can be used here as it will always operate on
# output from PyDrizzle (which will always be a FITS file)
# Open the input (drizzled?) image
_fname,_sciextn = fileutil.parseFilename(configObj['data'])
_inimg = fileutil.openImage(_fname, memmap=False)
_expin = fileutil.getKeyword(configObj['data'],scale_pars['expkey'],handle=_inimg)
# Return the PyFITS HDU corresponding to the named extension
_scihdu = fileutil.getExtn(_inimg,_sciextn)
_insci = _scihdu.data.copy()
_inexptime = 1.0
if scale_pars['in_units'] == 'counts':
if scale_pars['expkey'] in _inimg['PRIMARY'].header:
_inexptime = _inimg['PRIMARY'].header[scale_pars['expkey']]
elif 'DRIZEXPT' in _inimg['PRIMARY'].header:
# Try keyword written out by new 'drizzle' if no valid 'expkey' was given
_inexptime = _inimg['PRIMARY'].header['DRIZEXPT']
else:
raise ValueError('No valid exposure time keyword could be found '
'for input %s' % configObj['data'])
# always convert input to 'cps' for blot() algorithm
if _inexptime != 0.0 and _inexptime != 1.0:
np.divide(_insci, _inexptime, _insci)
_inimg.close()
del _inimg
# read in WCS from source (drizzled) image
source_wcs = stwcs.wcsutil.HSTWCS(configObj['data'])
if source_wcs.wcs.is_unity():
print("WARNING: No valid WCS found for input drizzled image: {}!".format(configObj['data']))
# define blot_wcs
blot_wcs = None
_refname,_refextn = fileutil.parseFilename(configObj['reference'])
if os.path.exists(_refname):
# read in WCS from pre-existing output image
blot_wcs = stwcs.wcsutil.HSTWCS(configObj['reference'])
if blot_wcs.wcs.is_unity():
print("WARNING: No valid WCS found for output image: {} !".format(configObj['reference']))
# define blot WCS based on input images or specified reference WCS values
if user_wcs_pars['user_wcs']:
blot_wcs = wcs_functions.build_hstwcs(
user_wcs_pars['raref'], user_wcs_pars['decref'],
user_wcs_pars['xrefpix'], user_wcs_pars['yrefpix'],
user_wcs_pars['outnx'], user_wcs_pars['outny'],
user_wcs_pars['outscale'], user_wcs_pars['orient'] )
configObj['coeffs'] = None
# If blot_wcs is still not defined at this point, we have a problem...
if blot_wcs is None:
blot_wcs = stwcs.distortion.utils.output_wcs([source_wcs],undistort=False)
out_wcs = blot_wcs.copy()
# perform blotting operation now
_outsci = do_blot(_insci, source_wcs, out_wcs, _expin, coeffs=configObj['coeffs'],
interp=configObj['interpol'], sinscl=configObj['sinscl'],
stepsize=configObj['stepsize'], wcsmap=wcsmap)
# create output with proper units and exptime-scaling
if scale_pars['out_units'] == 'counts':
if scale_pars['expout'] == 'input':
_outscale = fileutil.getKeyword(configObj['reference'],scale_pars['expkey'])
#_outscale = _expin
else:
_outscale = float(scale_pars['expout'])
print("Output blotted images scaled by exptime of {}".format(_outscale))
np.multiply(_outsci, _outscale, _outsci)
# Add sky back in to the blotted image, as specified by the user
if configObj['addsky']:
skyval = _scihdu.header['MDRIZSKY']
else:
skyval = configObj['skyval']
print("Added {} counts back in to blotted image as sky.".format(skyval))
_outsci += skyval
del _scihdu
# Write output Numpy objects to a PyFITS file
# Blotting only occurs from a drizzled SCI extension
# to a blotted SCI extension...
outputimage.writeSingleFITS(_outsci,blot_wcs, configObj['outdata'],configObj['reference'])
|
Run the blot task based on parameters provided interactively by the user.
|
entailment
|
def runBlot(imageObjectList, output_wcs, configObj={},
wcsmap=wcs_functions.WCSMap, procSteps=None):
"""
runBlot(imageObjectList, output_wcs, configObj={},
wcsmap=wcs_functions.WCSMap, procSteps=None)
"""
if procSteps is not None:
procSteps.addStep('Blot')
blot_name = util.getSectionName(configObj, _blot_step_num_)
# This can be called directly from MultiDrizzle, so only execute if the
# switch has been turned on (no guarantee MD will check before calling).
if configObj[blot_name]['blot']:
paramDict = buildBlotParamDict(configObj)
log.info('USER INPUT PARAMETERS for Blot Step:')
util.printParams(paramDict, log=log)
run_blot(imageObjectList, output_wcs.single_wcs, paramDict,
wcsmap=wcsmap)
else:
log.info('Blot step not performed.')
if procSteps is not None:
procSteps.endStep('Blot')
|
runBlot(imageObjectList, output_wcs, configObj={},
wcsmap=wcs_functions.WCSMap, procSteps=None)
|
entailment
|
def run_blot(imageObjectList,output_wcs,paramDict,wcsmap=wcs_functions.WCSMap):
"""
run_blot(imageObjectList, output_wcs, paramDict, wcsmap=wcs_functions.WCSMap)
Perform the blot operation on the list of images.
"""
# Ensure that the input imageObject is a list
if not isinstance(imageObjectList, list):
imageObjectList = [imageObjectList]
#
# Setup the versions info dictionary for output to PRIMARY header
# The keys will be used as the name reported in the header, as-is
#
_versions = {'AstroDrizzle':__version__,
'PyFITS':util.__fits_version__,
'Numpy':util.__numpy_version__}
_hdrlist = []
for img in imageObjectList:
for chip in img.returnAllChips(extname=img.scienceExt):
print(' Blot: creating blotted image: ',chip.outputNames['data'])
#### Check to see what names need to be included here for use in _hdrlist
chip.outputNames['driz_version'] = _versions['AstroDrizzle']
outputvals = chip.outputNames.copy()
outputvals.update(img.outputValues)
outputvals['blotnx'] = chip.wcs.naxis1
outputvals['blotny'] = chip.wcs.naxis2
_hdrlist.append(outputvals)
plist = outputvals.copy()
plist.update(paramDict)
# PyFITS can be used here as it will always operate on
# output from PyDrizzle (which will always be a FITS file)
# Open the input science file
medianPar = 'outMedian'
outMedianObj = img.getOutputName(medianPar)
if img.inmemory:
outMedian = img.outputNames[medianPar]
_fname,_sciextn = fileutil.parseFilename(outMedian)
_inimg = outMedianObj
else:
outMedian = outMedianObj
_fname,_sciextn = fileutil.parseFilename(outMedian)
_inimg = fileutil.openImage(_fname, memmap=False)
# Return the PyFITS HDU corresponding to the named extension
_scihdu = fileutil.getExtn(_inimg,_sciextn)
_insci = _scihdu.data.copy()
_inimg.close()
del _inimg, _scihdu
_outsci = do_blot(_insci, output_wcs,
chip.wcs, chip._exptime, coeffs=paramDict['coeffs'],
interp=paramDict['blot_interp'], sinscl=paramDict['blot_sinscl'],
wcsmap=wcsmap)
# Apply sky subtraction and unit conversion to blotted array to
# match un-modified input array
if paramDict['blot_addsky']:
skyval = chip.computedSky
else:
skyval = paramDict['blot_skyval']
_outsci /= chip._conversionFactor
if skyval is not None:
_outsci += skyval
log.info('Applying sky value of %0.6f to blotted image %s'%
(skyval,chip.outputNames['data']))
# Write output Numpy objects to a PyFITS file
# Blotting only occurs from a drizzled SCI extension
# to a blotted SCI extension...
_outimg = outputimage.OutputImage(_hdrlist, paramDict, build=False, wcs=chip.wcs, blot=True)
_outimg.outweight = None
_outimg.outcontext = None
outimgs = _outimg.writeFITS(plist['data'],_outsci,None,
versions=_versions,blend=False,
virtual=img.inmemory)
img.saveVirtualOutputs(outimgs)
#_buildOutputFits(_outsci,None,plist['outblot'])
_hdrlist = []
del _outsci
del _outimg
|
run_blot(imageObjectList, output_wcs, paramDict, wcsmap=wcs_functions.WCSMap)
Perform the blot operation on the list of images.
|
entailment
|
def do_blot(source, source_wcs, blot_wcs, exptime, coeffs = True,
interp='poly5', sinscl=1.0, stepsize=10, wcsmap=None):
""" Core functionality of performing the 'blot' operation to create a single
blotted image from a single source image.
All distortion information is assumed to be included in the WCS specification
of the 'output' blotted image given in 'blot_wcs'.
This is the simplest interface that can be called for stand-alone
use of the blotting function.
Parameters
----------
source
Input numpy array of undistorted source image in units of 'cps'.
source_wcs
HSTWCS object representing source image distortion-corrected WCS.
blot_wcs
(py)wcs.WCS object representing the blotted image WCS.
exptime
exptime to use for scaling output blot image. A value of 1 will
result in output blot image in units of 'cps'.
coeffs
Flag to specify whether or not to use distortion coefficients
associated with blot_wcs. If False, do not apply any distortion
model.
interp
Form of interpolation to use when blotting pixels. Valid options::
"nearest","linear","poly3", "poly5"(default), "spline3", "sinc"
sinscl
Scale for sinc interpolation kernel (in output, blotted pixels)
stepsize
Number of pixels for WCS interpolation
wcsmap
Custom mapping class to use to provide transformation from
drizzled to blotted WCS. Default will be to use
`drizzlepac.wcs_functions.WCSMap`.
"""
_outsci = np.zeros(blot_wcs.array_shape, dtype=np.float32)
# Now pass numpy objects to callable version of Blot...
build=False
misval = 0.0
kscale = 1.0
xmin = 1
ymin = 1
xmax, ymax = source_wcs.pixel_shape
# compute the undistorted 'natural' plate scale for this chip
if coeffs:
wcslin = distortion.utils.make_orthogonal_cd(blot_wcs)
else:
wcslin = blot_wcs
blot_wcs.sip = None
blot_wcs.cpdis1 = None
blot_wcs.cpdis2 = None
blot_wcs.det2im = None
if wcsmap is None and cdriz is not None:
"""
Use default C mapping function.
"""
print('Using default C-based coordinate transformation...')
mapping = cdriz.DefaultWCSMapping(
blot_wcs, source_wcs,
blot_wcs.pixel_shape[0], blot_wcs.pixel_shape[1],
stepsize
)
pix_ratio = source_wcs.pscale/wcslin.pscale
else:
#
##Using the Python class for the WCS-based transformation
#
# Use user provided mapping function
print('Using coordinate transformation defined by user...')
if wcsmap is None:
wcsmap = wcs_functions.WCSMap
wmap = wcsmap(blot_wcs,source_wcs)
mapping = wmap.forward
pix_ratio = source_wcs.pscale/wcslin.pscale
t = cdriz.tblot(
source, _outsci,xmin,xmax,ymin,ymax,
pix_ratio, kscale, 1.0, 1.0,
'center',interp, exptime,
misval, sinscl, 1, mapping)
del mapping
return _outsci
|
Core functionality of performing the 'blot' operation to create a single
blotted image from a single source image.
All distortion information is assumed to be included in the WCS specification
of the 'output' blotted image given in 'blot_wcs'.
This is the simplest interface that can be called for stand-alone
use of the blotting function.
Parameters
----------
source
Input numpy array of undistorted source image in units of 'cps'.
source_wcs
HSTWCS object representing source image distortion-corrected WCS.
blot_wcs
(py)wcs.WCS object representing the blotted image WCS.
exptime
exptime to use for scaling output blot image. A value of 1 will
result in output blot image in units of 'cps'.
coeffs
Flag to specify whether or not to use distortion coefficients
associated with blot_wcs. If False, do not apply any distortion
model.
interp
Form of interpolation to use when blotting pixels. Valid options::
"nearest","linear","poly3", "poly5"(default), "spline3", "sinc"
sinscl
Scale for sinc interpolation kernel (in output, blotted pixels)
stepsize
Number of pixels for WCS interpolation
wcsmap
Custom mapping class to use to provide transformation from
drizzled to blotted WCS. Default will be to use
`drizzlepac.wcs_functions.WCSMap`.
|
entailment
|
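A hedged stand-alone call of the interface documented above, assuming drizzlepac and stwcs are installed and that do_blot is importable from drizzlepac.ablot; 'drz.fits' and 'flt.fits' are illustrative file names only:

import numpy as np
from astropy.io import fits
from stwcs import wcsutil
from drizzlepac.ablot import do_blot

med = fits.getdata('drz.fits', ext=1).astype(np.float32)   # assumed to be in cps
source_wcs = wcsutil.HSTWCS('drz.fits', ext=1)             # drizzled (undistorted) WCS
blot_wcs = wcsutil.HSTWCS('flt.fits', ext=('SCI', 1))      # distorted chip WCS
blotted = do_blot(med, source_wcs, blot_wcs, exptime=1.0, interp='poly5')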
def process(inFile,force=False,newpath=None, inmemory=False, num_cores=None,
headerlets=True, align_to_gaia=True):
""" Run astrodrizzle on input file/ASN table
using default values for astrodrizzle parameters.
"""
# We only need to import this package if a user runs the task
import drizzlepac
from drizzlepac import processInput # used for creating new ASNs for _flc inputs
from stwcs import updatewcs
from drizzlepac import alignimages
# interpret envvar variable, if specified
if envvar_compute_name in os.environ:
val = os.environ[envvar_compute_name].lower()
if val not in envvar_bool_dict:
msg = "ERROR: invalid value for {}.".format(envvar_compute_name)
msg += " \n Valid Values: on, off, yes, no, true, false"
raise ValueError(msg)
align_to_gaia = envvar_bool_dict[val]
if envvar_new_apriori_name in os.environ:
# Reset ASTROMETRY_STEP_CONTROL based on this variable
# This provides backward-compatibility until ASTROMETRY_STEP_CONTROL
# gets removed entirely.
val = os.environ[envvar_new_apriori_name].lower()
if val not in envvar_dict:
msg = "ERROR: invalid value for {}.".format(envvar_new_apriori_name)
msg += " \n Valid Values: on, off, yes, no, true, false"
raise ValueError(msg)
os.environ[envvar_old_apriori_name] = envvar_dict[val]
if headerlets or align_to_gaia:
from stwcs.wcsutil import headerlet
# Open the input file
try:
# Make sure given filename is complete and exists...
inFilename = fileutil.buildRootname(inFile,ext=['.fits'])
if not os.path.exists(inFilename):
print("ERROR: Input file - %s - does not exist." % inFilename)
return
except TypeError:
print("ERROR: Inappropriate input file.")
return
#If newpath was specified, move all files to that directory for processing
if newpath:
orig_processing_dir = os.getcwd()
new_processing_dir = _createWorkingDir(newpath,inFilename)
_copyToNewWorkingDir(new_processing_dir,inFilename)
os.chdir(new_processing_dir)
# Initialize for later use...
_mname = None
_new_asn = None
_calfiles = []
# Identify WFPC2 inputs to account for differences in WFPC2 inputs
wfpc2_input = fits.getval(inFilename, 'instrume') == 'WFPC2'
cal_ext = None
# Check input file to see if [DRIZ/DITH]CORR is set to PERFORM
if '_asn' in inFilename:
# We are working with an ASN table.
# Use asnutil code to extract filename
inFilename = _lowerAsn(inFilename)
_new_asn = [inFilename]
_asndict = asnutil.readASNTable(inFilename,None,prodonly=False)
_cal_prodname = _asndict['output'].lower()
#_fname = fileutil.buildRootname(_cal_prodname,ext=['_drz.fits'])
# Retrieve the first member's rootname for possible use later
_fimg = fits.open(inFilename, memmap=False)
for name in _fimg[1].data.field('MEMNAME'):
if name[-1] != '*':
_mname = name.split('\0', 1)[0].lower()
break
_fimg.close()
del _fimg
else:
# Check to see if input is a _RAW file
# If it is, strip off the _raw.fits extension...
_indx = inFilename.find('_raw')
if _indx < 0: _indx = len(inFilename)
# ... and build the CALXXX product rootname.
if wfpc2_input:
# force code to define _c0m file as calibrated product to be used
cal_ext = ['_c0m.fits']
_mname = fileutil.buildRootname(inFilename[:_indx], ext=cal_ext)
_cal_prodname = inFilename[:_indx]
# Reset inFilename to correspond to appropriate input for
# drizzle: calibrated product name.
inFilename = _mname
if _mname is None:
errorMsg = 'Could not find calibrated product!'
raise Exception(errorMsg)
# Create trailer filenames based on ASN output filename or
# on input name for single exposures
if '_raw' in inFile:
# Output trailer file to RAW file's trailer
_trlroot = inFile[:inFile.find('_raw')]
elif '_asn' in inFile:
# Output trailer file to ASN file's trailer, not product's trailer
_trlroot = inFile[:inFile.find('_asn')]
else:
# Default: trim off last suffix of input filename
# and replacing with .tra
_indx = inFile.rfind('_')
if _indx > 0:
_trlroot = inFile[:_indx]
else:
_trlroot = inFile
_trlfile = _trlroot + '.tra'
# Open product and read keyword value
# Check to see if product already exists...
dkey = 'DRIZCORR'
# ...if product does NOT exist, interrogate input file
# to find out whether 'dcorr' has been set to PERFORM
# Check if user wants to process again regardless of DRIZCORR keyword value
if force:
dcorr = 'PERFORM'
else:
if _mname :
_fimg = fits.open(fileutil.buildRootname(_mname,ext=['_raw.fits']), memmap=False)
_phdr = _fimg['PRIMARY'].header
if dkey in _phdr:
dcorr = _phdr[dkey]
else:
dcorr = None
_fimg.close()
del _fimg
else:
dcorr = None
time_str = _getTime()
_tmptrl = _trlroot + '_tmp.tra'
_drizfile = _trlroot + '_pydriz'
_drizlog = _drizfile + ".log" # the '.log' gets added automatically by astrodrizzle
_alignlog = _trlroot + '_align.log'
if dcorr == 'PERFORM':
if '_asn.fits' not in inFilename:
# Working with a singleton
# However, we always want to make sure we use
# a calibrated product as input, if available.
_infile = fileutil.buildRootname(_cal_prodname, ext=cal_ext)
_infile_flc = fileutil.buildRootname(_cal_prodname,ext=['_flc.fits'])
_cal_prodname = _infile
_inlist = _calfiles = [_infile]
# Add CTE corrected filename as additional input if present
if os.path.exists(_infile_flc) and _infile_flc != _infile:
_inlist.append(_infile_flc)
else:
# Working with an ASN table...
_infile = inFilename
flist,duplist = processInput.checkForDuplicateInputs(_asndict['order'])
_calfiles = flist
if len(duplist) > 0:
origasn = processInput.changeSuffixinASN(inFilename,'flt')
dupasn = processInput.changeSuffixinASN(inFilename,'flc')
_inlist = [origasn,dupasn]
else:
_inlist = [_infile]
# We want to keep the original specification of the calibration
# product name, though, not a lower-case version...
_cal_prodname = inFilename
_new_asn.extend(_inlist) # kept so we can delete it when finished
# check to see whether FLC files are also present, and need to be updated
# generate list of FLC files
align_files = None
_calfiles_flc = [f.replace('_flt.fits','_flc.fits') for f in _calfiles]
# ensure these files exist; if not, blank them out
# Also pick out what files will be used for additional alignment to GAIA
if not os.path.exists(_calfiles_flc[0]):
_calfiles_flc = None
align_files = _calfiles
align_update_files = None
else:
align_files = _calfiles_flc
align_update_files = _calfiles
# Run updatewcs on each list of images
updatewcs.updatewcs(_calfiles)
if _calfiles_flc:
updatewcs.updatewcs(_calfiles_flc)
if align_to_gaia:
# Perform additional alignment on the FLC files, if present
###############
#
# call hlapipeline code here on align_files list of files
#
###############
# Create trailer marker message for start of align_to_GAIA processing
_trlmsg = _timestamp("Align_to_GAIA started ")
print(_trlmsg)
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
_trlmsg = ""
# Create an empty astropy table so it can be used as input/output for the perform_align function
#align_table = Table()
try:
align_table = alignimages.perform_align(align_files,update_hdr_wcs=True, runfile=_alignlog)
for row in align_table:
if row['status'] == 0:
trlstr = "Successfully aligned {} to {} astrometric frame\n"
_trlmsg += trlstr.format(row['imageName'], row['catalog'])
else:
trlstr = "Could not align {} to absolute astrometric frame\n"
_trlmsg += trlstr.format(row['imageName'])
except Exception:
# Something went wrong with alignment to GAIA, so report this in
# trailer file
_trlmsg = "EXCEPTION encountered in alignimages...\n"
_trlmsg += " No correction to absolute astrometric frame applied!\n"
# Write the perform_align log to the trailer file...(this will delete the _alignlog)
_appendTrlFile(_trlfile,_alignlog)
# Append messages from this calling routine post-perform_align
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
_trlmsg = ""
#Check to see whether there are any additional input files that need to
# be aligned (namely, FLT images)
if align_update_files and align_table:
# Apply headerlets from alignment to FLT version of the files
for fltfile, flcfile in zip(align_update_files, align_files):
row = align_table[align_table['imageName']==flcfile]
headerletFile = row['headerletFile'][0]
if headerletFile != "None":
headerlet.apply_headerlet_as_primary(fltfile, headerletFile,
attach=True, archive=True)
# append log file contents to _trlmsg for inclusion in trailer file
_trlstr = "Applying headerlet {} as Primary WCS to {}\n"
_trlmsg += _trlstr.format(headerletFile, fltfile)
else:
_trlmsg += "No absolute astrometric headerlet applied to {}\n".format(fltfile)
# Finally, append any further messages associated with alignment from this calling routine
_trlmsg += _timestamp('Align_to_GAIA completed ')
print(_trlmsg)
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
# Run astrodrizzle and send its processing statements to _trlfile
_pyver = drizzlepac.astrodrizzle.__version__
for _infile in _inlist: # Run astrodrizzle for all inputs
# Create trailer marker message for start of astrodrizzle processing
_trlmsg = _timestamp('astrodrizzle started ')
_trlmsg += __trlmarker__
_trlmsg += '%s: Processing %s with astrodrizzle Version %s\n' % (time_str,_infile,_pyver)
print(_trlmsg)
# Write out trailer comments to trailer file...
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
_pyd_err = _trlroot+'_pydriz.stderr'
try:
b = drizzlepac.astrodrizzle.AstroDrizzle(input=_infile,runfile=_drizfile,
configobj='defaults',in_memory=inmemory,
num_cores=num_cores, **pipeline_pars)
except Exception as errorobj:
_appendTrlFile(_trlfile,_drizlog)
_appendTrlFile(_trlfile,_pyd_err)
_ftrl = open(_trlfile,'a')
_ftrl.write('ERROR: Could not complete astrodrizzle processing of %s.\n' % _infile)
_ftrl.write(str(sys.exc_info()[0])+': ')
_ftrl.writelines(str(errorobj))
_ftrl.write('\n')
_ftrl.close()
print('ERROR: Could not complete astrodrizzle processing of %s.' % _infile)
raise Exception(str(errorobj))
# Now, append comments created by PyDrizzle to CALXXX trailer file
print('Updating trailer file %s with astrodrizzle comments.' % _trlfile)
_appendTrlFile(_trlfile,_drizlog)
# Save this for when astropy.io.fits can modify a file 'in-place'
# Update calibration switch
_fimg = fits.open(_cal_prodname, mode='update', memmap=False)
_fimg['PRIMARY'].header[dkey] = 'COMPLETE'
_fimg.close()
del _fimg
# Enforce pipeline convention of all lower-case product
# names
_prodlist = glob.glob('*drz.fits')
for _prodname in _prodlist:
_plower = _prodname.lower()
if _prodname != _plower: os.rename(_prodname,_plower)
else:
# Create default trailer file messages when astrodrizzle is not
# run on a file. This will typically apply only to BIAS, DARK
# and other reference images.
# Start by building up the message...
_trlmsg = _timestamp('astrodrizzle skipped ')
_trlmsg = _trlmsg + __trlmarker__
_trlmsg = _trlmsg + '%s: astrodrizzle processing not requested for %s.\n' % (time_str,inFilename)
_trlmsg = _trlmsg + ' astrodrizzle will not be run at this time.\n'
print(_trlmsg)
# Write message out to temp file and append it to full trailer file
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
# Append final timestamp to trailer file...
_final_msg = '%s: Finished processing %s \n' % (time_str,inFilename)
_final_msg += _timestamp('astrodrizzle completed ')
_trlmsg += _final_msg
ftmp = open(_tmptrl,'w')
ftmp.writelines(_trlmsg)
ftmp.close()
_appendTrlFile(_trlfile,_tmptrl)
# If we created a new ASN table, we need to remove it
if _new_asn is not None:
for _name in _new_asn: fileutil.removeFile(_name)
# Clean up any generated OrIg_files directory
if os.path.exists("OrIg_files"):
# check to see whether this directory is empty
flist = glob.glob('OrIg_files/*.fits')
if len(flist) == 0:
os.rmdir("OrIg_files")
else:
print('OrIg_files directory NOT removed as it still contained images...')
# If headerlets have already been written out by alignment code,
# do NOT write out this version of the headerlets
if headerlets:
# Generate headerlets for each updated FLT image
hlet_msg = _timestamp("Writing Headerlets started")
for fname in _calfiles:
frootname = fileutil.buildNewRootname(fname)
hname = "%s_flt_hlet.fits"%frootname
# Write out headerlet file used by astrodrizzle, however,
# do not overwrite any that was already written out by alignimages
if not os.path.exists(hname):
hlet_msg += "Created Headerlet file %s \n"%hname
try:
headerlet.write_headerlet(fname,'OPUS',output='flt', wcskey='PRIMARY',
author="OPUS",descrip="Default WCS from Pipeline Calibration",
attach=False,clobber=True,logging=False)
except ValueError:
hlet_msg += _timestamp("SKIPPED: Headerlet not created for %s \n"%fname)
# update trailer file to log creation of headerlet files
hlet_msg += _timestamp("Writing Headerlets completed")
ftrl = open(_trlfile,'a')
ftrl.write(hlet_msg)
ftrl.close()
# If processing was done in a temp working dir, restore results to original
# processing directory, return to original working dir and remove temp dir
if newpath:
_restoreResults(new_processing_dir,orig_processing_dir)
os.chdir(orig_processing_dir)
_removeWorkingDir(new_processing_dir)
# Provide feedback to user
print(_final_msg)
|
Run astrodrizzle on input file/ASN table
using default values for astrodrizzle parameters.
|
entailment
|
def _lowerAsn(asnfile):
""" Create a copy of the original asn file and change
the case of all members to lower-case.
"""
# Start by creating a new name for the ASN table
_indx = asnfile.find('_asn.fits')
_new_asn = asnfile[:_indx]+'_pipeline'+asnfile[_indx:]
if os.path.exists(_new_asn):
os.remove(_new_asn)
# copy original ASN table to new table
shutil.copy(asnfile,_new_asn)
# Open up the new copy and convert all MEMNAME's to lower-case
fasn = fits.open(_new_asn, mode='update', memmap=False)
for i in range(len(fasn[1].data)):
fasn[1].data[i].setfield('MEMNAME',fasn[1].data[i].field('MEMNAME').lower())
fasn.close()
return _new_asn
|
Create a copy of the original asn file and change
the case of all members to lower-case.
|
entailment
|
def _appendTrlFile(trlfile,drizfile):
""" Append drizfile to already existing trlfile from CALXXX.
"""
if not os.path.exists(drizfile):
return
# Open already existing CALWF3 trailer file for appending
ftrl = open(trlfile,'a')
# Open astrodrizzle trailer file
fdriz = open(drizfile)
# Read in drizzle comments
_dlines = fdriz.readlines()
# Append them to CALWF3 trailer file
ftrl.writelines(_dlines)
# Close all files
ftrl.close()
fdriz.close()
# Now, clean up astrodrizzle trailer file
os.remove(drizfile)
|
Append drizfile to already existing trlfile from CALXXX.
|
entailment
|
def _timestamp(_process_name):
"""Create formatted time string recognizable by OPUS."""
_prefix= time.strftime("%Y%j%H%M%S-I-----",time.localtime())
_lenstr = 60 - len(_process_name)
return _prefix+_process_name+(_lenstr*'-')+'\n'
|
Create formatted time string recognizable by OPUS.
|
entailment
|
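For reference, a minimal sketch of the OPUS-style timestamp line this helper produces; the function is restated standalone here so the snippet runs on its own, and the process name is illustrative.
import time

def _timestamp(_process_name):
    # restatement of the helper above, for illustration only
    _prefix = time.strftime("%Y%j%H%M%S-I-----", time.localtime())
    _lenstr = 60 - len(_process_name)
    return _prefix + _process_name + (_lenstr * '-') + '\n'

print(_timestamp('astrodrizzle started '), end='')
# e.g. 2024123093045-I-----astrodrizzle started --------------------------------------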
def _createWorkingDir(rootdir,input):
"""
Create a working directory based on input name under the parent directory specified as rootdir
"""
# extract rootname from input
rootname = input[:input.find('_')]
newdir = os.path.join(rootdir,rootname)
if not os.path.exists(newdir):
os.mkdir(newdir)
return newdir
|
Create a working directory based on input name under the parent directory specified as rootdir
|
entailment
|
def _copyToNewWorkingDir(newdir,input):
""" Copy input file and all related files necessary for processing to the new working directory.
This function works in a greedy manner, in that all files associated
with all inputs (i.e., those sharing the same rootname) will be copied to the new
working directory.
"""
flist = []
if '_asn.fits' in input:
asndict = asnutil.readASNTable(input,None)
flist.append(input[:input.find('_')])
flist.extend(asndict['order'])
flist.append(asndict['output'])
else:
flist.append(input[:input.find('_')])
# copy all files related to these rootnames into new dir
for rootname in flist:
for fname in glob.glob(rootname+'*'):
shutil.copy(fname,os.path.join(newdir,fname))
|
Copy input file and all related files necessary for processing to the new working directory.
This function works in a greedy manner, in that all files associated
with all inputs (i.e., those sharing the same rootname) will be copied to the new
working directory.
|
entailment
|
def _restoreResults(newdir,origdir):
""" Move (not copy) all files from newdir back to the original directory
"""
for fname in glob.glob(os.path.join(newdir,'*')):
shutil.move(fname,os.path.join(origdir,os.path.basename(fname)))
|
Move (not copy) all files from newdir back to the original directory
|
entailment
|
def median(input=None, configObj=None, editpars=False, **inputDict):
"""
Create a median image from the separately drizzled images.
"""
if input is not None:
inputDict["input"] = input
else:
raise ValueError("Please supply an input image")
configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict,
loadOnly=(not editpars))
if configObj is None:
return
if not editpars:
run(configObj)
|
Create a median image from the separately drizzled images.
|
entailment
|
def createMedian(imgObjList, configObj, procSteps=None):
""" Top-level interface to createMedian step called from top-level
AstroDrizzle.
This function parses the input parameters then calls the `_median()`
function to median-combine the input images into a single image.
"""
if imgObjList is None:
msg = "Please provide a list of imageObjects to the median step"
print(msg, file=sys.stderr)
raise ValueError(msg)
if procSteps is not None:
procSteps.addStep('Create Median')
step_name = util.getSectionName(configObj, _step_num_)
if not configObj[step_name]['median']:
log.info('Median combination step not performed.')
return
paramDict = configObj[step_name]
paramDict['proc_unit'] = configObj['proc_unit']
# include whether or not compression was performed
driz_sep_name = util.getSectionName(configObj, _single_step_num_)
driz_sep_paramDict = configObj[driz_sep_name]
paramDict['compress'] = driz_sep_paramDict['driz_sep_compress']
log.info('USER INPUT PARAMETERS for Create Median Step:')
util.printParams(paramDict, log=log)
_median(imgObjList, paramDict)
if procSteps is not None:
procSteps.endStep('Create Median')
|
Top-level interface to createMedian step called from top-level
AstroDrizzle.
This function parses the input parameters then calls the `_median()`
function to median-combine the input images into a single image.
|
entailment
|
def _median(imageObjectList, paramDict):
"""Create a median image from the list of image Objects
that has been given.
"""
newmasks = paramDict['median_newmasks']
comb_type = paramDict['combine_type'].lower()
nlow = paramDict['combine_nlow']
nhigh = paramDict['combine_nhigh']
grow = paramDict['combine_grow'] if 'minmed' in comb_type else 0
maskpt = paramDict['combine_maskpt']
proc_units = paramDict['proc_unit']
compress = paramDict['compress']
bufsizeMB = paramDict['combine_bufsize']
sigma = paramDict["combine_nsigma"]
sigmaSplit = sigma.split()
nsigma1 = float(sigmaSplit[0])
nsigma2 = float(sigmaSplit[1])
if paramDict['combine_lthresh'] is None:
lthresh = None
else:
lthresh = float(paramDict['combine_lthresh'])
if paramDict['combine_hthresh'] is None:
hthresh = None
else:
hthresh = float(paramDict['combine_hthresh'])
# The name of the output median file is defined in the output WCS object and
# stored in the image.outputNames["outMedian"] dict of every imageObject
medianfile = imageObjectList[0].outputNames["outMedian"]
# Build combined array from single drizzled images.
# Start by removing any previous products...
if os.access(medianfile, os.F_OK):
os.remove(medianfile)
# Define lists for instrument-specific parameters; these come from the
# image objects and need to be passed to the minmed routine
readnoiseList = []
exposureTimeList = []
backgroundValueList = [] # list of MDRIZSKY *platescale values
singleDrizList = [] # these are the input images
singleWeightList = [] # pointers to the data arrays
wht_mean = [] # Compute the mean value of each wht image
single_hdr = None
virtual = None
# for each image object
for image in imageObjectList:
if virtual is None:
virtual = image.inmemory
det_gain = image.getGain(1)
img_exptime = image._image['sci', 1]._exptime
native_units = image.native_units
native_units_lc = native_units.lower()
if proc_units.lower() == 'native':
if native_units_lc not in ['counts', 'electrons', 'counts/s',
'electrons/s']:
raise ValueError("Unexpected native units: '{}'"
.format(native_units))
if lthresh is not None:
if native_units_lc.startswith('counts'):
lthresh *= det_gain
if native_units_lc.endswith('/s'):
lthresh *= img_exptime
if hthresh is not None:
if native_units_lc.startswith('counts'):
hthresh *= det_gain
if native_units_lc.endswith('/s'):
hthresh *= img_exptime
singleDriz = image.getOutputName("outSingle")
singleDriz_name = image.outputNames['outSingle']
singleWeight = image.getOutputName("outSWeight")
singleWeight_name = image.outputNames['outSWeight']
# If compression was used, reference ext=1 as CompImageHDU only writes
# out MEF files, not simple FITS.
if compress:
wcs_ext = '[1]'
wcs_extnum = 1
else:
wcs_ext = '[0]'
wcs_extnum = 0
if not virtual:
if isinstance(singleDriz, str):
iter_singleDriz = singleDriz + wcs_ext
iter_singleWeight = singleWeight + wcs_ext
else:
iter_singleDriz = singleDriz[wcs_extnum]
iter_singleWeight = singleWeight[wcs_extnum]
else:
iter_singleDriz = singleDriz_name + wcs_ext
iter_singleWeight = singleWeight_name + wcs_ext
# read in WCS from first single drizzle image to use as WCS for
# median image
if single_hdr is None:
if virtual:
single_hdr = singleDriz[wcs_extnum].header
else:
single_hdr = fits.getheader(singleDriz_name, ext=wcs_extnum,
memmap=False)
single_image = iterfile.IterFitsFile(iter_singleDriz)
if virtual:
single_image.handle = singleDriz
single_image.inmemory = True
singleDrizList.append(single_image) # add to an array for bookkeeping
# If it exists, extract the corresponding weight images
if (not virtual and os.access(singleWeight, os.F_OK)) or (
virtual and singleWeight):
weight_file = iterfile.IterFitsFile(iter_singleWeight)
if virtual:
weight_file.handle = singleWeight
weight_file.inmemory = True
singleWeightList.append(weight_file)
try:
tmp_mean_value = ImageStats(weight_file.data, lower=1e-8,
fields="mean", nclip=0).mean
except ValueError:
tmp_mean_value = 0.0
wht_mean.append(tmp_mean_value * maskpt)
# Extract instrument specific parameters and place in lists
# If an image has zero exposure time we will
# redefine that value as '1'. Although this will cause inaccurate
# scaling of the data to occur in the 'minmed' combination
# algorithm, this is a necessary evil since it avoids divide by
# zero exceptions. It is more important that the divide by zero
# exceptions not cause AstroDrizzle to crash in the pipeline than
# it is to raise an exception for this obviously bad data even
# though this is not the type of data you would wish to process
# with AstroDrizzle.
#
# Get the exposure time from the InputImage object
#
# MRD 19-May-2011
# Changed exposureTimeList to take exposure time from img_exptime
# variable instead of the image._exptime attribute, since
# image._exptime was just giving 1.
#
exposureTimeList.append(img_exptime)
# Use only "commanded" chips to extract subtractedSky and rdnoise:
rdnoise = 0.0
nchips = 0
bsky = None # minimum sky across **used** chips
for chip in image.returnAllChips(extname=image.scienceExt):
# compute sky value as sky/pixel using the single_drz
# pixel scale:
if bsky is None or bsky > chip.subtractedSky:
bsky = chip.subtractedSky * chip._conversionFactor
# Extract the readnoise value for the chip
rdnoise += chip._rdnoise**2
nchips += 1
if bsky is None:
bsky = 0.0
if nchips > 0:
rdnoise = math.sqrt(rdnoise/nchips)
backgroundValueList.append(bsky)
readnoiseList.append(rdnoise)
print("reference sky value for image '{}' is {}"
.format(image._filename, backgroundValueList[-1]))
#
# END Loop over input image list
#
# create an array for the median output image, use the size of the first
# image in the list. Store other useful image characteristics:
single_driz_data = singleDrizList[0].data
data_item_size = single_driz_data.itemsize
single_data_dtype = single_driz_data.dtype
imrows, imcols = single_driz_data.shape
medianImageArray = np.zeros_like(single_driz_data)
del single_driz_data
if comb_type == "minmed" and not newmasks:
# Issue a warning if minmed is being run with newmasks turned off.
print('\nWARNING: Creating median image without the application of '
'bad pixel masks!\n')
# The overlap value needs to be set to 2*grow in order to
# avoid edge effects when scrolling down the image, and to
# ensure that the last section returned from the iterator
# has enough rows to span the kernel used in the boxcar method
# within minmed.
overlap = 2 * grow
buffsize = BUFSIZE if bufsizeMB is None else (BUFSIZE * bufsizeMB)
section_nrows = min(imrows, int(buffsize / (imcols * data_item_size)))
if section_nrows == 0:
buffsize = imcols * data_item_size
print("WARNING: Buffer size is too small to hold a single row.\n"
" Buffer size size will be increased to minimal "
"required: {}MB".format(float(buffsize) / 1048576.0))
section_nrows = 1
if section_nrows < overlap + 1:
new_grow = int((section_nrows - 1) / 2)
if section_nrows == imrows:
print("'grow' parameter is too large for actual image size. "
"Reducing 'grow' to {}".format(new_grow))
else:
print("'grow' parameter is too large for requested buffer size. "
"Reducing 'grow' to {}".format(new_grow))
grow = new_grow
overlap = 2 * grow
nbr = section_nrows - overlap
nsec = (imrows - overlap) // nbr
if (imrows - overlap) % nbr > 0:
nsec += 1
for k in range(nsec):
e1 = k * nbr
e2 = e1 + section_nrows
u1 = grow
u2 = u1 + nbr
if k == 0: # first section
u1 = 0
if k == nsec - 1: # last section
e2 = min(e2, imrows)
e1 = min(e1, e2 - overlap - 1)
u2 = e2 - e1
imdrizSectionsList = np.empty(
(len(singleDrizList), e2 - e1, imcols),
dtype=single_data_dtype
)
for i, w in enumerate(singleDrizList):
imdrizSectionsList[i, :, :] = w[e1:e2]
if singleWeightList:
weightSectionsList = np.empty(
(len(singleWeightList), e2 - e1, imcols),
dtype=single_data_dtype
)
for i, w in enumerate(singleWeightList):
weightSectionsList[i, :, :] = w[e1:e2]
else:
weightSectionsList = None
weight_mask_list = None
if newmasks and weightSectionsList is not None:
# Build new masks from single drizzled images.
# Generate new pixel mask file for median step.
# This mask will be created from the single-drizzled
# weight image for this image.
# The mean of the weight array will be computed and all
# pixels with values less than 0.7 of the mean will be flagged
# as bad in this mask. This mask will then be used when
# creating the median image.
# 0 means good, 1 means bad here...
weight_mask_list = np.less(
weightSectionsList,
np.asarray(wht_mean)[:, None, None]
).astype(np.uint8)
if 'minmed' in comb_type: # Do MINMED
# set up use of 'imedian'/'imean' in minmed algorithm
fillval = comb_type.startswith('i')
# Create the combined array object using the minmed algorithm
result = min_med(
imdrizSectionsList,
weightSectionsList,
readnoiseList,
exposureTimeList,
backgroundValueList,
weight_masks=weight_mask_list,
combine_grow=grow,
combine_nsigma1=nsigma1,
combine_nsigma2=nsigma2,
fillval=fillval
)
else: # DO NUMCOMBINE
# Create the combined array object using the numcombine task
result = numcombine.num_combine(
imdrizSectionsList,
masks=weight_mask_list,
combination_type=comb_type,
nlow=nlow,
nhigh=nhigh,
upper=hthresh,
lower=lthresh
)
# Write out the processed image sections to the final output array:
medianImageArray[e1+u1:e1+u2, :] = result[u1:u2, :]
# Write out the combined image
# use the header from the first single drizzled image in the list
pf = _writeImage(medianImageArray, inputHeader=single_hdr)
if virtual:
mediandict = {}
mediandict[medianfile] = pf
for img in imageObjectList:
img.saveVirtualOutputs(mediandict)
else:
try:
print("Saving output median image to: '{}'".format(medianfile))
pf.writeto(medianfile)
except IOError:
msg = "Problem writing file '{}'".format(medianfile)
print(msg)
raise IOError(msg)
# Always close any files opened to produce median image; namely,
# single drizzle images and singly-drizzled weight images
#
for img in singleDrizList:
if not virtual:
img.close()
# Close all singly drizzled weight images used to create median image.
for img in singleWeightList:
if not virtual:
img.close()
|
Create a median image from the list of image Objects
that has been given.
|
entailment
|
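As a worked example of the row-sectioning arithmetic used in _median() above, the following standalone sketch computes how many overlapping sections are needed for a given buffer size; all numbers are illustrative.
# Illustrative numbers: a 4096x4096 float32 single-drizzled image and an 8 MB buffer.
imrows, imcols, itemsize = 4096, 4096, 4
buffsize = 8 * 1024 * 1024
grow = 1
overlap = 2 * grow                                    # rows shared between sections
section_nrows = min(imrows, buffsize // (imcols * itemsize))
nbr = section_nrows - overlap                         # new rows contributed per section
nsec = (imrows - overlap) // nbr
if (imrows - overlap) % nbr > 0:
    nsec += 1
print(section_nrows, nsec)                            # 512 9 -> nine overlapping sections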
def _writeImage(dataArray=None, inputHeader=None):
""" Writes out the result of the combination step.
The header of the first 'outsingle' file in the
association parlist is used as the header of the
new image.
Parameters
----------
dataArray : arr
Array of data to be written to a fits.PrimaryHDU object
inputHeader : obj
fits.header.Header object to use as basis for the PrimaryHDU header
"""
prihdu = fits.PrimaryHDU(data=dataArray, header=inputHeader)
pf = fits.HDUList()
pf.append(prihdu)
return pf
|
Writes out the result of the combination step.
The header of the first 'outsingle' file in the
association parlist is used as the header of the
new image.
Parameters
----------
dataArray : arr
Array of data to be written to a fits.PrimaryHDU object
inputHeader : obj
fits.header.Header object to use as basis for the PrimaryHDU header
|
entailment
|
def tran(inimage,outimage,direction='forward',x=None,y=None,
coords=None, coordfile=None,colnames=None,separator=None,
precision=6, output=None,verbose=True):
""" Primary interface to perform coordinate transformations in pixel
coordinates between 2 images using STWCS and full distortion models
read from each image's header.
"""
single_coord = False
# Only use value provided in `coords` if nothing has been specified for coordfile
if coords is not None and coordfile is None:
coordfile = coords
warnings.simplefilter('always',DeprecationWarning)
warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
category=DeprecationWarning)
warnings.simplefilter('default',DeprecationWarning)
if coordfile is not None:
if colnames in util.blank_list:
colnames = ['c1','c2']
# Determine columns which contain pixel positions
cols = util.parse_colnames(colnames,coordfile)
# read in columns from input coordinates file
xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)
if xyvals.ndim == 1: # only 1 entry in coordfile
xlist = [xyvals[0].copy()]
ylist = [xyvals[1].copy()]
else:
xlist = xyvals[:,0].copy()
ylist = xyvals[:,1].copy()
del xyvals
else:
if isinstance(x,np.ndarray):
xlist = x.tolist()
ylist = y.tolist()
elif not isinstance(x,list):
xlist = [x]
ylist = [y]
single_coord = True
else:
xlist = x
ylist = y
# start by reading in WCS+distortion info for each image
im1wcs = wcsutil.HSTWCS(inimage)
if im1wcs.wcs.is_unity():
print("####\nNo valid input WCS found in {}.\n Results may be invalid.\n####\n".format(inimage))
if util.is_blank(outimage):
fname,fextn = fileutil.parseFilename(inimage)
numsci = fileutil.countExtn(fname)
chips = []
for e in range(1,numsci+1):
chips.append(wcsutil.HSTWCS(fname,ext=('sci',e)))
if len(chips) == 0:
chips = [im1wcs]
im2wcs = distortion.utils.output_wcs(chips)
else:
im2wcs = wcsutil.HSTWCS(outimage)
if im2wcs.wcs.is_unity():
print("####\nNo valid output WCS found in {}.\n Results may be invalid.\n####\n".format(outimage))
# Setup the transformation
p2p = wcs_functions.WCSMap(im1wcs,im2wcs)
if direction[0].lower() == 'f':
outx,outy = p2p.forward(xlist,ylist)
else:
outx,outy = p2p.backward(xlist,ylist)
if isinstance(outx,np.ndarray):
outx = outx.tolist()
outy = outy.tolist()
# add formatting based on precision here...
xstr = []
ystr = []
fmt = "%."+repr(precision)+"f"
for ox,oy in zip(outx,outy):
xstr.append(fmt%ox)
ystr.append(fmt%oy)
if verbose or (not verbose and util.is_blank(output)):
print('# Coordinate transformations for ',inimage)
print('# X(in) Y(in) X(out) Y(out)\n')
for xs,ys,a,b in zip(xlist,ylist,xstr,ystr):
print("%.4f %.4f %s %s"%(xs,ys,a,b))
# Create output file, if specified
if output:
f = open(output,mode='w')
f.write("# Coordinates converted from %s\n"%inimage)
for xs,ys in zip(xstr,ystr):
f.write('%s %s\n'%(xs,ys))
f.close()
print('Wrote out results to: ',output)
if single_coord:
outx = outx[0]
outy = outy[0]
return outx,outy
|
Primary interface to perform coordinate transformations in pixel
coordinates between 2 images using STWCS and full distortion models
read from each image's header.
|
entailment
|
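A minimal usage sketch of tran(), assuming it is exposed as drizzlepac's pixtopix task; the import path and the file names below are assumptions, not taken from this source.
from drizzlepac import pixtopix   # assumed import path for the tran() task

# Transform a single pixel position from the frame of one FITS image to another.
# The file names (and the [sci,1] extension syntax) are placeholders.
xout, yout = pixtopix.tran("j8bt06nyq_flt.fits[sci,1]",
                           "j8bt06nzq_drz.fits[sci,1]",
                           direction="forward",
                           x=100.0, y=100.0)
print(xout, yout)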
def fetch_uri_contents(self, uri: str) -> URI:
"""
Return content-addressed URI stored at registry URI.
"""
address, pkg_name, pkg_version = parse_registry_uri(uri)
self.w3.enable_unstable_package_management_api()
self.w3.pm.set_registry(address)
_, _, manifest_uri = self.w3.pm.get_release_data(pkg_name, pkg_version)
return manifest_uri
|
Return content-addressed URI stored at registry URI.
|
entailment
|
def pin_assets(self, file_or_dir_path: Path) -> List[Dict[str, str]]:
"""
Return a list of dicts, each containing the IPFS hash, file name, and size of a pinned file.
"""
if file_or_dir_path.is_dir():
asset_data = [dummy_ipfs_pin(path) for path in file_or_dir_path.glob("*")]
elif file_or_dir_path.is_file():
asset_data = [dummy_ipfs_pin(file_or_dir_path)]
else:
raise FileNotFoundError(
f"{file_or_dir_path} is not a valid file or directory path."
)
return asset_data
|
Return a list of dicts, each containing the IPFS hash, file name, and size of a pinned file.
|
entailment
|
async def create_task(app: web.Application,
coro: Coroutine,
*args, **kwargs
) -> asyncio.Task:
"""
Convenience function for calling `TaskScheduler.create(coro)`
This will use the default `TaskScheduler` to create a new background task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def start(app):
await scheduler.create_task(app, current_time(interval=2))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
"""
return await get_scheduler(app).create(coro, *args, **kwargs)
|
Convenience function for calling `TaskScheduler.create(coro)`
This will use the default `TaskScheduler` to create a new background task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def start(app):
await scheduler.create_task(app, current_time(interval=2))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
|
entailment
|
async def cancel_task(app: web.Application,
task: asyncio.Task,
*args, **kwargs
) -> Any:
"""
Convenience function for calling `TaskScheduler.cancel(task)`
This will use the default `TaskScheduler` to cancel the given task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def stop_after(app, task, duration):
await asyncio.sleep(duration)
await scheduler.cancel_task(app, task)
print('stopped!')
async def start(app):
# Start first task
task = await scheduler.create_task(app, current_time(interval=2))
# Start second task to stop the first
await scheduler.create_task(app, stop_after(app, task, duration=10))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
"""
return await get_scheduler(app).cancel(task, *args, **kwargs)
|
Convenience function for calling `TaskScheduler.cancel(task)`
This will use the default `TaskScheduler` to cancel the given task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def stop_after(app, task, duration):
await asyncio.sleep(duration)
await scheduler.cancel_task(app, task)
print('stopped!')
async def start(app):
# Start first task
task = await scheduler.create_task(app, current_time(interval=2))
# Start second task to stop the first
await scheduler.create_task(app, stop_after(app, task, duration=10))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
|
entailment
|
async def _cleanup(self):
"""
Periodically removes completed tasks from the collection,
allowing fire-and-forget tasks to be garbage collected.
This does not delete the task object, it merely removes the reference in the scheduler.
"""
while True:
await asyncio.sleep(CLEANUP_INTERVAL_S)
self._tasks = {t for t in self._tasks if not t.done()}
|
Periodically removes completed tasks from the collection,
allowing fire-and-forget tasks to be garbage collected.
This does not delete the task object, it merely removes the reference in the scheduler.
|
entailment
|
async def create(self, coro: Coroutine) -> asyncio.Task:
"""
Starts execution of a coroutine.
The created asyncio.Task is returned, and added to managed tasks.
The scheduler guarantees that it is cancelled during application shutdown,
regardless of whether it was already cancelled manually.
Args:
coro (Coroutine):
The coroutine to be wrapped in a task, and executed.
Returns:
asyncio.Task: An awaitable Task object.
During Aiohttp shutdown, the scheduler will attempt to cancel and await this task.
The task can be safely cancelled manually, or using `TaskScheduler.cancel(task)`.
"""
task = asyncio.get_event_loop().create_task(coro)
self._tasks.add(task)
return task
|
Starts execution of a coroutine.
The created asyncio.Task is returned, and added to managed tasks.
The scheduler guarantees that it is cancelled during application shutdown,
regardless of whether it was already cancelled manually.
Args:
coro (Coroutine):
The coroutine to be wrapped in a task, and executed.
Returns:
asyncio.Task: An awaitable Task object.
During Aiohttp shutdown, the scheduler will attempt to cancel and await this task.
The task can be safely cancelled manually, or using `TaskScheduler.cancel(task)`.
|
entailment
|
async def cancel(self, task: asyncio.Task, wait_for: bool = True) -> Any:
"""
Cancels and waits for an `asyncio.Task` to finish.
Removes it from the collection of managed tasks.
Args:
task (asyncio.Task):
The to be cancelled task.
It is not required that the task was created with `TaskScheduler.create_task()`.
wait_for (bool, optional):
Whether to wait for the task to finish execution.
If falsey, this function returns immediately after cancelling the task.
Returns:
Any: The return value of `task`. None if `wait_for` is falsey.
"""
if task is None:
return
task.cancel()
with suppress(KeyError):
self._tasks.remove(task)
with suppress(Exception):
return (await task) if wait_for else None
|
Cancels and waits for an `asyncio.Task` to finish.
Removes it from the collection of managed tasks.
Args:
task (asyncio.Task):
The to be cancelled task.
It is not required that the task was created with `TaskScheduler.create_task()`.
wait_for (bool, optional):
Whether to wait for the task to finish execution.
If falsey, this function returns immediately after cancelling the task.
Returns:
Any: The return value of `task`. None if `wait_for` is falsey.
|
entailment
|
def create_content_addressed_github_uri(uri: URI) -> URI:
"""
Returns a content-addressed Github "git_url" that conforms to this scheme.
https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha
Accepts Github-defined "url" that conforms to this scheme
https://api.github.com/repos/:owner/:repo/contents/:path/:to/manifest.json
"""
if not is_valid_api_github_uri(uri):
raise CannotHandleURI(f"{uri} does not conform to Github's API 'url' scheme.")
response = requests.get(uri)
response.raise_for_status()
contents = json.loads(response.content)
if contents["type"] != "file":
raise CannotHandleURI(
f"Expected url to point to a 'file' type, instead received {contents['type']}."
)
return contents["git_url"]
|
Returns a content-addressed Github "git_url" that conforms to this scheme.
https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha
Accepts Github-defined "url" that conforms to this scheme
https://api.github.com/repos/:owner/:repo/contents/:path/:to/manifest.json
|
entailment
|
def is_valid_github_uri(uri: URI, expected_path_terms: Tuple[str, ...]) -> bool:
"""
Return a bool indicating whether or not the URI fulfills the following specs
Valid Github URIs *must*:
- Have 'https' scheme
- Have 'api.github.com' authority
- Have a path that contains all "expected_path_terms"
"""
if not is_text(uri):
return False
parsed = parse.urlparse(uri)
path, scheme, authority = parsed.path, parsed.scheme, parsed.netloc
if not all((path, scheme, authority)):
return False
if any(term for term in expected_path_terms if term not in path):
return False
if scheme != "https":
return False
if authority != GITHUB_API_AUTHORITY:
return False
return True
|
Return a bool indicating whether or not the URI fulfills the following specs
Valid Github URIs *must*:
- Have 'https' scheme
- Have 'api.github.com' authority
- Have a path that contains all "expected_path_terms"
|
entailment
|
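To illustrate the checks above, here is a standalone sketch against a sample URI; the repository path and the particular path terms are illustrative, and the authority is assumed to be 'api.github.com'.
from urllib.parse import urlparse

uri = "https://api.github.com/repos/ethpm/ethpm-spec/git/blobs/a1b2c3d4"  # placeholder
parsed = urlparse(uri)
assert parsed.scheme == "https"
assert parsed.netloc == "api.github.com"
assert all(term in parsed.path for term in ("repos", "git", "blobs"))
print("looks like a valid content-addressed GitHub API URI")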
def validate_blob_uri_contents(contents: bytes, blob_uri: str) -> None:
"""
Raises an exception if the sha1 hash of the contents does not match the hash found in the
blob_uri. Formula for how git calculates the hash found here:
http://alblue.bandlem.com/2011/08/git-tip-of-week-objects.html
"""
blob_path = parse.urlparse(blob_uri).path
blob_hash = blob_path.split("/")[-1]
contents_str = to_text(contents)
content_length = len(contents_str)
hashable_contents = "blob " + str(content_length) + "\0" + contents_str
hash_object = hashlib.sha1(to_bytes(text=hashable_contents))
if hash_object.hexdigest() != blob_hash:
raise ValidationError(
f"Hash of contents fetched from {blob_uri} do not match its hash: {blob_hash}."
)
|
Raises an exception if the sha1 hash of the contents does not match the hash found in the
blob_uri. Formula for how git calculates the hash found here:
http://alblue.bandlem.com/2011/08/git-tip-of-week-objects.html
|
entailment
|
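A worked example of the git blob hash formula referenced above (note that git hashes the byte length of the contents; for ASCII text that matches the character count used in the validator).
import hashlib

contents = b"hello\n"
hashable = b"blob " + str(len(contents)).encode() + b"\0" + contents
print(hashlib.sha1(hashable).hexdigest())
# ce013625030ba8dba906f756967f9e9ca394464a -- the same value `git hash-object` reports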
def parse_registry_uri(uri: str) -> RegistryURI:
"""
Validate and return (authority, pkg name, version) from a valid registry URI
"""
validate_registry_uri(uri)
parsed_uri = parse.urlparse(uri)
authority = parsed_uri.netloc
pkg_name = parsed_uri.path.strip("/")
pkg_version = parsed_uri.query.lstrip("version=").strip("/")
return RegistryURI(authority, pkg_name, pkg_version)
|
Validate and return (authority, pkg name, version) from a valid registry URI
|
entailment
|
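The decomposition performed above can be seen directly with urlparse; the URI below is illustrative. Note that str.lstrip strips a set of characters rather than a literal prefix, which happens to work for a query of the form 'version=...'.
from urllib.parse import urlparse

uri = "ercxxx://registry.eth/greeter?version=1.0.0"   # illustrative registry URI
parsed = urlparse(uri)
print(parsed.netloc)                    # registry.eth  -> authority
print(parsed.path.strip("/"))           # greeter       -> package name
print(parsed.query.lstrip("version="))  # 1.0.0         -> version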
def is_supported_content_addressed_uri(uri: URI) -> bool:
"""
Returns a bool indicating whether provided uri is currently supported.
Currently Py-EthPM only supports IPFS and Github blob content-addressed uris.
"""
if not is_ipfs_uri(uri) and not is_valid_content_addressed_github_uri(uri):
return False
return True
|
Returns a bool indicating whether provided uri is currently supported.
Currently Py-EthPM only supports IPFS and Github blob content-addressed uris.
|
entailment
|
def create_BIP122_uri(
chain_id: str, resource_type: str, resource_identifier: str
) -> URI:
"""
See: https://github.com/bitcoin/bips/blob/master/bip-0122.mediawiki
"""
if resource_type != BLOCK:
raise ValueError("Invalid resource_type. Must be one of 'block'")
elif not is_block_or_transaction_hash(resource_identifier):
raise ValueError(
"Invalid resource_identifier. Must be a hex encoded 32 byte value"
)
elif not is_block_or_transaction_hash(chain_id):
raise ValueError("Invalid chain_id. Must be a hex encoded 32 byte value")
return URI(
parse.urlunsplit(
[
"blockchain",
remove_0x_prefix(chain_id),
f"{resource_type}/{remove_0x_prefix(resource_identifier)}",
"",
"",
]
)
)
|
See: https://github.com/bitcoin/bips/blob/master/bip-0122.mediawiki
|
entailment
|
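A standalone sketch of the URI shape produced above, using urlunsplit directly; the 32-byte hex values are placeholders.
from urllib.parse import urlunsplit

chain_id = "ab" * 32            # placeholder genesis block hash, without 0x prefix
block_hash = "cd" * 32          # placeholder block hash, without 0x prefix
uri = urlunsplit(["blockchain", chain_id, f"block/{block_hash}", "", ""])
print(uri)
# blockchain://abab...ab/block/cdcd...cd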
def update_w3(self, w3: Web3) -> "Package":
"""
Returns a new instance of `Package` containing the same manifest,
but connected to a different web3 instance.
.. doctest::
>>> new_w3 = Web3(Web3.EthereumTesterProvider())
>>> NewPackage = OwnedPackage.update_w3(new_w3)
>>> assert NewPackage.w3 == new_w3
>>> assert OwnedPackage.manifest == NewPackage.manifest
"""
validate_w3_instance(w3)
return Package(self.manifest, w3, self.uri)
|
Returns a new instance of `Package` containing the same manifest,
but connected to a different web3 instance.
.. doctest::
>>> new_w3 = Web3(Web3.EthereumTesterProvider())
>>> NewPackage = OwnedPackage.update_w3(new_w3)
>>> assert NewPackage.w3 == new_w3
>>> assert OwnedPackage.manifest == NewPackage.manifest
|
entailment
|
def from_file(cls, file_path: Path, w3: Web3) -> "Package":
"""
Returns a ``Package`` instantiated by a manifest located at the provided Path.
``file_path`` arg must be a ``pathlib.Path`` instance.
A valid ``Web3`` instance is required to instantiate a ``Package``.
"""
if isinstance(file_path, Path):
raw_manifest = file_path.read_text()
validate_raw_manifest_format(raw_manifest)
manifest = json.loads(raw_manifest)
else:
raise TypeError(
"The Package.from_file method expects a pathlib.Path instance."
f"Got {type(file_path)} instead."
)
return cls(manifest, w3, file_path.as_uri())
|
Returns a ``Package`` instantiated by a manifest located at the provided Path.
``file_path`` arg must be a ``pathlib.Path`` instance.
A valid ``Web3`` instance is required to instantiate a ``Package``.
|
entailment
|
def from_uri(cls, uri: URI, w3: Web3) -> "Package":
"""
Returns a Package object instantiated by a manifest located at a content-addressed URI.
A valid ``Web3`` instance is also required.
URI schemes supported:
- IPFS `ipfs://Qm...`
- HTTP `https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha`
- Registry `ercXXX://registry.eth/greeter?version=1.0.0`
.. code:: python
OwnedPackage = Package.from_uri('ipfs://QmbeVyFLSuEUxiXKwSsEjef7icpdTdA4kGG9BcrJXKNKUW', w3) # noqa: E501
"""
contents = to_text(resolve_uri_contents(uri))
validate_raw_manifest_format(contents)
manifest = json.loads(contents)
return cls(manifest, w3, uri)
|
Returns a Package object instantiated by a manifest located at a content-addressed URI.
A valid ``Web3`` instance is also required.
URI schemes supported:
- IPFS `ipfs://Qm...`
- HTTP `https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha`
- Registry `ercXXX://registry.eth/greeter?version=1.0.0`
.. code:: python
OwnedPackage = Package.from_uri('ipfs://QmbeVyFLSuEUxiXKwSsEjef7icpdTdA4kGG9BcrJXKNKUW', w3) # noqa: E501
|
entailment
|
def get_contract_factory(self, name: ContractName) -> Contract:
"""
Return the contract factory for a given contract type, generated from the data available
in ``Package.manifest``. Contract factories are accessible from the package class.
.. code:: python
Owned = OwnedPackage.get_contract_factory('owned')
In cases where a contract uses a library, the contract factory will have
unlinked bytecode. The ``ethpm`` package ships with its own subclass of
``web3.contract.Contract``, ``ethpm.contract.LinkableContract`` with a few extra
methods and properties related to bytecode linking.
.. code:: python
>>> math = owned_package.contract_factories.math
>>> math.needs_bytecode_linking
True
>>> linked_math = math.link_bytecode({'MathLib': '0x1234...'})
>>> linked_math.needs_bytecode_linking
False
"""
validate_contract_name(name)
if "contract_types" not in self.manifest:
raise InsufficientAssetsError(
"This package does not contain any contract type data."
)
try:
contract_data = self.manifest["contract_types"][name]
except KeyError:
raise InsufficientAssetsError(
"This package does not contain any package data to generate "
f"a contract factory for contract type: {name}. Available contract types include: "
f"{ list(self.manifest['contract_types'].keys()) }."
)
validate_minimal_contract_factory_data(contract_data)
contract_kwargs = generate_contract_factory_kwargs(contract_data)
contract_factory = self.w3.eth.contract(**contract_kwargs)
return contract_factory
|
Return the contract factory for a given contract type, generated from the data available
in ``Package.manifest``. Contract factories are accessible from the package class.
.. code:: python
Owned = OwnedPackage.get_contract_factory('owned')
In cases where a contract uses a library, the contract factory will have
unlinked bytecode. The ``ethpm`` package ships with its own subclass of
``web3.contract.Contract``, ``ethpm.contract.LinkableContract`` with a few extra
methods and properties related to bytecode linking.
.. code:: python
>>> math = owned_package.contract_factories.math
>>> math.needs_bytecode_linking
True
>>> linked_math = math.link_bytecode({'MathLib': '0x1234...'})
>>> linked_math.needs_bytecode_linking
False
|
entailment
|
def get_contract_instance(self, name: ContractName, address: Address) -> Contract:
"""
Will return a ``Web3.contract`` instance generated from the contract type data available
in ``Package.manifest`` and the provided ``address``. The provided ``address`` must be
valid on the connected chain available through ``Package.w3``.
"""
validate_address(address)
validate_contract_name(name)
try:
self.manifest["contract_types"][name]["abi"]
except KeyError:
raise InsufficientAssetsError(
"Package does not have the ABI required to generate a contract instance "
f"for contract: {name} at address: {address}."
)
contract_kwargs = generate_contract_factory_kwargs(
self.manifest["contract_types"][name]
)
canonical_address = to_canonical_address(address)
contract_instance = self.w3.eth.contract(
address=canonical_address, **contract_kwargs
)
return contract_instance
|
Will return a ``Web3.contract`` instance generated from the contract type data available
in ``Package.manifest`` and the provided ``address``. The provided ``address`` must be
valid on the connected chain available through ``Package.w3``.
|
entailment
|
def build_dependencies(self) -> "Dependencies":
"""
Return `Dependencies` instance containing the build dependencies available on this Package.
The ``Package`` class should provide access to the full dependency tree.
.. code:: python
>>> owned_package.build_dependencies['zeppelin']
<ZeppelinPackage>
"""
validate_build_dependencies_are_present(self.manifest)
dependencies = self.manifest["build_dependencies"]
dependency_packages = {}
for name, uri in dependencies.items():
try:
validate_build_dependency(name, uri)
dependency_package = Package.from_uri(uri, self.w3)
except PyEthPMError as e:
raise FailureToFetchIPFSAssetsError(
f"Failed to retrieve build dependency: {name} from URI: {uri}.\n"
f"Got error: {e}."
)
else:
dependency_packages[name] = dependency_package
return Dependencies(dependency_packages)
|
Return `Dependencies` instance containing the build dependencies available on this Package.
The ``Package`` class should provide access to the full dependency tree.
.. code:: python
>>> owned_package.build_dependencies['zeppelin']
<ZeppelinPackage>
|
entailment
|
def deployments(self) -> Union["Deployments", Dict[None, None]]:
"""
Returns a ``Deployments`` object containing all the deployment data and contract
factories of a ``Package``'s `contract_types`. Automatically filters deployments
to only expose those available on the current ``Package.w3`` instance.
.. code:: python
package.deployments.get_instance("ContractType")
"""
if not check_for_deployments(self.manifest):
return {}
all_blockchain_uris = self.manifest["deployments"].keys()
matching_uri = validate_single_matching_uri(all_blockchain_uris, self.w3)
deployments = self.manifest["deployments"][matching_uri]
all_contract_factories = {
deployment_data["contract_type"]: self.get_contract_factory(
deployment_data["contract_type"]
)
for deployment_data in deployments.values()
}
validate_deployments_tx_receipt(deployments, self.w3, allow_missing_data=True)
linked_deployments = get_linked_deployments(deployments)
if linked_deployments:
for deployment_data in linked_deployments.values():
on_chain_bytecode = self.w3.eth.getCode(
to_canonical_address(deployment_data["address"])
)
unresolved_linked_refs = normalize_linked_references(
deployment_data["runtime_bytecode"]["link_dependencies"]
)
resolved_linked_refs = tuple(
self._resolve_linked_references(link_ref, deployments)
for link_ref in unresolved_linked_refs
)
for linked_ref in resolved_linked_refs:
validate_linked_references(linked_ref, on_chain_bytecode)
return Deployments(deployments, all_contract_factories, self.w3)
|
Returns a ``Deployments`` object containing all the deployment data and contract
factories of a ``Package``'s `contract_types`. Automatically filters deployments
to only expose those available on the current ``Package.w3`` instance.
.. code:: python
package.deployments.get_instance("ContractType")
|
entailment
|
def dummy_ipfs_pin(path: Path) -> Dict[str, str]:
"""
Return IPFS data as if file was pinned to an actual node.
"""
ipfs_return = {
"Hash": generate_file_hash(path.read_bytes()),
"Name": path.name,
"Size": str(path.stat().st_size),
}
return ipfs_return
|
Return IPFS data as if file was pinned to an actual node.
|
entailment
|
def extract_ipfs_path_from_uri(value: str) -> str:
"""
Return the path from an IPFS URI.
Path = IPFS hash & following path.
"""
parse_result = parse.urlparse(value)
if parse_result.netloc:
if parse_result.path:
return "".join((parse_result.netloc, parse_result.path.rstrip("/")))
else:
return parse_result.netloc
else:
return parse_result.path.strip("/")
|
Return the path from an IPFS URI.
Path = IPFS hash & following path.
|
entailment
|
def is_ipfs_uri(value: str) -> bool:
"""
Return a bool indicating whether or not the value is a valid IPFS URI.
"""
parse_result = parse.urlparse(value)
if parse_result.scheme != "ipfs":
return False
if not parse_result.netloc and not parse_result.path:
return False
return True
|
Return a bool indicating whether or not the value is a valid IPFS URI.
|
entailment
|
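Both IPFS helpers above rest on how urlparse splits an `ipfs://` URI; the hash below is a placeholder.
from urllib.parse import urlparse

parsed = urlparse("ipfs://QmTKB75Y73zhNbD3Y73xeXGjYrZHmaXXNxoZqGCagu7r8u/readme")
print(parsed.scheme)   # ipfs
print(parsed.netloc)   # QmTKB75Y73zhNbD3Y73xeXGjYrZHmaXXNxoZqGCagu7r8u
print(parsed.path)     # /readme
# extract_ipfs_path_from_uri() would join these into
# 'QmTKB75Y73zhNbD3Y73xeXGjYrZHmaXXNxoZqGCagu7r8u/readme'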
def get_instance(self, contract_name: str) -> None:
"""
Fetches a contract instance belonging to deployment
after validating contract name.
"""
self._validate_name_and_references(contract_name)
# Use a deployment's "contract_type" to lookup contract factory
# in case the deployment uses a contract alias
contract_type = self.deployment_data[contract_name]["contract_type"]
factory = self.contract_factories[contract_type]
address = to_canonical_address(self.deployment_data[contract_name]["address"])
contract_kwargs = {
"abi": factory.abi,
"bytecode": factory.bytecode,
"bytecode_runtime": factory.bytecode_runtime,
}
return self.w3.eth.contract(address=address, **contract_kwargs)
|
Fetches a contract instance belonging to deployment
after validating contract name.
|
entailment
|
def validate_address(address: Any) -> None:
"""
Raise a ValidationError if an address is not canonicalized.
"""
if not is_address(address):
raise ValidationError(f"Expected an address, got: {address}")
if not is_canonical_address(address):
raise ValidationError(
"Py-EthPM library only accepts canonicalized addresses. "
f"{address} is not in the accepted format."
)
|
Raise a ValidationError if an address is not canonicalized.
|
entailment
|
def validate_empty_bytes(offset: int, length: int, bytecode: bytes) -> None:
"""
Validates that segment [`offset`:`offset`+`length`] of
`bytecode` is comprised of empty bytes (b'\00').
"""
slot_length = offset + length
slot = bytecode[offset:slot_length]
if slot != bytearray(length):
raise ValidationError(
f"Bytecode segment: [{offset}:{slot_length}] is not comprised of empty bytes, "
f"rather: {slot}."
)
|
Validates that segment [`offset`:`offset`+`length`] of
`bytecode` is comprised of empty bytes (b'\00').
|
entailment
|
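A worked example of the empty-bytes check above, with illustrative bytecode values.
offset, length = 2, 20

good = b"\x60\x60" + bytes(20) + b"\x60"                      # 20-byte zeroed link slot
assert good[offset:offset + length] == bytearray(length)      # passes the check

bad = b"\x60\x60" + bytes(10) + b"\x01" + bytes(9) + b"\x60"
assert bad[offset:offset + length] != bytearray(length)       # would raise ValidationError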
def validate_package_name(pkg_name: str) -> None:
"""
Raise an exception if the value is not a valid package name
as defined in the EthPM-Spec.
"""
if not bool(re.match(PACKAGE_NAME_REGEX, pkg_name)):
raise ValidationError(f"{pkg_name} is not a valid package name.")
|
Raise an exception if the value is not a valid package name
as defined in the EthPM-Spec.
|
entailment
|
def validate_registry_uri(uri: str) -> None:
"""
Raise an exception if the URI does not conform to the registry URI scheme.
"""
parsed = parse.urlparse(uri)
scheme, authority, pkg_name, query = (
parsed.scheme,
parsed.netloc,
parsed.path,
parsed.query,
)
validate_registry_uri_scheme(scheme)
validate_registry_uri_authority(authority)
if query:
validate_registry_uri_version(query)
validate_package_name(pkg_name[1:])
|
Raise an exception if the URI does not conform to the registry URI scheme.
|
entailment
|
def validate_registry_uri_authority(auth: str) -> None:
"""
Raise an exception if the authority is not a valid ENS domain
or a valid checksummed contract address.
"""
if is_ens_domain(auth) is False and not is_checksum_address(auth):
raise ValidationError(f"{auth} is not a valid registry URI authority.")
|
Raise an exception if the authority is not a valid ENS domain
or a valid checksummed contract address.
|
entailment
|
def validate_registry_uri_version(query: str) -> None:
"""
Raise an exception if the version param is malformed.
"""
query_dict = parse.parse_qs(query, keep_blank_values=True)
if "version" not in query_dict:
raise ValidationError(f"{query} is not a correctly formatted version param.")
|
Raise an exception if the version param is malformed.
|
entailment
|
def validate_single_matching_uri(all_blockchain_uris: List[str], w3: Web3) -> str:
"""
Return a single block URI after validating that it is the *only* URI in
all_blockchain_uris that matches the w3 instance.
"""
matching_uris = [
uri for uri in all_blockchain_uris if check_if_chain_matches_chain_uri(w3, uri)
]
if not matching_uris:
raise ValidationError("Package has no matching URIs on chain.")
elif len(matching_uris) != 1:
raise ValidationError(
f"Package has too many ({len(matching_uris)}) matching URIs: {matching_uris}."
)
return matching_uris[0]
|
Return a single block URI after validating that it is the *only* URI in
all_blockchain_uris that matches the w3 instance.
|
entailment
|
def validate_meta_object(meta: Dict[str, Any], allow_extra_meta_fields: bool) -> None:
"""
Validates that every key is one of `META_FIELDS` and has a value of the expected type.
"""
for key, value in meta.items():
if key in META_FIELDS:
if type(value) is not META_FIELDS[key]:
raise ValidationError(
f"Values for {key} are expected to have the type {META_FIELDS[key]}, "
f"instead got {type(value)}."
)
elif allow_extra_meta_fields:
if key[:2] != "x-":
raise ValidationError(
"Undefined meta fields need to begin with 'x-', "
f"{key} is not a valid undefined meta field."
)
else:
raise ValidationError(
f"{key} is not a permitted meta field. To allow undefined fields, "
"set `allow_extra_meta_fields` to True."
)
|
Validates that every key is one of `META_FIELDS` and has a value of the expected type.
|
entailment
|
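An illustrative meta object that would be expected to pass the check above when allow_extra_meta_fields=True; the assumption here is that license/description are string fields, authors is a list field, and any extra field must carry an 'x-' prefix.
meta = {
    "license": "MIT",
    "authors": ["author@example.com"],
    "description": "An example package.",
    "x-hash": "0xdeadbeef",          # extra field, allowed because of the 'x-' prefix
}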
def validate_manifest_against_schema(manifest: Dict[str, Any]) -> None:
"""
Load and validate manifest against schema
located at MANIFEST_SCHEMA_PATH.
"""
schema_data = _load_schema_data()
try:
validate(manifest, schema_data)
except jsonValidationError as e:
raise ValidationError(
f"Manifest invalid for schema version {schema_data['version']}. "
f"Reason: {e.message}"
)
|
Load and validate manifest against schema
located at MANIFEST_SCHEMA_PATH.
|
entailment
|
def validate_manifest_deployments(manifest: Dict[str, Any]) -> None:
"""
Validate that a manifest's deployments contracts reference existing contract_types.
"""
if set(("contract_types", "deployments")).issubset(manifest):
all_contract_types = list(manifest["contract_types"].keys())
all_deployments = list(manifest["deployments"].values())
all_deployment_names = extract_contract_types_from_deployments(all_deployments)
missing_contract_types = set(all_deployment_names).difference(
all_contract_types
)
if missing_contract_types:
raise ValidationError(
f"Manifest missing references to contracts: {missing_contract_types}."
)
|
Validate that a manifest's deployments contracts reference existing contract_types.
|
entailment
|
def validate_raw_manifest_format(raw_manifest: str) -> None:
"""
Raise a ValidationError if a manifest ...
- is not tightly packed (i.e. no linebreaks or extra whitespace)
- does not have alphabetically sorted keys
- has duplicate keys
- is not UTF-8 encoded
- has a trailing newline
"""
try:
manifest_dict = json.loads(raw_manifest, encoding="UTF-8")
except json.JSONDecodeError as err:
raise json.JSONDecodeError(
"Failed to load package data. File is not a valid JSON document.",
err.doc,
err.pos,
)
compact_manifest = json.dumps(manifest_dict, sort_keys=True, separators=(",", ":"))
if raw_manifest != compact_manifest:
raise ValidationError(
"The manifest appears to be malformed. Please ensure that it conforms to the "
"EthPM-Spec for document format. "
"http://ethpm.github.io/ethpm-spec/package-spec.html#document-format "
)
|
Raise a ValidationError if a manifest ...
- is not tightly packed (i.e. no linebreaks or extra whitespace)
- does not have alphabetically sorted keys
- has duplicate keys
- is not UTF-8 encoded
- has a trailing newline
|
entailment
|
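The "tightly packed" form the check above expects can be produced with the same json.dumps call the validator uses; the manifest contents below are illustrative.
import json

manifest = {"manifest_version": "2", "package_name": "owned", "version": "1.0.0"}
raw = json.dumps(manifest, sort_keys=True, separators=(",", ":"))
print(raw)
# {"manifest_version":"2","package_name":"owned","version":"1.0.0"}
assert raw == json.dumps(json.loads(raw), sort_keys=True, separators=(",", ":"))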
def build(obj: Dict[str, Any], *fns: Callable[..., Any]) -> Dict[str, Any]:
"""
Wrapper function to pipe manifest through build functions.
Does not validate the manifest by default.
"""
return pipe(obj, *fns)
|
Wrapper function to pipe manifest through build functions.
Does not validate the manifest by default.
|
entailment
|
def get_names_and_paths(compiler_output: Dict[str, Any]) -> Dict[str, str]:
"""
Return a mapping of contract name to relative path as defined in compiler output.
"""
return {
contract_name: make_path_relative(path)
for path in compiler_output
for contract_name in compiler_output[path].keys()
}
|
Return a mapping of contract name to relative path as defined in compiler output.
|
entailment
|
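For reference, the shape of the solc-style compiler output assumed above and the mapping derived from it, restated standalone with illustrative names.
compiler_output = {
    "contracts/Owned.sol": {"Owned": {"abi": []}},
    "contracts/Greeter.sol": {"Greeter": {"abi": []}},
}
names_to_paths = {
    name: path if path.startswith("./") else f"./{path}"
    for path, contracts in compiler_output.items()
    for name in contracts
}
print(names_to_paths)
# {'Owned': './contracts/Owned.sol', 'Greeter': './contracts/Greeter.sol'}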
def make_path_relative(path: str) -> str:
"""
Returns the given path prefixed with "./" if the path
is not already relative in the compiler output.
"""
if "../" in path:
raise ManifestBuildingError(
f"Path: {path} appears to be outside of the virtual source tree. "
"Please make sure all sources are within the virtual source tree root directory."
)
if path[:2] != "./":
return f"./{path}"
return path
|
Returns the given path prefixed with "./" if the path
is not already relative in the compiler output.
|
entailment
|
def contract_type(
name: str,
compiler_output: Dict[str, Any],
alias: Optional[str] = None,
abi: Optional[bool] = False,
compiler: Optional[bool] = False,
contract_type: Optional[bool] = False,
deployment_bytecode: Optional[bool] = False,
natspec: Optional[bool] = False,
runtime_bytecode: Optional[bool] = False,
) -> Manifest:
"""
Returns a copy of manifest with added contract_data field as specified by kwargs.
If no kwargs are present, all available contract_data found in the compiler output
will be included.
To include specific contract_data fields, add kwarg set to True (i.e. `abi=True`)
To alias a contract_type, include a kwarg `alias` (i.e. `alias="OwnedAlias"`)
If only an alias kwarg is provided, all available contract data will be included.
Kwargs must match fields as defined in the EthPM Spec (except "alias") if user
wants to include them in custom contract_type.
"""
contract_type_fields = {
"contract_type": contract_type,
"deployment_bytecode": deployment_bytecode,
"runtime_bytecode": runtime_bytecode,
"abi": abi,
"natspec": natspec,
"compiler": compiler,
}
selected_fields = [k for k, v in contract_type_fields.items() if v]
return _contract_type(name, compiler_output, alias, selected_fields)
|
Returns a copy of manifest with added contract_data field as specified by kwargs.
If no kwargs are present, all available contract_data found in the compiler output
will be included.
To include specific contract_data fields, add kwarg set to True (i.e. `abi=True`)
To alias a contract_type, include a kwarg `alias` (i.e. `alias="OwnedAlias"`)
If only an alias kwarg is provided, all available contract data will be included.
Kwargs must match fields as defined in the EthPM Spec (except "alias") if user
wants to include them in custom contract_type.
|
entailment
|
def filter_all_data_by_selected_fields(
all_type_data: Dict[str, Any], selected_fields: List[str]
) -> Iterable[Tuple[str, Any]]:
"""
Raises exception if selected field data is not available in the contract type data
automatically gathered by normalize_compiler_output. Otherwise, returns the data.
"""
for field in selected_fields:
if field in all_type_data:
yield field, all_type_data[field]
else:
raise ManifestBuildingError(
f"Selected field: {field} not available in data collected from solc output: "
f"{list(sorted(all_type_data.keys()))}. Please make sure the relevant data "
"is present in your solc output."
)
|
Raises exception if selected field data is not available in the contract type data
automatically gathered by normalize_compiler_output. Otherwise, returns the data.
|
entailment
|
def normalize_compiler_output(compiler_output: Dict[str, Any]) -> Dict[str, Any]:
"""
Return compiler output with normalized fields for each contract type,
as specified in `normalize_contract_type`.
"""
paths_and_names = [
(path, contract_name)
for path in compiler_output
for contract_name in compiler_output[path].keys()
]
paths, names = zip(*paths_and_names)
if len(names) != len(set(names)):
raise ManifestBuildingError(
"Duplicate contract names were found in the compiler output."
)
return {
name: normalize_contract_type(compiler_output[path][name])
for path, name in paths_and_names
}
|
Return compiler output with normalized fields for each contract type,
as specified in `normalize_contract_type`.
|
entailment
|
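For orientation, a hedged sketch of the shapes involved: solc standard-JSON output is keyed by source path and then contract name, while the normalized result is keyed by contract name alone, which is why duplicate names across paths raise. Field contents are abbreviated and the exact normalized values depend on `normalize_contract_type`.
compiler_output = {
    "contracts/Owned.sol": {"Owned": {"abi": []}},
    "contracts/Safe.sol": {"Safe": {"abi": []}},
}
normalized = normalize_compiler_output(compiler_output)
# -> roughly {"Owned": {"abi": []}, "Safe": {"abi": []}}
#    (assuming normalize_contract_type's generator is coerced to a dict,
#     e.g. via a @to_dict decorator not shown here)
# A second "Owned" under a different source path would raise ManifestBuildingError.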
def normalize_contract_type(
contract_type_data: Dict[str, Any]
) -> Iterable[Tuple[str, Any]]:
"""
Serialize contract_data found in compiler output to the defined fields.
"""
yield "abi", contract_type_data["abi"]
if "evm" in contract_type_data:
if "bytecode" in contract_type_data["evm"]:
yield "deployment_bytecode", normalize_bytecode_object(
contract_type_data["evm"]["bytecode"]
)
if "deployedBytecode" in contract_type_data["evm"]:
yield "runtime_bytecode", normalize_bytecode_object(
contract_type_data["evm"]["deployedBytecode"]
)
if any(key in contract_type_data for key in NATSPEC_FIELDS):
natspec = deep_merge_dicts(
contract_type_data.get("userdoc", {}), contract_type_data.get("devdoc", {})
)
yield "natspec", natspec
# make sure metadata isn't an empty string in solc output
if "metadata" in contract_type_data and contract_type_data["metadata"]:
yield "compiler", normalize_compiler_object(
json.loads(contract_type_data["metadata"])
)
|
Serialize contract_data found in compiler output to the defined fields.
|
entailment
|
def process_bytecode(link_refs: Dict[str, Any], bytecode: bytes) -> str:
"""
Replace link_refs in bytecode with 0's.
"""
all_offsets = [y for x in link_refs.values() for y in x.values()]
# Link ref validation.
validate_link_ref_fns = (
validate_link_ref(ref["start"] * 2, ref["length"] * 2)
for ref in concat(all_offsets)
)
pipe(bytecode, *validate_link_ref_fns)
# Convert link_refs in bytecode to 0's
link_fns = (
replace_link_ref_in_bytecode(ref["start"] * 2, ref["length"] * 2)
for ref in concat(all_offsets)
)
processed_bytecode = pipe(bytecode, *link_fns)
return add_0x_prefix(processed_bytecode)
|
Replace link_refs in bytecode with 0's.
|
entailment
|
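To make the offset arithmetic concrete: solc reports each link ref's `start`/`length` in bytes, and the `* 2` converts them to hex-character indices before the placeholder region is zeroed. Below is a standalone sketch of a single replacement; the real `validate_link_ref` / `replace_link_ref_in_bytecode` helpers are not shown in this file, so this is an approximation, not the actual implementation.
def zero_link_ref(hex_bytecode: str, start: int, length: int) -> str:
    # start/length are byte offsets from solc; *2 converts them to hex chars
    s, n = start * 2, length * 2
    return hex_bytecode[:s] + "0" * n + hex_bytecode[s + n:]

placeholder = "__MathLib__".ljust(40, "_")   # solc-style 40-char (20-byte) placeholder
unlinked = "6060" + placeholder + "6052"
zero_link_ref(unlinked, start=2, length=20)
# -> "6060" + "0" * 40 + "6052"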
def deployment_type(
*,
contract_instance: str,
contract_type: str,
deployment_bytecode: Dict[str, Any] = None,
runtime_bytecode: Dict[str, Any] = None,
compiler: Dict[str, Any] = None,
) -> Manifest:
"""
Returns a callable that allows the user to add deployments of the same type
across multiple chains.
"""
return _deployment_type(
contract_instance,
contract_type,
deployment_bytecode,
runtime_bytecode,
compiler,
)
|
Returns a callable that allows the user to add deployments of the same type
across multiple chains.
|
entailment
|
def deployment(
*,
block_uri: URI,
contract_instance: str,
contract_type: str,
address: HexStr,
transaction: HexStr = None,
block: HexStr = None,
deployment_bytecode: Dict[str, Any] = None,
runtime_bytecode: Dict[str, Any] = None,
compiler: Dict[str, Any] = None,
) -> Manifest:
"""
Returns a manifest with the newly included deployment. Requires a valid blockchain URI;
however, no validation is provided that this URI is unique amongst the other deployment
URIs, so the user must take care that each blockchain URI represents a unique blockchain.
"""
return _deployment(
contract_instance,
contract_type,
deployment_bytecode,
runtime_bytecode,
compiler,
block_uri,
address,
transaction,
block,
)
|
Returns a manifest with the newly included deployment. Requires a valid blockchain URI;
however, no validation is provided that this URI is unique amongst the other deployment
URIs, so the user must take care that each blockchain URI represents a unique blockchain.
|
entailment
|
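A hedged usage sketch of `deployment` inside a builder pipeline; the BIP122-style block URI, block hash, address, and contract names below are illustrative values only, and the `build()` call shape is an assumption.
mainnet_genesis = "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
example_block_hash = "00" * 32          # illustrative only
manifest_with_deployment = build(
    manifest,
    deployment(
        block_uri=f"blockchain://{mainnet_genesis}/block/{example_block_hash}",
        contract_instance="Owned",
        contract_type="Owned",
        address="0xd3CdA913deB6f67967B99D67aCDFa1712C293601",
    ),
)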
def _build_deployments_object(
contract_type: str,
deployment_bytecode: Dict[str, Any],
runtime_bytecode: Dict[str, Any],
compiler: Dict[str, Any],
address: HexStr,
tx: HexStr,
block: HexStr,
manifest: Dict[str, Any],
) -> Iterable[Tuple[str, Any]]:
"""
Returns a dict with properly formatted deployment data.
"""
yield "contract_type", contract_type
yield "address", to_hex(address)
if deployment_bytecode:
yield "deployment_bytecode", deployment_bytecode
if compiler:
yield "compiler", compiler
if tx:
yield "transaction", tx
if block:
yield "block", block
if runtime_bytecode:
yield "runtime_bytecode", runtime_bytecode
|
Returns a dict with properly formatted deployment data.
|
entailment
|
def init_manifest(
package_name: str, version: str, manifest_version: Optional[str] = "2"
) -> Dict[str, Any]:
"""
Returns an initial dict with the minimal required fields for a valid manifest.
Should only be used as the first fn to be piped into a `build()` pipeline.
"""
return {
"package_name": package_name,
"version": version,
"manifest_version": manifest_version,
}
|
Returns an initial dict with the minimal required fields for a valid manifest.
Should only be used as the first fn to be piped into a `build()` pipeline.
|
entailment
|
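For reference, the seed dict returned by the function above, exactly as it constructs it:
init_manifest("owned", "1.0.0")
# -> {"package_name": "owned", "version": "1.0.0", "manifest_version": "2"}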
def write_to_disk(
manifest_root_dir: Optional[Path] = None,
manifest_name: Optional[str] = None,
prettify: Optional[bool] = False,
) -> Manifest:
"""
Write the active manifest to disk
Defaults
- Writes manifest to cwd unless Path is provided as manifest_root_dir.
- Writes manifest with a filename of Manifest[version].json unless a desired
manifest name (which must end in json) is provided as manifest_name.
- Writes the minified manifest version to disk unless prettify is set to True.
"""
return _write_to_disk(manifest_root_dir, manifest_name, prettify)
|
Write the active manifest to disk
Defaults
- Writes manifest to cwd unless Path is provided as manifest_root_dir.
- Writes manifest with a filename of Manifest[version].json unless a desired
manifest name (which must end in json) is provided as manifest_name.
- Writes the minified manifest version to disk unless prettify is set to True.
|
entailment
|
def pin_to_ipfs(
manifest: Manifest, *, backend: BaseIPFSBackend, prettify: Optional[bool] = False
) -> List[Dict[str, str]]:
"""
Returns the IPFS pin data after pinning the manifest to the provided IPFS Backend.
`pin_to_ipfs()` should *always* be the last argument in a builder, as it will return the pin
data and not the manifest.
"""
contents = format_manifest(manifest, prettify=prettify)
with tempfile.NamedTemporaryFile() as temp:
temp.write(to_bytes(text=contents))
temp.seek(0)
return backend.pin_assets(Path(temp.name))
|
Returns the IPFS pin data after pinning the manifest to the provided IPFS Backend.
`pin_to_ipfs()` should *always* be the last argument in a builder, as it will return the pin
data and not the manifest.
|
entailment
|
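A hedged call sketch; `ipfs_backend` stands in for any configured `BaseIPFSBackend` instance, and the shape of the pin response depends on that backend.
pin_data = pin_to_ipfs(manifest, backend=ipfs_backend, prettify=True)
# pin_data is the backend's pin response (typically name/hash/size entries),
# not a manifest -- which is why this must be the final step in a builder.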
def create_parser(default_name: str) -> argparse.ArgumentParser:
"""
Creates the default brewblox_service ArgumentParser.
Service-agnostic arguments are added.
The parser allows calling code to add additional arguments before using it in create_app()
Args:
default_name (str):
default value for the --name commandline argument.
Returns:
argparse.ArgumentParser: a Python ArgumentParser with defaults set.
"""
argparser = argparse.ArgumentParser(fromfile_prefix_chars='@')
argparser.add_argument('-H', '--host',
help='Host to which the app binds. [%(default)s]',
default='0.0.0.0')
argparser.add_argument('-p', '--port',
help='Port to which the app binds. [%(default)s]',
default=5000,
type=int)
argparser.add_argument('-o', '--output',
help='Logging output. [%(default)s]')
argparser.add_argument('-n', '--name',
help='Service name. This will be used as prefix for all endpoints. [%(default)s]',
default=default_name)
argparser.add_argument('--debug',
help='Run the app in debug mode. [%(default)s]',
action='store_true')
argparser.add_argument('--eventbus-host',
help='Hostname at which the eventbus can be reached [%(default)s]',
default='eventbus')
argparser.add_argument('--eventbus-port',
help='Port at which the eventbus can be reached [%(default)s]',
default=5672,
type=int)
return argparser
|
Creates the default brewblox_service ArgumentParser.
Service-agnostic arguments are added.
The parser allows calling code to add additional arguments before using it in create_app()
Args:
default_name (str):
default value for the --name commandline argument.
Returns:
argparse.ArgumentParser: a Python ArgumentParser with defaults set.
|
entailment
|
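As the docstring notes, a service typically extends the returned parser before handing it to `create_app()`. The `--device-serial` argument below is purely illustrative.
parser = create_parser(default_name='spark')
parser.add_argument('--device-serial',
                    help='Serial port of the connected device. [%(default)s]',
                    default=None)
app = create_app(parser=parser)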
def create_app(
default_name: str = None,
parser: argparse.ArgumentParser = None,
raw_args: List[str] = None
) -> web.Application:
"""
Creates and configures an Aiohttp application.
Args:
default_name (str, optional):
Default value for the --name commandline argument.
This value is required if `parser` is not provided.
This value will be ignored if `parser` is provided.
parser (argparse.ArgumentParser, optional):
Application-specific parser.
If not provided, the return value of `create_parser()` will be used.
raw_args (list of str, optional):
Explicit commandline arguments.
Defaults to sys.argv[1:]
Returns:
web.Application: A configured Aiohttp Application object.
This Application must be furnished, and is not yet running.
"""
if parser is None:
assert default_name, 'Default service name is required'
parser = create_parser(default_name)
args = parser.parse_args(raw_args)
_init_logging(args)
LOGGER.info(f'Creating [{args.name}] application')
app = web.Application()
app['config'] = vars(args)
return app
|
Creates and configures an Aiohttp application.
Args:
default_name (str, optional):
Default value for the --name commandline argument.
This value is required if `parser` is not provided.
This value will be ignored if `parser` is provided.
parser (argparse.ArgumentParser, optional):
Application-specific parser.
If not provided, the return value of `create_parser()` will be used.
raw_args (list of str, optional):
Explicit commandline arguments.
Defaults to sys.argv[1:]
Returns:
web.Application: A configured Aiohttp Application object.
This Application must be furnished, and is not yet running.
|
entailment
|
def furnish(app: web.Application):
"""
Configures Application routes, readying it for running.
This function modifies routes and resources that were added by calling code,
and must be called immediately prior to `run(app)`.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
"""
app_name = app['config']['name']
prefix = '/' + app_name.lstrip('/')
app.router.add_routes(routes)
cors_middleware.enable_cors(app)
# Configure CORS and prefixes on all endpoints.
known_resources = set()
for route in list(app.router.routes()):
if route.resource in known_resources:
continue
known_resources.add(route.resource)
route.resource.add_prefix(prefix)
# Configure swagger settings
# We set prefix explicitly here
aiohttp_swagger.setup_swagger(app,
swagger_url=prefix + '/api/doc',
description='',
title=f'Brewblox Service "{app_name}"',
api_version='0.0',
contact='development@brewpi.com')
LOGGER.info('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))
for route in app.router.routes():
LOGGER.info(f'Endpoint [{route.method}] {route.resource}')
for name, impl in app.get(features.FEATURES_KEY, {}).items():
LOGGER.info(f'Feature [{name}] {impl}')
|
Configures Application routes, readying it for running.
This function modifies routes and resources that were added by calling code,
and must be called immediately prior to `run(app)`.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
|
entailment
|
def run(app: web.Application):
"""
Runs the application in an async context.
This function will block indefinitely until the application is shut down.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
"""
host = app['config']['host']
port = app['config']['port']
# starts app. run_app() will automatically start the async context.
web.run_app(app, host=host, port=port)
|
Runs the application in an async context.
This function will block indefinitely until the application is shut down.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
|
entailment
|
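Putting the three service functions together, a minimal entry point might look like the sketch below (the service name is illustrative); as stated above, `furnish()` must come immediately before `run()`.
def main():
    app = create_app(default_name='myservice')
    # service-specific features and routes would be registered here
    furnish(app)
    run(app)          # blocks until the application is shut down

if __name__ == '__main__':
    main()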
def get_linked_deployments(deployments: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns all deployments found in a chain URI's deployment data that contain link dependencies.
"""
linked_deployments = {
dep: data
for dep, data in deployments.items()
if get_in(("runtime_bytecode", "link_dependencies"), data)
}
for deployment, data in linked_deployments.items():
if any(
link_dep["value"] == deployment
for link_dep in data["runtime_bytecode"]["link_dependencies"]
):
raise BytecodeLinkingError(
f"Link dependency found in {deployment} deployment that references its "
"own contract instance, which is disallowed"
)
return linked_deployments
|
Returns all deployments found in a chain URI's deployment data that contain link dependencies.
|
entailment
|
def validate_linked_references(
link_deps: Tuple[Tuple[int, bytes], ...], bytecode: bytes
) -> None:
"""
Validates that normalized linked_references (offset, expected_bytes)
match the corresponding bytecode.
"""
offsets, values = zip(*link_deps)
for idx, offset in enumerate(offsets):
value = values[idx]
# https://github.com/python/mypy/issues/4975
offset_value = int(offset)
dep_length = len(value)
end_of_bytes = offset_value + dep_length
# Ignore b/c whitespace around ':' conflict b/w black & flake8
actual_bytes = bytecode[offset_value:end_of_bytes] # noqa: E203
if actual_bytes != values[idx]:
raise ValidationError(
"Error validating linked reference. "
f"Offset: {offset} "
f"Value: {values[idx]} "
f"Bytecode: {bytecode} ."
)
|
Validates that normalized linked_references (offset, expected_bytes)
match the corresponding bytecode.
|
entailment
|
def normalize_linked_references(
data: List[Dict[str, Any]]
) -> Generator[Tuple[int, str, str], None, None]:
"""
Yield a tuple of information for each insertion of a linked reference:
(offset, type, value)
"""
for deployment in data:
for offset in deployment["offsets"]:
yield offset, deployment["type"], deployment["value"]
|
Yield a tuple of information for each insertion of a linked reference:
(offset, type, value)
|
entailment
|
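A small illustrative input/output pair (field values are made up, following the link-dependency shape assumed by the code above):
link_deps = [
    {"offsets": [100, 316], "type": "reference", "value": "SafeMathLib"},
    {"offsets": [220], "type": "literal", "value": "0xdeadbeef"},
]
list(normalize_linked_references(link_deps))
# -> [(100, "reference", "SafeMathLib"),
#     (316, "reference", "SafeMathLib"),
#     (220, "literal", "0xdeadbeef")]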
def validate_deployments_tx_receipt(
deployments: Dict[str, Any], w3: Web3, allow_missing_data: bool = False
) -> None:
"""
Validate that address and block hash found in deployment data match what is found on-chain.
:allow_missing_data: by default, enforces validation of address and blockHash.
"""
# todo: provide hook to lazily look up tx receipt via binary search if missing data
for name, data in deployments.items():
if "transaction" in data:
tx_hash = data["transaction"]
tx_receipt = w3.eth.getTransactionReceipt(tx_hash)
# tx_address will be None if contract created via contract factory
tx_address = tx_receipt["contractAddress"]
if tx_address is None and allow_missing_data is False:
raise ValidationError(
"No contract address found in tx receipt. Unable to verify "
"address found in tx receipt matches address in manifest's deployment data. "
"If this validation is not necessary, please enable `allow_missing_data` arg. "
)
if tx_address is not None and not is_same_address(
tx_address, data["address"]
):
raise ValidationError(
f"Error validating tx_receipt for {name} deployment. "
f"Address found in manifest's deployment data: {data['address']} "
f"Does not match address found on tx_receipt: {tx_address}."
)
if "block" in data:
if tx_receipt["blockHash"] != to_bytes(hexstr=data["block"]):
raise ValidationError(
f"Error validating tx_receipt for {name} deployment. "
f"Block found in manifest's deployment data: {data['block']} does not "
f"Does not match block found on tx_receipt: {tx_receipt['blockHash']}."
)
elif allow_missing_data is False:
raise ValidationError(
"No block hash found in deployment data. "
"Unable to verify block hash on tx receipt. "
"If this validation is not necessary, please enable `allow_missing_data` arg."
)
elif allow_missing_data is False:
raise ValidationError(
"No transaction hash found in deployment data. "
"Unable to validate tx_receipt. "
"If this validation is not necessary, please enable `allow_missing_data` arg."
)
|
Validate that address and block hash found in deployment data match what is found on-chain.
:allow_missing_data: by default, enforces validation of address and blockHash.
|
entailment
|
def is_prelinked_bytecode(bytecode: bytes, link_refs: List[Dict[str, Any]]) -> bool:
"""
Returns False if all expected link_refs are unlinked, otherwise returns True.
todo support partially pre-linked bytecode (currently all or nothing)
"""
for link_ref in link_refs:
for offset in link_ref["offsets"]:
try:
validate_empty_bytes(offset, link_ref["length"], bytecode)
except ValidationError:
return True
return False
|
Returns False if all expected link_refs are unlinked, otherwise returns True.
todo support partially pre-linked bytecode (currently all or nothing)
|
entailment
|
def apply_all_link_refs(
bytecode: bytes, link_refs: List[Dict[str, Any]], attr_dict: Dict[str, str]
) -> bytes:
"""
Applies all link references corresponding to a valid attr_dict to the bytecode.
"""
if link_refs is None:
return bytecode
link_fns = (
apply_link_ref(offset, ref["length"], attr_dict[ref["name"]])
for ref in link_refs
for offset in ref["offsets"]
)
linked_bytecode = pipe(bytecode, *link_fns)
return linked_bytecode
|
Applies all link references corresponding to a valid attr_dict to the bytecode.
|
entailment
|
def apply_link_ref(offset: int, length: int, value: bytes, bytecode: bytes) -> bytes:
"""
Returns the new bytecode with `value` put into the location indicated by `offset` and `length`.
"""
try:
validate_empty_bytes(offset, length, bytecode)
except ValidationError:
raise BytecodeLinkingError("Link references cannot be applied to bytecode")
new_bytes = (
# Ignore linting error b/c conflict b/w black & flake8
bytecode[:offset]
+ value
+ bytecode[offset + length :] # noqa: E201, E203
)
return new_bytes
|
Returns the new bytecode with `value` put into the location indicated by `offset` and `length`.
|
entailment
|
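A concrete byte-splice example, calling the function directly with all four arguments (inside `apply_all_link_refs` it is used with the bytecode supplied last). The region being replaced must contain empty (zero) bytes, otherwise `BytecodeLinkingError` is raised.
code = b"\x60\x60" + b"\x00" * 20 + b"\x60\x52"   # 20 empty bytes reserved for an address
addr = b"\xab" * 20                               # illustrative 20-byte address
linked = apply_link_ref(2, 20, addr, code)
assert linked == b"\x60\x60" + addr + b"\x60\x52"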
def link_bytecode(cls, attr_dict: Dict[str, str]) -> Type["LinkableContract"]:
"""
Return a cloned contract factory with the deployment / runtime bytecode linked.
:attr_dict: Dict[`ContractType`: `Address`] for all deployment and runtime link references.
"""
if not cls.unlinked_references and not cls.linked_references:
raise BytecodeLinkingError("Contract factory has no linkable bytecode.")
if not cls.needs_bytecode_linking:
raise BytecodeLinkingError(
"Bytecode for this contract factory does not require bytecode linking."
)
cls.validate_attr_dict(attr_dict)
bytecode = apply_all_link_refs(cls.bytecode, cls.unlinked_references, attr_dict)
runtime = apply_all_link_refs(
cls.bytecode_runtime, cls.linked_references, attr_dict
)
linked_class = cls.factory(
cls.web3, bytecode_runtime=runtime, bytecode=bytecode
)
if linked_class.needs_bytecode_linking:
raise BytecodeLinkingError(
"Expected class to be fully linked, but class still needs bytecode linking."
)
return linked_class
|
Return a cloned contract factory with the deployment / runtime bytecode linked.
:attr_dict: Dict[`ContractType`: `Address`] for all deployment and runtime link references.
|
entailment
|
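A hedged usage sketch: the contract factory would normally come from an EthPM package (the `get_contract_factory` accessor and names below are assumptions), and the `attr_dict` values must be canonical 20-byte addresses, as enforced by `validate_attr_dict` below.
escrow_factory = package.get_contract_factory("Escrow")       # hypothetical accessor
if escrow_factory.needs_bytecode_linking:
    LinkedEscrow = escrow_factory.link_bytecode(
        {"SafeMathLib": b"\xab" * 20}    # canonical (20-byte) address, illustrative
    )
    tx_hash = LinkedEscrow.constructor().transact()           # standard web3 factory usage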
def validate_attr_dict(self, attr_dict: Dict[str, str]) -> None:
"""
Validates that ContractType keys in attr_dict reference existing manifest ContractTypes.
"""
attr_dict_names = list(attr_dict.keys())
if not self.unlinked_references and not self.linked_references:
raise BytecodeLinkingError(
"Unable to validate attr dict, this contract has no linked/unlinked references."
)
unlinked_refs = self.unlinked_references or ({},)
linked_refs = self.linked_references or ({},)
all_link_refs = unlinked_refs + linked_refs
all_link_names = [ref["name"] for ref in all_link_refs]
if set(attr_dict_names) != set(all_link_names):
raise BytecodeLinkingError(
"All link references must be defined when calling "
"`link_bytecode` on a contract factory."
)
for address in attr_dict.values():
if not is_canonical_address(address):
raise BytecodeLinkingError(
f"Address: {address} as specified in the attr_dict is not "
"a valid canoncial address."
)
|
Validates that ContractType keys in attr_dict reference existing manifest ContractTypes.
|
entailment
|