code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def generateRaDec(self):
    """Convert XY positions into sky coordinates using STWCS methods.

    Sets ``self.radec`` from ``self.xypos``; when no sources were found,
    resets ``self.xypos`` to None and returns after logging a warning.

    Raises
    ------
    ValueError
        If ``self.wcs`` is not a valid PyWCS object.
    """
    # NOTE(review): the extracted source fused this assignment with the
    # following 'if'; split into two statements as the only valid parse.
    self.prefix = self.PAR_PREFIX
    if not isinstance(self.wcs, pywcs.WCS):
        print(textutil.textbox(
            'WCS not a valid PyWCS object. '
            'Conversion of RA/Dec not possible...'), file=sys.stderr)
        raise ValueError("WCS not a valid PyWCS object.")

    if self.xypos is None or len(self.xypos[0]) == 0:
        self.xypos = None
        warnstr = textutil.textbox(
            'WARNING: \n'
            'No objects found for this image...')
        for line in warnstr.split('\n'):
            log.warning(line)
        print(warnstr)
        return

    if self.radec is None:
        print(' Found {:d} objects.'.format(len(self.xypos[0])))
        if self.wcs is not None:
            ra, dec = self.wcs.all_pix2world(
                self.xypos[0], self.xypos[1], self.origin)
            self.radec = [ra, dec] + copy.deepcopy(self.xypos[2:])
        else:
            # If we have no WCS, simply pass along the XY input positions
            # under the assumption they were already sky positions.
            self.radec = copy.deepcopy(self.xypos)
4.532744
4.286812
1.057369
def apply_exclusions(self, exclusions):
    """Trim the sky catalog, removing any source that falls inside a
    region specified by the exclusions file."""
    # Parse the exclusion file into a list of positions and distances.
    exclusion_coords = tweakutils.parse_exclusions(exclusions)
    if exclusion_coords is None:
        return

    excluded_list = []
    radec_indx = list(range(len(self.radec[0])))
    for ra, dec, indx in zip(self.radec[0], self.radec[1], radec_indx):
        src_pos = coords.SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg))
        # Check whether this source lies within any exclusion region.
        for reg in exclusion_coords:
            if reg['units'] == 'sky':
                regpos = reg['pos']
                regdist = reg['distance']  # units: arcsec
            else:
                regradec = self.wcs.all_pix2world([reg['pos']], 1)[0]
                regpos = (regradec[0], regradec[1])
                regdist = reg['distance'] * self.wcs.pscale  # units: arcsec

            epos = coords.SkyCoord(ra=regpos[0], dec=regpos[1],
                                   unit=(u.hourangle, u.deg))
            sep = float(epos.separation(src_pos).to_string(unit=u.arcsec,
                                                           decimal=True))
            if sep <= regdist:
                excluded_list.append(indx)
                break

    # Keep only the 'good' sources outside all exclusion regions.
    for e in excluded_list:
        radec_indx.remove(e)
    radec_indx = np.array(radec_indx, dtype=int)

    num_excluded = len(excluded_list)
    if num_excluded > 0:
        radec_trimmed = [arr[radec_indx] for arr in self.radec]
        xypos_trimmed = [arr[radec_indx] for arr in self.xypos]
        # Renumber source IDs (last column) after trimming.
        xypos_trimmed[-1] = np.arange(len(xypos_trimmed[0]))
        self.radec = radec_trimmed
        self.xypos = xypos_trimmed
        log.info('Excluded %d sources from catalog.' % num_excluded)
2.766947
2.646367
1.045564
def apply_flux_limits(self):
    """Apply any user-specified limits on source selection. Limits are
    based on fluxes."""
    if not self._apply_flux_limits:
        return

    # Only if limits are set should they be applied.
    if ((self.maxflux is None and self.minflux is None)
            or self.fluxunits is None):
        return

    print("\n Applying flux limits...")
    print(" minflux = {}".format(self.minflux))
    print(" maxflux = {}".format(self.maxflux))
    print(" fluxunits = '{:s}'".format(self.fluxunits))
    print(" nbright = {}".format(self.nbright))

    # Start by checking to see whether fluxes were read in to use for
    # applying the limits.
    if not self.flux_col:
        print(" WARNING: Catalog did not contain fluxes for use in trimming...")
        return

    if self.xypos is not None and self.radec is not None:
        src_cat = self.radec if len(self.xypos) < len(self.radec) else self.xypos
    else:
        src_cat = self.radec if self.xypos is None else self.xypos

    if src_cat is None:
        raise RuntimeError("No catalogs available for filtering")
    if len(src_cat) < 3:
        print(" WARNING: No fluxes read in for catalog for use in trimming...")
        return

    fluxes = copy.deepcopy(src_cat[2])

    # Apply limits equally to all .radec and .xypos entries.
    # Start by clipping by any specified flux range; magnitudes invert
    # the sense of the comparison (smaller mag == brighter).
    if self.fluxunits == 'mag':
        if self.minflux is None:
            flux_mask = fluxes >= self.maxflux
        elif self.maxflux is None:
            flux_mask = fluxes <= self.minflux
        else:
            flux_mask = (fluxes <= self.minflux) & (fluxes >= self.maxflux)
    else:
        if self.minflux is None:
            flux_mask = fluxes <= self.maxflux
        elif self.maxflux is None:
            flux_mask = fluxes >= self.minflux
        else:
            flux_mask = (fluxes >= self.minflux) & (fluxes <= self.maxflux)

    all_radec = (None if self.radec is None
                 else [rd[flux_mask].copy() for rd in self.radec])
    all_xypos = (None if self.xypos is None
                 else [xy[flux_mask].copy() for xy in self.xypos])

    nrem = flux_mask.size - np.count_nonzero(flux_mask)
    print(" Removed {:d} sources based on flux limits.".format(nrem))

    if self.nbright is not None:
        print("Selecting catalog based on {} brightest sources".format(self.nbright))
        fluxes = fluxes[flux_mask]
        # Find indices of brightest sources.
        idx = np.argsort(fluxes)
        if self.fluxunits == 'mag':
            idx = idx[:self.nbright]
        else:
            idx = (idx[::-1])[:self.nbright]
        # Pick out only the brightest 'nbright' sources.
        if all_radec is not None:
            all_radec = [rd[idx] for rd in all_radec]
        if all_xypos is not None:
            all_xypos = [xy[idx] for xy in all_xypos]

    self.radec = all_radec
    self.xypos = all_xypos

    if len(self.radec[0]) == 0:
        print("Trimming of catalog resulted in NO valid sources! ")
        raise ValueError
2.596544
2.583171
1.005177
def buildCatalogs(self, exclusions=None, **kwargs):
    """Primary interface to build catalogs based on user inputs."""
    self.generateXY(**kwargs)
    self.generateRaDec()
    if exclusions:
        self.apply_exclusions(exclusions)
    # Apply selection limits as specified by the user.
    self.apply_flux_limits()
8.908711
8.336432
1.068648
def plotXYCatalog(self, **kwargs):
    """Display the original image and overlay the positions of the
    detected sources from this image's catalog.

    Plotting ``kwargs`` that can be provided are:
        vmin, vmax, cmap, marker
    Default colormap is 'summer'.
    """
    try:
        from matplotlib import pyplot as pl
    except ImportError:  # matplotlib is optional: skip plotting if absent
        pl = None

    if pl is not None:  # If the pyplot package could be loaded...
        pl.clf()
        pars = kwargs.copy()
        pars.setdefault('marker', 'b+')
        # Split out imshow-only options so the rest can go to plot().
        pl_cmap = pars.pop('cmap', 'summer')
        pl_vmin = pars.pop('vmin', None)
        pl_vmax = pars.pop('vmax', None)
        pl.imshow(self.source, cmap=pl_cmap, vmin=pl_vmin, vmax=pl_vmax)
        # Catalog positions are 1-based; matplotlib pixels are 0-based.
        pl.plot(self.xypos[0] - 1, self.xypos[1] - 1, pars['marker'])
2.363787
2.212115
1.068565
def writeXYCatalog(self, filename):
    """Write out the X,Y catalog to a file."""
    if self.xypos is None:
        warnstr = textutil.textbox(
            'WARNING: \n No X,Y source catalog to write to file. ')
        for line in warnstr.split('\n'):
            log.warning(line)
        print(warnstr)
        return

    # 'with' guarantees the file is closed even if a write fails
    # (the original leaked the handle on exception).
    with open(filename, 'w') as f:
        f.write("# Source catalog derived for %s\n" % self.wcs.filename)
        f.write("# Columns: \n")
        if self.use_sharp_round:
            f.write('# X Y Flux ID Sharp Round1 Round2\n')
        else:
            f.write('# X Y Flux ID\n')
        f.write('# (%s) (%s)\n' % (self.in_units, self.in_units))

        for row in range(len(self.xypos[0])):
            for col in self.xypos:
                f.write("%g " % col[row])
            f.write("\n")
4.253358
4.119154
1.032581
def generateXY(self, **kwargs):
    """Interpret the input catalog file as columns of positions and
    fluxes."""
    self.num_objects = 0
    xycols = self._readCatalog()
    if xycols is None:
        return

    # Convert the catalog into attributes.
    self.xypos = xycols[:3]
    # Convert optional columns if they are present.
    if self.numcols > 3:
        self.xypos.append(np.asarray(xycols[3], dtype=int))  # source ID
    if self.numcols > 4:
        self.sharp = xycols[4]
    if self.numcols > 5:
        self.round1 = xycols[5]
    if self.numcols > 6:
        self.round2 = xycols[6]

    self.num_objects = len(xycols[0])

    if self.numcols < 3:  # account for flux column
        self.xypos.append(np.zeros(self.num_objects, dtype=float))
        self.flux_col = False
    if self.numcols < 4:
        # Add source ID column.
        self.xypos.append(np.arange(self.num_objects) + self.start_id)
    if self.use_sharp_round:
        for _ in range(len(self.xypos), 7):
            self.xypos.append(np.zeros(self.num_objects, dtype=float))
        self.sharp_col = False

    if self.pars['xyunits'] == 'degrees':
        self.radec = [x.copy() for x in self.xypos]
        if self.wcs is not None:
            self.xypos[:2] = list(self.wcs.all_world2pix(
                np.array(self.xypos[:2]).T, self.origin).T)
3.543151
3.336415
1.061963
def plotXYCatalog(self, **kwargs):
    """Plot the source catalog positions using matplotlib's
    ``pyplot.plot()``.

    Any keyword understood by ``pyplot.plot()`` may be passed through
    ``kwargs``.
    """
    try:
        from matplotlib import pyplot as pl
    except ImportError:  # matplotlib is optional: skip plotting if absent
        pl = None

    if pl is not None:
        pl.clf()
        pl.plot(self.xypos[0], self.xypos[1], **kwargs)
3.285438
3.582177
0.917162
def runBlot(imageObjectList, output_wcs, configObj={},
            wcsmap=wcs_functions.WCSMap, procSteps=None):
    """Run the Blot step when its switch is enabled in ``configObj``.

    ``procSteps``, when provided, records the start/end of the step.
    """
    if procSteps is not None:
        procSteps.addStep('Blot')

    blot_name = util.getSectionName(configObj, _blot_step_num_)

    # This can be called directly from MultiDrizle, so only execute if
    # switch has been turned on (no guarantee MD will check before calling).
    if configObj[blot_name]['blot']:
        paramDict = buildBlotParamDict(configObj)

        log.info('USER INPUT PARAMETERS for Blot Step:')
        util.printParams(paramDict, log=log)

        run_blot(imageObjectList, output_wcs.single_wcs, paramDict,
                 wcsmap=wcsmap)
    else:
        log.info('Blot step not performed.')

    if procSteps is not None:
        procSteps.endStep('Blot')
6.768792
6.905345
0.980225
def run_blot(imageObjectList, output_wcs, paramDict,
             wcsmap=wcs_functions.WCSMap):
    """Perform the blot operation on the list of images."""
    # Insure that input imageObject is a list.
    if not isinstance(imageObjectList, list):
        imageObjectList = [imageObjectList]

    # Setup the versions info dictionary for output to PRIMARY header.
    # The keys will be used as the name reported in the header, as-is.
    _versions = {'AstroDrizzle': __version__,
                 'PyFITS': util.__fits_version__,
                 'Numpy': util.__numpy_version__}

    _hdrlist = []

    for img in imageObjectList:
        for chip in img.returnAllChips(extname=img.scienceExt):
            print(' Blot: creating blotted image: ',
                  chip.outputNames['data'])

            # Check to see what names need to be included here for use
            # in _hdrlist.
            chip.outputNames['driz_version'] = _versions['AstroDrizzle']
            outputvals = chip.outputNames.copy()
            outputvals.update(img.outputValues)
            outputvals['blotnx'] = chip.wcs.naxis1
            outputvals['blotny'] = chip.wcs.naxis2
            _hdrlist.append(outputvals)

            plist = outputvals.copy()
            plist.update(paramDict)

            # PyFITS can be used here as it will always operate on
            # output from PyDrizzle (which will always be a FITS file).
            # Open the input science file.
            medianPar = 'outMedian'
            outMedianObj = img.getOutputName(medianPar)
            if img.inmemory:
                outMedian = img.outputNames[medianPar]
                _fname, _sciextn = fileutil.parseFilename(outMedian)
                _inimg = outMedianObj
            else:
                outMedian = outMedianObj
                _fname, _sciextn = fileutil.parseFilename(outMedian)
                _inimg = fileutil.openImage(_fname, memmap=False)

            # Return the PyFITS HDU corresponding to the named extension.
            _scihdu = fileutil.getExtn(_inimg, _sciextn)
            _insci = _scihdu.data.copy()
            _inimg.close()
            del _inimg, _scihdu

            _outsci = do_blot(_insci, output_wcs, chip.wcs, chip._exptime,
                              coeffs=paramDict['coeffs'],
                              interp=paramDict['blot_interp'],
                              sinscl=paramDict['blot_sinscl'],
                              wcsmap=wcsmap)

            # Apply sky subtraction and unit conversion to blotted array
            # to match un-modified input array.
            if paramDict['blot_addsky']:
                skyval = chip.computedSky
            else:
                skyval = paramDict['blot_skyval']
            _outsci /= chip._conversionFactor
            if skyval is not None:
                _outsci += skyval
                log.info('Applying sky value of %0.6f to blotted image %s' %
                         (skyval, chip.outputNames['data']))

            # Write output Numpy objects to a PyFITS file.
            # Blotting only occurs from a drizzled SCI extension
            # to a blotted SCI extension...
            _outimg = outputimage.OutputImage(_hdrlist, paramDict,
                                              build=False, wcs=chip.wcs,
                                              blot=True)
            _outimg.outweight = None
            _outimg.outcontext = None
            outimgs = _outimg.writeFITS(plist['data'], _outsci, None,
                                        versions=_versions, blend=False,
                                        virtual=img.inmemory)

            img.saveVirtualOutputs(outimgs)

            _hdrlist = []
            del _outsci
            del _outimg
6.428103
6.389866
1.005984
def do_blot(source, source_wcs, blot_wcs, exptime, coeffs=True,
            interp='poly5', sinscl=1.0, stepsize=10, wcsmap=None):
    """Create a single blotted image from a single source image.

    All distortion information is assumed to be included in the WCS of
    the 'output' blotted image given in ``blot_wcs``. This is the
    simplest interface for stand-alone use of the blotting function.

    Parameters
    ----------
    source
        Input numpy array of the undistorted source image in units of 'cps'.
    source_wcs
        HSTWCS object representing the source image's distortion-corrected WCS.
    blot_wcs
        (py)wcs.WCS object representing the blotted image WCS.
    exptime
        exptime used for scaling the output blot image. A value of 1
        yields an output blot image in units of 'cps'.
    coeffs
        Whether to use distortion coefficients associated with
        ``blot_wcs``. If False, no distortion model is applied.
    interp
        Interpolation to use when blotting pixels. One of:
        "nearest", "linear", "poly3", "poly5" (default), "spline3", "sinc".
    sinscl
        Scale for the sinc interpolation kernel (in output, blotted pixels).
    stepsize
        Number of pixels for WCS interpolation.
    wcsmap
        Custom mapping class providing the transformation from drizzled
        to blotted WCS. Defaults to `drizzlepac.wcs_functions.WCSMap`.
    """
    _outsci = np.zeros(blot_wcs.array_shape, dtype=np.float32)

    # Now pass numpy objects to callable version of Blot...
    build = False  # retained from original; unused
    misval = 0.0
    kscale = 1.0
    xmin = 1
    ymin = 1
    xmax, ymax = source_wcs.pixel_shape

    # Compute the undistorted 'natural' plate scale for this chip.
    if coeffs:
        wcslin = distortion.utils.make_orthogonal_cd(blot_wcs)
    else:
        wcslin = blot_wcs
        blot_wcs.sip = None
        blot_wcs.cpdis1 = None
        blot_wcs.cpdis2 = None
        blot_wcs.det2im = None

    if wcsmap is None and cdriz is not None:
        print('Using default C-based coordinate transformation...')
        mapping = cdriz.DefaultWCSMapping(blot_wcs, source_wcs,
                                          blot_wcs.pixel_shape[0],
                                          blot_wcs.pixel_shape[1],
                                          stepsize)
        pix_ratio = source_wcs.pscale / wcslin.pscale
    else:
        # Using the Python class for the WCS-based transformation:
        # use the user-provided mapping function.
        print('Using coordinate transformation defined by user...')
        if wcsmap is None:
            wcsmap = wcs_functions.WCSMap
        wmap = wcsmap(blot_wcs, source_wcs)
        mapping = wmap.forward
        pix_ratio = source_wcs.pscale / wcslin.pscale

    t = cdriz.tblot(source, _outsci, xmin, xmax, ymin, ymax,
                    pix_ratio, kscale, 1.0, 1.0,
                    'center', interp, exptime, misval, sinscl, 1, mapping)
    del mapping

    return _outsci
6.019913
5.846189
1.029716
def _lowerAsn(asnfile):
    """Create a copy of the original ASN file and change the case of all
    members to lower-case.

    Returns the name of the new '_pipeline' ASN file.
    """
    # Start by creating a new name for the ASN table.
    _indx = asnfile.find('_asn.fits')
    _new_asn = asnfile[:_indx] + '_pipeline' + asnfile[_indx:]
    if os.path.exists(_new_asn):
        os.remove(_new_asn)
    # Copy original ASN table to new table.
    shutil.copy(asnfile, _new_asn)

    # Open up the new copy and convert all MEMNAME's to lower-case.
    # Context manager guarantees the file is flushed and closed even if
    # an update fails (the original leaked the handle on exception).
    with fits.open(_new_asn, mode='update', memmap=False) as fasn:
        for row in fasn[1].data:
            row.setfield('MEMNAME', row.field('MEMNAME').lower())

    return _new_asn
3.64934
3.571083
1.021914
if not os.path.exists(drizfile): return # Open already existing CALWF3 trailer file for appending ftrl = open(trlfile,'a') # Open astrodrizzle trailer file fdriz = open(drizfile) # Read in drizzle comments _dlines = fdriz.readlines() # Append them to CALWF3 trailer file ftrl.writelines(_dlines) # Close all files ftrl.close() fdriz.close() # Now, clean up astrodrizzle trailer file os.remove(drizfile)
def _appendTrlFile(trlfile,drizfile)
Append drizfile to already existing trlfile from CALXXX.
4.267887
4.300467
0.992424
_prefix= time.strftime("%Y%j%H%M%S-I-----",time.localtime()) _lenstr = 60 - len(_process_name) return _prefix+_process_name+(_lenstr*'-')+'\n'
def _timestamp(_process_name)
Create formatted time string recognizable by OPUS.
6.402603
6.000616
1.066991
# extract rootname from input rootname = input[:input.find('_')] newdir = os.path.join(rootdir,rootname) if not os.path.exists(newdir): os.mkdir(newdir) return newdir
def _createWorkingDir(rootdir,input)
Create a working directory based on input name under the parent directory specified as rootdir
2.621056
2.763066
0.948604
flist = [] if '_asn.fits' in input: asndict = asnutil.readASNTable(input,None) flist.append(input[:input.find('_')]) flist.extend(asndict['order']) flist.append(asndict['output']) else: flist.append(input[:input.find('_')]) # copy all files related to these rootnames into new dir for rootname in flist: for fname in glob.glob(rootname+'*'): shutil.copy(fname,os.path.join(newdir,fname))
def _copyToNewWorkingDir(newdir,input)
Copy input file and all related files necessary for processing to the new working directory. This function works in a greedy manner, in that all files associated with all inputs(have the same rootname) will be copied to the new working directory.
4.336543
4.193735
1.034053
for fname in glob.glob(os.path.join(newdir,'*')): shutil.move(fname,os.path.join(origdir,os.path.basename(fname)))
def _restoreResults(newdir,origdir)
Move (not copy) all files from newdir back to the original directory
1.944494
1.904419
1.021044
def median(input=None, configObj=None, editpars=False, **inputDict):
    """Create a median image from the separately drizzled images."""
    if input is None:
        raise ValueError("Please supply an input image")
    inputDict["input"] = input

    configObj = util.getDefaultConfigObj(__taskname__, configObj, inputDict,
                                         loadOnly=(not editpars))
    if configObj is None:
        return

    if not editpars:
        run(configObj)
5.403569
5.18791
1.041569
def createMedian(imgObjList, configObj, procSteps=None):
    """Top-level interface to the createMedian step called from the
    top-level AstroDrizzle.

    Parses the input parameters, then calls `_median()` to
    median-combine the input images into a single image.
    """
    if imgObjList is None:
        msg = "Please provide a list of imageObjects to the median step"
        print(msg, file=sys.stderr)
        raise ValueError(msg)

    if procSteps is not None:
        procSteps.addStep('Create Median')

    step_name = util.getSectionName(configObj, _step_num_)
    if not configObj[step_name]['median']:
        log.info('Median combination step not performed.')
        return

    paramDict = configObj[step_name]
    paramDict['proc_unit'] = configObj['proc_unit']

    # Include whether or not compression was performed in the
    # separate-drizzle step.
    driz_sep_name = util.getSectionName(configObj, _single_step_num_)
    driz_sep_paramDict = configObj[driz_sep_name]
    paramDict['compress'] = driz_sep_paramDict['driz_sep_compress']

    log.info('USER INPUT PARAMETERS for Create Median Step:')
    util.printParams(paramDict, log=log)

    _median(imgObjList, paramDict)

    if procSteps is not None:
        procSteps.endStep('Create Median')
4.032344
3.90485
1.03265
def _writeImage(dataArray=None, inputHeader=None):
    """Build the output HDUList for the result of the combination step.

    Parameters
    ----------
    dataArray : arr
        Array of data to be written to a fits.PrimaryHDU object.
    inputHeader : obj
        fits.header.Header object used as the basis for the PrimaryHDU
        header.
    """
    prihdu = fits.PrimaryHDU(data=dataArray, header=inputHeader)
    return fits.HDUList([prihdu])
3.164418
4.115116
0.768974
def fetch_uri_contents(self, uri: str) -> URI:
    """Return the content-addressed URI stored at a registry URI."""
    address, pkg_name, pkg_version = parse_registry_uri(uri)
    self.w3.enable_unstable_package_management_api()
    self.w3.pm.set_registry(address)
    release_data = self.w3.pm.get_release_data(pkg_name, pkg_version)
    # Release data is (name, version, manifest_uri); only the URI is needed.
    _, _, manifest_uri = release_data
    return manifest_uri
6.314346
5.349858
1.180283
def pin_assets(self, file_or_dir_path: Path) -> List[Dict[str, str]]:
    """Return a list of dicts containing the IPFS hash, file name, and
    size for each pinned file.

    Raises ``FileNotFoundError`` if the path is neither a file nor a
    directory.
    """
    if file_or_dir_path.is_dir():
        return [dummy_ipfs_pin(path) for path in file_or_dir_path.glob("*")]
    if file_or_dir_path.is_file():
        return [dummy_ipfs_pin(file_or_dir_path)]
    raise FileNotFoundError(
        f"{file_or_dir_path} is not a valid file or directory path."
    )
2.19206
2.02071
1.084797
async def create_task(app: web.Application,
                      coro: Coroutine,
                      *args, **kwargs) -> asyncio.Task:
    """Convenience wrapper for ``TaskScheduler.create(coro)``.

    Uses the app's default ``TaskScheduler`` to start ``coro`` as a new
    background task and returns the created ``asyncio.Task``.
    """
    scheduler = get_scheduler(app)
    return await scheduler.create(coro, *args, **kwargs)
7.779609
8.396544
0.926525
async def cancel_task(app: web.Application,
                      task: asyncio.Task,
                      *args, **kwargs) -> Any:
    """Convenience wrapper for ``TaskScheduler.cancel(task)``.

    Uses the app's default ``TaskScheduler`` to cancel the given task
    and returns whatever the scheduler's ``cancel`` returns.
    """
    scheduler = get_scheduler(app)
    return await scheduler.cancel(task, *args, **kwargs)
6.92435
14.161149
0.488968
async def _cleanup(self):
    """Periodically drop completed tasks from the managed collection.

    This only removes the scheduler's reference so fire-and-forget tasks
    can be garbage collected; it does not delete the task objects.
    """
    while True:
        await asyncio.sleep(CLEANUP_INTERVAL_S)
        still_running = {t for t in self._tasks if not t.done()}
        self._tasks = still_running
3.968116
3.680271
1.078213
async def create(self, coro: Coroutine) -> asyncio.Task:
    """Start execution of a coroutine as a managed task.

    The created ``asyncio.Task`` is returned and added to the managed
    tasks; the scheduler guarantees it is cancelled during application
    shutdown, regardless of whether it was already cancelled manually.

    Args:
        coro (Coroutine): The coroutine to wrap in a task and execute.

    Returns:
        asyncio.Task: An awaitable Task object. It can be safely
        cancelled manually, or using ``TaskScheduler.cancel(task)``.
    """
    loop = asyncio.get_event_loop()
    task = loop.create_task(coro)
    self._tasks.add(task)
    return task
3.056275
4.137803
0.738623
if task is None: return task.cancel() with suppress(KeyError): self._tasks.remove(task) with suppress(Exception): return (await task) if wait_for else None
async def cancel(self, task: asyncio.Task, wait_for: bool = True) -> Any
Cancels and waits for an `asyncio.Task` to finish. Removes it from the collection of managed tasks. Args: task (asyncio.Task): The to be cancelled task. It is not required that the task was was created with `TaskScheduler.create_task()`. wait_for (bool, optional): Whether to wait for the task to finish execution. If falsey, this function returns immediately after cancelling the task. Returns: Any: The return value of `task`. None if `wait_for` is falsey.
4.46599
4.758609
0.938508
def create_content_addressed_github_uri(uri: URI) -> URI:
    """Return a content-addressed Github "git_url" that conforms to:
        https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha

    Accepts a Github-defined "url" that conforms to:
        https://api.github.com/repos/:owner/:repo/contents/:path/:to/manifest.json
    """
    if not is_valid_api_github_uri(uri):
        raise CannotHandleURI(f"{uri} does not conform to Github's API 'url' scheme.")

    response = requests.get(uri)
    response.raise_for_status()

    payload = json.loads(response.content)
    if payload["type"] != "file":
        raise CannotHandleURI(
            f"Expected url to point to a 'file' type, instead received {payload['type']}."
        )
    return payload["git_url"]
4.11416
3.426196
1.200795
def is_valid_github_uri(uri: URI, expected_path_terms: Tuple[str, ...]) -> bool:
    """Return whether the URI fulfills the spec for a valid Github URI.

    Valid Github URIs *must*:
    - Have 'https' scheme
    - Have 'api.github.com' authority
    - Have a path that contains all ``expected_path_terms``
    """
    if not is_text(uri):
        return False

    parsed = parse.urlparse(uri)
    path, scheme, authority = parsed.path, parsed.scheme, parsed.netloc

    if not (path and scheme and authority):
        return False
    if any(term for term in expected_path_terms if term not in path):
        return False
    if scheme != "https":
        return False
    return authority == GITHUB_API_AUTHORITY
3.006113
3.20718
0.937307
def validate_blob_uri_contents(contents: bytes, blob_uri: str) -> None:
    """Raise an exception if the sha1 hash of ``contents`` does not match
    the hash found in the ``blob_uri``.

    Formula for how git calculates the hash found here:
    http://alblue.bandlem.com/2011/08/git-tip-of-week-objects.html
    """
    blob_path = parse.urlparse(blob_uri).path
    blob_hash = blob_path.split("/")[-1]

    contents_str = to_text(contents)
    # git hashes the string "blob <length>\0<contents>".
    hashable_contents = "blob " + str(len(contents_str)) + "\0" + contents_str
    computed = hashlib.sha1(to_bytes(text=hashable_contents)).hexdigest()

    if computed != blob_hash:
        raise ValidationError(
            f"Hash of contents fetched from {blob_uri} do not match its hash: {blob_hash}."
        )
3.625515
3.257347
1.113027
def parse_registry_uri(uri: str) -> RegistryURI:
    """Validate and return (authority, pkg name, version) from a valid
    registry URI."""
    validate_registry_uri(uri)
    parsed_uri = parse.urlparse(uri)
    authority = parsed_uri.netloc
    pkg_name = parsed_uri.path.strip("/")

    # BUGFIX: str.lstrip("version=") strips *characters* from the set
    # {v,e,r,s,i,o,n,=}, not the literal prefix — it would corrupt a
    # version such as 'v1.0.0'. Strip the literal prefix instead.
    query = parsed_uri.query
    if query.startswith("version="):
        query = query[len("version="):]
    pkg_version = query.strip("/")

    return RegistryURI(authority, pkg_name, pkg_version)
2.801194
2.205024
1.270369
def is_supported_content_addressed_uri(uri: URI) -> bool:
    """Return whether the provided uri is currently supported.

    Currently Py-EthPM only supports IPFS and Github blob
    content-addressed uris.
    """
    return is_ipfs_uri(uri) or is_valid_content_addressed_github_uri(uri)
6.337846
3.088882
2.051825
def create_BIP122_uri(
    chain_id: str, resource_type: str, resource_identifier: str
) -> URI:
    """Build a BIP122 URI from a chain id, resource type and identifier.

    See: https://github.com/bitcoin/bips/blob/master/bip-0122.mediawiki
    """
    if resource_type != BLOCK:
        raise ValueError("Invalid resource_type. Must be one of 'block'")
    if not is_block_or_transaction_hash(resource_identifier):
        raise ValueError(
            "Invalid resource_identifier. Must be a hex encoded 32 byte value"
        )
    if not is_block_or_transaction_hash(chain_id):
        raise ValueError("Invalid chain_id. Must be a hex encoded 32 byte value")

    resource_path = f"{resource_type}/{remove_0x_prefix(resource_identifier)}"
    parts = ["blockchain", remove_0x_prefix(chain_id), resource_path, "", ""]
    return URI(parse.urlunsplit(parts))
2.762895
2.703735
1.021881
def update_w3(self, w3: Web3) -> "Package":
    """Return a new ``Package`` containing the same manifest, but
    connected to a different web3 instance.

    .. doctest::

       >>> new_w3 = Web3(Web3.EthereumTesterProvider())
       >>> NewPackage = OwnedPackage.update_w3(new_w3)
       >>> assert NewPackage.w3 == new_w3
       >>> assert OwnedPackage.manifest == NewPackage.manifest
    """
    validate_w3_instance(w3)
    return Package(self.manifest, w3, self.uri)
8.024322
12.676555
0.633005
def from_file(cls, file_path: Path, w3: Web3) -> "Package":
    """Return a ``Package`` instantiated by a manifest located at the
    provided Path.

    ``file_path`` must be a ``pathlib.Path`` instance; a valid ``Web3``
    instance is required to instantiate a ``Package``.
    """
    if not isinstance(file_path, Path):
        raise TypeError(
            "The Package.from_file method expects a pathlib.Path instance."
            f"Got {type(file_path)} instead."
        )

    raw_manifest = file_path.read_text()
    validate_raw_manifest_format(raw_manifest)
    manifest = json.loads(raw_manifest)

    return cls(manifest, w3, file_path.as_uri())
3.109861
2.746528
1.132288
def from_uri(cls, uri: URI, w3: Web3) -> "Package":
    """Return a ``Package`` instantiated by a manifest located at a
    content-addressed URI. A valid ``Web3`` instance is also required.

    URI schemes supported:
    - IPFS: ``ipfs://Qm...``
    - HTTP: ``https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha``
    - Registry: ``ercXXX://registry.eth/greeter?version=1.0.0``

    .. code:: python

       OwnedPackage = Package.from_uri('ipfs://QmbeVyFLSuEUxiXKwSsEjef7icpdTdA4kGG9BcrJXKNKUW', w3)  # noqa: E501
    """
    raw_manifest = to_text(resolve_uri_contents(uri))
    validate_raw_manifest_format(raw_manifest)
    manifest = json.loads(raw_manifest)
    return cls(manifest, w3, uri)
6.985141
8.753523
0.797981
def get_contract_factory(self, name: ContractName) -> Contract:
    """Return the contract factory for a given contract type, generated
    from the data available in ``Package.manifest``.

    .. code:: python

       Owned = OwnedPackage.get_contract_factory('owned')

    Where a contract uses a library the factory will have unlinked
    bytecode; see ``ethpm.contract.LinkableContract`` for the extra
    bytecode-linking helpers.
    """
    validate_contract_name(name)

    if "contract_types" not in self.manifest:
        raise InsufficientAssetsError(
            "This package does not contain any contract type data."
        )

    try:
        contract_data = self.manifest["contract_types"][name]
    except KeyError:
        raise InsufficientAssetsError(
            "This package does not contain any package data to generate "
            f"a contract factory for contract type: {name}. Available contract types include: "
            f"{ list(self.manifest['contract_types'].keys()) }."
        )

    validate_minimal_contract_factory_data(contract_data)
    contract_kwargs = generate_contract_factory_kwargs(contract_data)
    return self.w3.eth.contract(**contract_kwargs)
3.457469
3.616072
0.956139
def get_contract_instance(self, name: ContractName, address: Address) -> Contract:
    """Return a ``Web3.contract`` instance generated from the contract
    type data available in ``Package.manifest`` and the provided
    ``address``.

    The provided ``address`` must be valid on the connected chain
    available through ``Package.w3``.
    """
    validate_address(address)
    validate_contract_name(name)

    try:
        self.manifest["contract_types"][name]["abi"]
    except KeyError:
        raise InsufficientAssetsError(
            "Package does not have the ABI required to generate a contract instance "
            f"for contract: {name} at address: {address}."
        )

    contract_kwargs = generate_contract_factory_kwargs(
        self.manifest["contract_types"][name]
    )
    return self.w3.eth.contract(
        address=to_canonical_address(address), **contract_kwargs
    )
3.614753
3.014348
1.199182
def build_dependencies(self) -> "Dependencies":
    """Return a ``Dependencies`` instance containing the build
    dependencies available on this Package, giving access to the full
    dependency tree.

    .. code:: python

       >>> owned_package.build_dependencies['zeppelin']
       <ZeppelinPackage>
    """
    validate_build_dependencies_are_present(self.manifest)

    dependency_packages = {}
    for name, uri in self.manifest["build_dependencies"].items():
        try:
            validate_build_dependency(name, uri)
            dependency_packages[name] = Package.from_uri(uri, self.w3)
        except PyEthPMError as e:
            raise FailureToFetchIPFSAssetsError(
                f"Failed to retrieve build dependency: {name} from URI: {uri}.\n"
                f"Got error: {e}."
            )

    return Dependencies(dependency_packages)
4.831511
5.09746
0.947827
# No "deployments" key in the manifest -> nothing to expose.
if not check_for_deployments(self.manifest):
    return {}
all_blockchain_uris = self.manifest["deployments"].keys()
# Exactly one deployment URI must match the chain behind self.w3.
matching_uri = validate_single_matching_uri(all_blockchain_uris, self.w3)
deployments = self.manifest["deployments"][matching_uri]
all_contract_factories = {
    deployment_data["contract_type"]: self.get_contract_factory(
        deployment_data["contract_type"]
    )
    for deployment_data in deployments.values()
}
# Best-effort cross-check of manifest addresses/blocks against tx receipts.
validate_deployments_tx_receipt(deployments, self.w3, allow_missing_data=True)
linked_deployments = get_linked_deployments(deployments)
if linked_deployments:
    for deployment_data in linked_deployments.values():
        on_chain_bytecode = self.w3.eth.getCode(
            to_canonical_address(deployment_data["address"])
        )
        unresolved_linked_refs = normalize_linked_references(
            deployment_data["runtime_bytecode"]["link_dependencies"]
        )
        resolved_linked_refs = tuple(
            self._resolve_linked_references(link_ref, deployments)
            for link_ref in unresolved_linked_refs
        )
        # Each resolved link ref must match the bytecode actually on chain.
        for linked_ref in resolved_linked_refs:
            validate_linked_references(linked_ref, on_chain_bytecode)
return Deployments(deployments, all_contract_factories, self.w3)
def deployments(self) -> Union["Deployments", Dict[None, None]]
Returns a ``Deployments`` object containing all the deployment data and contract factories of a ``Package``'s `contract_types`. Automatically filters deployments to only expose those available on the current ``Package.w3`` instance. .. code:: python package.deployments.get_instance("ContractType")
3.600855
3.531681
1.019587
ipfs_return = { "Hash": generate_file_hash(path.read_bytes()), "Name": path.name, "Size": str(path.stat().st_size), } return ipfs_return
def dummy_ipfs_pin(path: Path) -> Dict[str, str]
Return IPFS data as if file was pinned to an actual node.
3.00208
2.63837
1.137854
parse_result = parse.urlparse(value) if parse_result.netloc: if parse_result.path: return "".join((parse_result.netloc, parse_result.path.rstrip("/"))) else: return parse_result.netloc else: return parse_result.path.strip("/")
def extract_ipfs_path_from_uri(value: str) -> str
Return the path from an IPFS URI. Path = IPFS hash & following path.
2.647679
2.420615
1.093804
parse_result = parse.urlparse(value) if parse_result.scheme != "ipfs": return False if not parse_result.netloc and not parse_result.path: return False return True
def is_ipfs_uri(value: str) -> bool
Return a bool indicating whether or not the value is a valid IPFS URI.
3.046168
2.609649
1.167271
self._validate_name_and_references(contract_name) # Use a deployment's "contract_type" to lookup contract factory # in case the deployment uses a contract alias contract_type = self.deployment_data[contract_name]["contract_type"] factory = self.contract_factories[contract_type] address = to_canonical_address(self.deployment_data[contract_name]["address"]) contract_kwargs = { "abi": factory.abi, "bytecode": factory.bytecode, "bytecode_runtime": factory.bytecode_runtime, } return self.w3.eth.contract(address=address, **contract_kwargs)
def get_instance(self, contract_name: str) -> None
Fetches a contract instance belonging to deployment after validating contract name.
3.617181
3.302626
1.095244
if not is_address(address): raise ValidationError(f"Expected an address, got: {address}") if not is_canonical_address(address): raise ValidationError( "Py-EthPM library only accepts canonicalized addresses. " f"{address} is not in the accepted format." )
def validate_address(address: Any) -> None
Raise a ValidationError if an address is not canonicalized.
6.265985
4.679381
1.339063
slot_length = offset + length slot = bytecode[offset:slot_length] if slot != bytearray(length): raise ValidationError( f"Bytecode segment: [{offset}:{slot_length}] is not comprised of empty bytes, " f"rather: {slot}." )
def validate_empty_bytes(offset: int, length: int, bytecode: bytes) -> None
Validates that segment [`offset`:`offset`+`length`] of `bytecode` is comprised of empty bytes (b'\00').
6.309968
4.135105
1.525951
# re.match already returns None (falsy) on failure; the bool() wrapper
# was redundant. Comparing against None makes the intent explicit.
if re.match(PACKAGE_NAME_REGEX, pkg_name) is None:
    raise ValidationError(f"{pkg_name} is not a valid package name.")
def validate_package_name(pkg_name: str) -> None
Raise an exception if the value is not a valid package name as defined in the EthPM-Spec.
3.190868
2.556426
1.248176
parsed = parse.urlparse(uri)
# Break the URI into the pieces each sub-validator expects.
scheme, authority, pkg_name, query = (
    parsed.scheme,
    parsed.netloc,
    parsed.path,
    parsed.query,
)
validate_registry_uri_scheme(scheme)
validate_registry_uri_authority(authority)
# The version query param is optional; only validate when present.
if query:
    validate_registry_uri_version(query)
# Drop the leading "/" urlparse leaves on the path component.
validate_package_name(pkg_name[1:])
def validate_registry_uri(uri: str) -> None
Raise an exception if the URI does not conform to the registry URI scheme.
3.077015
2.895932
1.06253
# Idiomatic negation instead of the "is False" identity check.
# NOTE(review): assumes is_ens_domain returns a plain bool — confirm.
if not is_ens_domain(auth) and not is_checksum_address(auth):
    raise ValidationError(f"{auth} is not a valid registry URI authority.")
def validate_registry_uri_authority(auth: str) -> None
Raise an exception if the authority is not a valid ENS domain or a valid checksummed contract address.
6.798785
3.39119
2.004838
query_dict = parse.parse_qs(query, keep_blank_values=True) if "version" not in query_dict: raise ValidationError(f"{query} is not a correctly formatted version param.")
def validate_registry_uri_version(query: str) -> None
Raise an exception if the version param is malformed.
4.788084
3.313483
1.44503
matching_uris = [
    uri for uri in all_blockchain_uris if check_if_chain_matches_chain_uri(w3, uri)
]
# Exactly one URI must match the connected chain; zero or many is an error.
if not matching_uris:
    raise ValidationError("Package has no matching URIs on chain.")
elif len(matching_uris) != 1:
    raise ValidationError(
        f"Package has too many ({len(matching_uris)}) matching URIs: {matching_uris}."
    )
return matching_uris[0]
def validate_single_matching_uri(all_blockchain_uris: List[str], w3: Web3) -> str
Return a single block URI after validating that it is the *only* URI in all_blockchain_uris that matches the w3 instance.
3.151826
3.015052
1.045364
for key, value in meta.items():
    if key in META_FIELDS:
        # Known fields must carry exactly the declared type
        # (deliberate exact-type check, not isinstance).
        if type(value) is not META_FIELDS[key]:
            raise ValidationError(
                f"Values for {key} are expected to have the type {META_FIELDS[key]}, "
                f"instead got {type(value)}."
            )
    elif allow_extra_meta_fields:
        # Extra fields are only permitted with an "x-" prefix.
        if key[:2] != "x-":
            raise ValidationError(
                "Undefined meta fields need to begin with 'x-', "
                f"{key} is not a valid undefined meta field."
            )
    else:
        raise ValidationError(
            f"{key} is not a permitted meta field. To allow undefined fields, "
            "set `allow_extra_meta_fields` to True."
        )
def validate_meta_object(meta: Dict[str, Any], allow_extra_meta_fields: bool) -> None
Validates that every key is one of `META_FIELDS` and has a value of the expected type.
2.894345
2.645778
1.093948
schema_data = _load_schema_data()
try:
    validate(manifest, schema_data)
except jsonValidationError as e:
    # Chain the original jsonschema error so debugging retains the
    # full validation context.
    raise ValidationError(
        f"Manifest invalid for schema version {schema_data['version']}. "
        f"Reason: {e.message}"
    ) from e
def validate_manifest_against_schema(manifest: Dict[str, Any]) -> None
Load and validate manifest against schema located at MANIFEST_SCHEMA_PATH.
4.048726
3.679832
1.100247
if set(("contract_types", "deployments")).issubset(manifest): all_contract_types = list(manifest["contract_types"].keys()) all_deployments = list(manifest["deployments"].values()) all_deployment_names = extract_contract_types_from_deployments(all_deployments) missing_contract_types = set(all_deployment_names).difference( all_contract_types ) if missing_contract_types: raise ValidationError( f"Manifest missing references to contracts: {missing_contract_types}." )
def validate_manifest_deployments(manifest: Dict[str, Any]) -> None
Validate that a manifest's deployments contracts reference existing contract_types.
3.292239
2.628244
1.252638
try: manifest_dict = json.loads(raw_manifest, encoding="UTF-8") except json.JSONDecodeError as err: raise json.JSONDecodeError( "Failed to load package data. File is not a valid JSON document.", err.doc, err.pos, ) compact_manifest = json.dumps(manifest_dict, sort_keys=True, separators=(",", ":")) if raw_manifest != compact_manifest: raise ValidationError( "The manifest appears to be malformed. Please ensure that it conforms to the " "EthPM-Spec for document format. " "http://ethpm.github.io/ethpm-spec/package-spec.html#document-format " )
def validate_raw_manifest_format(raw_manifest: str) -> None
Raise a ValidationError if a manifest ... - is not tightly packed (i.e. no linebreaks or extra whitespace) - does not have alphabetically sorted keys - has duplicate keys - is not UTF-8 encoded - has a trailing newline
3.933915
3.701388
1.062822
# Thread ``obj`` through each builder stage in order (toolz-style pipe).
return pipe(obj, *fns)
def build(obj: Dict[str, Any], *fns: Callable[..., Any]) -> Dict[str, Any]
Wrapper function to pipe manifest through build functions. Does not validate the manifest by default.
17.088421
19.795385
0.863253
# NOTE(review): if the same contract name exists under multiple source
# paths, later paths silently overwrite earlier ones in this mapping.
return {
    contract_name: make_path_relative(path)
    for path in compiler_output
    for contract_name in compiler_output[path].keys()
}
def get_names_and_paths(compiler_output: Dict[str, Any]) -> Dict[str, str]
Return a mapping of contract name to relative path as defined in compiler output.
4.818773
3.37498
1.427793
if "../" in path: raise ManifestBuildingError( f"Path: {path} appears to be outside of the virtual source tree. " "Please make sure all sources are within the virtual source tree root directory." ) if path[:2] != "./": return f"./{path}" return path
def make_path_relative(path: str) -> str
Returns the given path prefixed with "./" if the path is not already relative in the compiler output.
7.862459
6.247543
1.258488
contract_type_fields = {
    "contract_type": contract_type,
    "deployment_bytecode": deployment_bytecode,
    "runtime_bytecode": runtime_bytecode,
    "abi": abi,
    "natspec": natspec,
    "compiler": compiler,
}
# Keep only fields the caller flagged with a truthy kwarg; an empty
# selection means "include everything available" downstream.
selected_fields = [k for k, v in contract_type_fields.items() if v]
return _contract_type(name, compiler_output, alias, selected_fields)
def contract_type( name: str, compiler_output: Dict[str, Any], alias: Optional[str] = None, abi: Optional[bool] = False, compiler: Optional[bool] = False, contract_type: Optional[bool] = False, deployment_bytecode: Optional[bool] = False, natspec: Optional[bool] = False, runtime_bytecode: Optional[bool] = False, ) -> Manifest
Returns a copy of manifest with added contract_data field as specified by kwargs. If no kwargs are present, all available contract_data found in the compiler output will be included. To include specific contract_data fields, add kwarg set to True (i.e. `abi=True`) To alias a contract_type, include a kwarg `alias` (i.e. `alias="OwnedAlias"`) If only an alias kwarg is provided, all available contract data will be included. Kwargs must match fields as defined in the EthPM Spec (except "alias") if user wants to include them in custom contract_type.
2.079057
2.264912
0.917942
for field in selected_fields: if field in all_type_data: yield field, all_type_data[field] else: raise ManifestBuildingError( f"Selected field: {field} not available in data collected from solc output: " f"{list(sorted(all_type_data.keys()))}. Please make sure the relevant data " "is present in your solc output." )
def filter_all_data_by_selected_fields( all_type_data: Dict[str, Any], selected_fields: List[str] ) -> Iterable[Tuple[str, Any]]
Raises exception if selected field data is not available in the contract type data automatically gathered by normalize_compiler_output. Otherwise, returns the data.
4.173787
3.614731
1.15466
paths_and_names = [
    (path, contract_name)
    for path in compiler_output
    for contract_name in compiler_output[path].keys()
]
paths, names = zip(*paths_and_names)
# Contract names must be globally unique across all source paths,
# since the returned mapping is keyed by name alone.
if len(names) != len(set(names)):
    raise ManifestBuildingError(
        "Duplicate contract names were found in the compiler output."
    )
return {
    name: normalize_contract_type(compiler_output[path][name])
    for path, name in paths_and_names
}
def normalize_compiler_output(compiler_output: Dict[str, Any]) -> Dict[str, Any]
Return compiler output with normalized fields for each contract type, as specified in `normalize_contract_type`.
3.042522
2.807451
1.083731
yield "abi", contract_type_data["abi"] if "evm" in contract_type_data: if "bytecode" in contract_type_data["evm"]: yield "deployment_bytecode", normalize_bytecode_object( contract_type_data["evm"]["bytecode"] ) if "deployedBytecode" in contract_type_data["evm"]: yield "runtime_bytecode", normalize_bytecode_object( contract_type_data["evm"]["deployedBytecode"] ) if any(key in contract_type_data for key in NATSPEC_FIELDS): natspec = deep_merge_dicts( contract_type_data.get("userdoc", {}), contract_type_data.get("devdoc", {}) ) yield "natspec", natspec # make sure metadata isn't an empty string in solc output if "metadata" in contract_type_data and contract_type_data["metadata"]: yield "compiler", normalize_compiler_object( json.loads(contract_type_data["metadata"]) )
def normalize_contract_type( contract_type_data: Dict[str, Any] ) -> Iterable[Tuple[str, Any]]
Serialize contract_data found in compiler output to the defined fields.
2.463265
2.370378
1.039186
all_offsets = [y for x in link_refs.values() for y in x.values()] # Link ref validation. validate_link_ref_fns = ( validate_link_ref(ref["start"] * 2, ref["length"] * 2) for ref in concat(all_offsets) ) pipe(bytecode, *validate_link_ref_fns) # Convert link_refs in bytecode to 0's link_fns = ( replace_link_ref_in_bytecode(ref["start"] * 2, ref["length"] * 2) for ref in concat(all_offsets) ) processed_bytecode = pipe(bytecode, *link_fns) return add_0x_prefix(processed_bytecode)
def process_bytecode(link_refs: Dict[str, Any], bytecode: bytes) -> str
Replace link_refs in bytecode with 0's.
4.205227
3.538317
1.188482
# Thin wrapper: defer to the curried ``_deployment_type`` builder stage.
return _deployment_type(
    contract_instance,
    contract_type,
    deployment_bytecode,
    runtime_bytecode,
    compiler,
)
def deployment_type( *, contract_instance: str, contract_type: str, deployment_bytecode: Dict[str, Any] = None, runtime_bytecode: Dict[str, Any] = None, compiler: Dict[str, Any] = None, ) -> Manifest
Returns a callable that allows the user to add deployments of the same type across multiple chains.
2.319232
2.430945
0.954045
# Thin wrapper: defer to the curried ``_deployment`` builder stage.
return _deployment(
    contract_instance,
    contract_type,
    deployment_bytecode,
    runtime_bytecode,
    compiler,
    block_uri,
    address,
    transaction,
    block,
)
def deployment( *, block_uri: URI, contract_instance: str, contract_type: str, address: HexStr, transaction: HexStr = None, block: HexStr = None, deployment_bytecode: Dict[str, Any] = None, runtime_bytecode: Dict[str, Any] = None, compiler: Dict[str, Any] = None, ) -> Manifest
Returns a manifest, with the newly included deployment. Requires a valid blockchain URI, however no validation is provided that this URI is unique amongst the other deployment URIs, so the user must take care that each blockchain URI represents a unique blockchain.
2.378958
2.654021
0.89636
yield "contract_type", contract_type yield "address", to_hex(address) if deployment_bytecode: yield "deployment_bytecode", deployment_bytecode if compiler: yield "compiler", compiler if tx: yield "transaction", tx if block: yield "block", block if runtime_bytecode: yield "runtime_bytecode", runtime_bytecode
def _build_deployments_object( contract_type: str, deployment_bytecode: Dict[str, Any], runtime_bytecode: Dict[str, Any], compiler: Dict[str, Any], address: HexStr, tx: HexStr, block: HexStr, manifest: Dict[str, Any], ) -> Iterable[Tuple[str, Any]]
Returns a dict with properly formatted deployment data.
1.828822
1.874161
0.975809
return { "package_name": package_name, "version": version, "manifest_version": manifest_version, }
def init_manifest( package_name: str, version: str, manifest_version: Optional[str] = "2" ) -> Dict[str, Any]
Returns an initial dict with the minimal requried fields for a valid manifest. Should only be used as the first fn to be piped into a `build()` pipeline.
2.224052
2.075308
1.071673
# Thin wrapper: defer to the curried ``_write_to_disk`` builder stage.
return _write_to_disk(manifest_root_dir, manifest_name, prettify)
def write_to_disk( manifest_root_dir: Optional[Path] = None, manifest_name: Optional[str] = None, prettify: Optional[bool] = False, ) -> Manifest
Write the active manifest to disk Defaults - Writes manifest to cwd unless Path is provided as manifest_root_dir. - Writes manifest with a filename of Manifest[version].json unless a desired manifest name (which must end in json) is provided as manifest_name. - Writes the minified manifest version to disk unless prettify is set to True.
3.257989
3.865169
0.84291
contents = format_manifest(manifest, prettify=prettify)
# Stage the manifest in a temp file so the backend can pin it by path.
with tempfile.NamedTemporaryFile() as temp:
    temp.write(to_bytes(text=contents))
    temp.seek(0)  # rewind so the backend reads from the start
    return backend.pin_assets(Path(temp.name))
def pin_to_ipfs( manifest: Manifest, *, backend: BaseIPFSBackend, prettify: Optional[bool] = False ) -> List[Dict[str, str]]
Returns the IPFS pin data after pinning the manifest to the provided IPFS Backend. `pin_to_ipfs()` Should *always* be the last argument in a builder, as it will return the pin data and not the manifest.
4.286696
4.982472
0.860355
argparser = argparse.ArgumentParser(fromfile_prefix_chars='@') argparser.add_argument('-H', '--host', help='Host to which the app binds. [%(default)s]', default='0.0.0.0') argparser.add_argument('-p', '--port', help='Port to which the app binds. [%(default)s]', default=5000, type=int) argparser.add_argument('-o', '--output', help='Logging output. [%(default)s]') argparser.add_argument('-n', '--name', help='Service name. This will be used as prefix for all endpoints. [%(default)s]', default=default_name) argparser.add_argument('--debug', help='Run the app in debug mode. [%(default)s]', action='store_true') argparser.add_argument('--eventbus-host', help='Hostname at which the eventbus can be reached [%(default)s]', default='eventbus') argparser.add_argument('--eventbus-port', help='Port at which the eventbus can be reached [%(default)s]', default=5672, type=int) return argparser
def create_parser(default_name: str) -> argparse.ArgumentParser
Creates the default brewblox_service ArgumentParser. Service-agnostic arguments are added. The parser allows calling code to add additional arguments before using it in create_app() Args: default_name (str): default value for the --name commandline argument. Returns: argparse.ArgumentParser: a Python ArgumentParser with defaults set.
2.033455
2.012395
1.010465
if parser is None:
    # Without an app-specific parser we need a default service name
    # to build the standard one.
    assert default_name, 'Default service name is required'
    parser = create_parser(default_name)
args = parser.parse_args(raw_args)
_init_logging(args)
LOGGER.info(f'Creating [{args.name}] application')
app = web.Application()
# Expose parsed CLI arguments to all handlers via app['config'].
app['config'] = vars(args)
return app
def create_app( default_name: str = None, parser: argparse.ArgumentParser = None, raw_args: List[str] = None ) -> web.Application
Creates and configures an Aiohttp application. Args: default_name (str, optional): Default value for the --name commandline argument. This value is required if `parser` is not provided. This value will be ignored if `parser` is provided. parser (argparse.ArgumentParser, optional): Application-specific parser. If not provided, the return value of `create_parser()` will be used. raw_args (list of str, optional): Explicit commandline arguments. Defaults to sys.argv[1:] Returns: web.Application: A configured Aiohttp Application object. This Application must be furnished, and is not yet running.
3.428429
3.814733
0.898734
app_name = app['config']['name']
prefix = '/' + app_name.lstrip('/')
app.router.add_routes(routes)
cors_middleware.enable_cors(app)
# Configure CORS and prefixes on all endpoints.
known_resources = set()
for route in list(app.router.routes()):
    # Multiple routes can share one resource; prefix each resource once.
    if route.resource in known_resources:
        continue
    known_resources.add(route.resource)
    route.resource.add_prefix(prefix)
# Configure swagger settings
# We set prefix explicitly here
aiohttp_swagger.setup_swagger(app,
                              swagger_url=prefix + '/api/doc',
                              description='',
                              title=f'Brewblox Service "{app_name}"',
                              api_version='0.0',
                              contact='development@brewpi.com')
LOGGER.info('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))
for route in app.router.routes():
    LOGGER.info(f'Endpoint [{route.method}] {route.resource}')
for name, impl in app.get(features.FEATURES_KEY, {}).items():
    LOGGER.info(f'Feature [{name}] {impl}')
def furnish(app: web.Application)
Configures Application routes, readying it for running. This function modifies routes and resources that were added by calling code, and must be called immediately prior to `run(app)`. Args: app (web.Application): The Aiohttp Application as created by `create_app()`
5.701588
5.778715
0.986653
host = app['config']['host']
port = app['config']['port']
# starts app. run_app() will automatically start the async context.
# Blocks until the application is shut down.
web.run_app(app, host=host, port=port)
def run(app: web.Application)
Runs the application in an async context. This function will block indefinitely until the application is shut down. Args: app (web.Application): The Aiohttp Application as created by `create_app()`
6.300467
6.837298
0.921485
# Keep only deployments whose runtime bytecode declares link dependencies.
linked_deployments = {
    dep: data
    for dep, data in deployments.items()
    if get_in(("runtime_bytecode", "link_dependencies"), data)
}
for deployment, data in linked_deployments.items():
    # A deployment may not link against its own contract instance.
    if any(
        link_dep["value"] == deployment
        for link_dep in data["runtime_bytecode"]["link_dependencies"]
    ):
        raise BytecodeLinkingError(
            f"Link dependency found in {deployment} deployment that references its "
            "own contract instance, which is disallowed"
        )
return linked_deployments
def get_linked_deployments(deployments: Dict[str, Any]) -> Dict[str, Any]
Returns all deployments found in a chain URI's deployment data that contain link dependencies.
5.045632
4.422785
1.140827
offsets, values = zip(*link_deps) for idx, offset in enumerate(offsets): value = values[idx] # https://github.com/python/mypy/issues/4975 offset_value = int(offset) dep_length = len(value) end_of_bytes = offset_value + dep_length # Ignore b/c whitespace around ':' conflict b/w black & flake8 actual_bytes = bytecode[offset_value:end_of_bytes] # noqa: E203 if actual_bytes != values[idx]: raise ValidationError( "Error validating linked reference. " f"Offset: {offset} " f"Value: {values[idx]} " f"Bytecode: {bytecode} ." )
def validate_linked_references( link_deps: Tuple[Tuple[int, bytes], ...], bytecode: bytes ) -> None
Validates that normalized linked_references (offset, expected_bytes) match the corresponding bytecode.
4.52761
4.306994
1.051223
for deployment in data: for offset in deployment["offsets"]: yield offset, deployment["type"], deployment["value"]
def normalize_linked_references( data: List[Dict[str, Any]] ) -> Generator[Tuple[int, str, str], None, None]
Return a tuple of information representing all insertions of a linked reference. (offset, type, value)
8.489205
6.11419
1.388443
# todo: provide hook to lazily look up tx receipt via binary search if missing data
for name, data in deployments.items():
    if "transaction" in data:
        tx_hash = data["transaction"]
        tx_receipt = w3.eth.getTransactionReceipt(tx_hash)
        # tx_address will be None if contract created via contract factory
        tx_address = tx_receipt["contractAddress"]
        if tx_address is None and allow_missing_data is False:
            raise ValidationError(
                "No contract address found in tx receipt. Unable to verify "
                "address found in tx receipt matches address in manifest's deployment data. "
                "If this validation is not necessary, please enable `allow_missing_data` arg. "
            )
        if tx_address is not None and not is_same_address(
            tx_address, data["address"]
        ):
            raise ValidationError(
                f"Error validating tx_receipt for {name} deployment. "
                f"Address found in manifest's deployment data: {data['address']} "
                f"Does not match address found on tx_receipt: {tx_address}."
            )
        if "block" in data:
            if tx_receipt["blockHash"] != to_bytes(hexstr=data["block"]):
                # Message fix: previously read "... does not Does not match ..."
                raise ValidationError(
                    f"Error validating tx_receipt for {name} deployment. "
                    f"Block found in manifest's deployment data: {data['block']} "
                    f"does not match block found on tx_receipt: {tx_receipt['blockHash']}."
                )
        elif allow_missing_data is False:
            raise ValidationError(
                "No block hash found in deployment data. "
                "Unable to verify block hash on tx receipt. "
                "If this validation is not necessary, please enable `allow_missing_data` arg."
            )
    elif allow_missing_data is False:
        raise ValidationError(
            "No transaction hash found in deployment data. "
            "Unable to validate tx_receipt. "
            "If this validation is not necessary, please enable `allow_missing_data` arg."
        )
def validate_deployments_tx_receipt( deployments: Dict[str, Any], w3: Web3, allow_missing_data: bool = False ) -> None
Validate that address and block hash found in deployment data match what is found on-chain. :allow_missing_data: by default, enforces validation of address and blockHash.
2.716045
2.566055
1.058452
for link_ref in link_refs: for offset in link_ref["offsets"]: try: validate_empty_bytes(offset, link_ref["length"], bytecode) except ValidationError: return True return False
def is_prelinked_bytecode(bytecode: bytes, link_refs: List[Dict[str, Any]]) -> bool
Returns False if all expected link_refs are unlinked, otherwise returns True. todo support partially pre-linked bytecode (currently all or nothing)
4.835971
4.091491
1.181958
# Nothing to link -> bytecode passes through untouched.
if link_refs is None:
    return bytecode
# One curried link function per (reference, offset) pair; attr_dict maps
# reference names to the address bytes to splice in.
link_fns = (
    apply_link_ref(offset, ref["length"], attr_dict[ref["name"]])
    for ref in link_refs
    for offset in ref["offsets"]
)
linked_bytecode = pipe(bytecode, *link_fns)
return linked_bytecode
def apply_all_link_refs( bytecode: bytes, link_refs: List[Dict[str, Any]], attr_dict: Dict[str, str] ) -> bytes
Applies all link references corresponding to a valid attr_dict to the bytecode.
3.918962
3.480247
1.126059
try:
    # The target slot must still be zeroed out (i.e. unlinked).
    validate_empty_bytes(offset, length, bytecode)
except ValidationError:
    raise BytecodeLinkingError("Link references cannot be applied to bytecode")
new_bytes = (
    # Ignore linting error b/c conflict b/w black & flake8
    bytecode[:offset] + value + bytecode[offset + length :]  # noqa: E201, E203
)
return new_bytes
def apply_link_ref(offset: int, length: int, value: bytes, bytecode: bytes) -> bytes
Returns the new bytecode with `value` put into the location indicated by `offset` and `length`.
7.233094
6.498395
1.113059
if not cls.unlinked_references and not cls.linked_references:
    raise BytecodeLinkingError("Contract factory has no linkable bytecode.")
if not cls.needs_bytecode_linking:
    raise BytecodeLinkingError(
        "Bytecode for this contract factory does not require bytecode linking."
    )
# attr_dict must name every link reference exactly once.
cls.validate_attr_dict(attr_dict)
bytecode = apply_all_link_refs(cls.bytecode, cls.unlinked_references, attr_dict)
runtime = apply_all_link_refs(
    cls.bytecode_runtime, cls.linked_references, attr_dict
)
# Clone the factory carrying the fully linked bytecodes.
linked_class = cls.factory(
    cls.web3, bytecode_runtime=runtime, bytecode=bytecode
)
if linked_class.needs_bytecode_linking:
    raise BytecodeLinkingError(
        "Expected class to be fully linked, but class still needs bytecode linking."
    )
return linked_class
def link_bytecode(cls, attr_dict: Dict[str, str]) -> Type["LinkableContract"]
Return a cloned contract factory with the deployment / runtime bytecode linked. :attr_dict: Dict[`ContractType`: `Address`] for all deployment and runtime link references.
3.568019
3.242904
1.100254
attr_dict_names = list(attr_dict.keys())
if not self.unlinked_references and not self.linked_references:
    raise BytecodeLinkingError(
        "Unable to validate attr dict, this contract has no linked/unlinked references."
    )
# Empty placeholder keeps the concatenation below safe.
# NOTE(review): the ({},) placeholder has no "name" key — assumes the
# reference lists are non-empty whenever they are present; confirm.
unlinked_refs = self.unlinked_references or ({},)
linked_refs = self.linked_references or ({},)
all_link_refs = unlinked_refs + linked_refs
all_link_names = [ref["name"] for ref in all_link_refs]
# The caller must supply exactly the set of declared reference names.
if set(attr_dict_names) != set(all_link_names):
    raise BytecodeLinkingError(
        "All link references must be defined when calling "
        "`link_bytecode` on a contract factory."
    )
for address in attr_dict.values():
    if not is_canonical_address(address):
        raise BytecodeLinkingError(
            f"Address: {address} as specified in the attr_dict is not "
            "a valid canoncial address."
        )
def validate_attr_dict(self, attr_dict: Dict[str, str]) -> None
Validates that ContractType keys in attr_dict reference existing manifest ContractTypes.
3.598618
3.369988
1.067843
def items(self) -> Tuple[Tuple[str, "Package"], ...]:  # type: ignore
    """Return (name, Package) pairs for every registered build dependency."""
    pairs = (
        (name, self.build_dependencies.get(name))
        for name in self.build_dependencies
    )
    return tuple(pairs)
Return an iterable containing package name and corresponding `Package` instance that are available.
null
null
null
def values(self) -> List["Package"]:  # type: ignore
    """Return the `Package` instance for every registered build dependency."""
    return [
        self.build_dependencies.get(name) for name in self.build_dependencies
    ]
Return an iterable of the available `Package` instances.
null
null
null
def get_dependency_package(
    self, package_name: str
) -> "Package":  # type: ignore  # noqa: F821
    """Return the dependency ``Package`` registered under ``package_name``."""
    # Raises before the lookup if the name is invalid/unknown.
    self._validate_name(package_name)
    return self.build_dependencies.get(package_name)
Return the dependency Package for a given package name.
null
null
null
# Create the feature registry on first use.
if FEATURES_KEY not in app:
    app[FEATURES_KEY] = dict()
# Default the registry key to the feature's own type.
key = key or type(feature)
if key in app[FEATURES_KEY]:
    if exist_ok:
        return
    else:
        raise KeyError(f'Feature "{key}" already registered')
app[FEATURES_KEY][key] = feature
def add(app: web.Application, feature: Any, key: Hashable = None, exist_ok: bool = False )
Adds a new feature to the app. Features can either be registered as the default feature for the class, or be given an explicit name. Args: app (web.Application): The current Aiohttp application. feature (Any): The new feature that should be registered. It is recommended, but not required to use a `ServiceFeature`. key (Hashable, optional): The key under which the feature should be registered. Defaults to `type(feature)`. exist_ok (bool): If truthy, this function will do nothing if a feature was already registered for `key`. Otherwise, an exception is raised.
2.713314
2.645083
1.025795
# Fall back to the type itself as the registry key.
key = key or feature_type
if not key:
    raise AssertionError('No feature identifier provided')
try:
    found = app[FEATURES_KEY][key]
except KeyError:
    raise KeyError(f'No feature found for "{key}"')
# Optional type check guards against key collisions.
if feature_type and not isinstance(found, feature_type):
    raise AssertionError(f'Found {found} did not match type "{feature_type}"')
return found
def get(app: web.Application, feature_type: Type[Any] = None, key: Hashable = None ) -> Any
Finds declared feature. Identification is done based on feature type and key. Args: app (web.Application): The current Aiohttp application. feature_type (Type[Any]): The Python type of the desired feature. If specified, it will be checked against the found feature. key (Hashable): A specific identifier for the desired feature. Defaults to `feature_type` Returns: Any: The feature found for the combination of `feature_type` and `key`
3.419882
3.759459
0.909674
if not all(key in contract_data.keys() for key in ("abi", "deployment_bytecode")): raise InsufficientAssetsError( "Minimum required contract data to generate a deployable " "contract factory (abi & deployment_bytecode) not found." )
def validate_minimal_contract_factory_data(contract_data: Dict[str, str]) -> None
Validate that contract data in a package contains at least an "abi" and "deployment_bytecode" necessary to generate a deployable contract factory.
6.384696
3.69643
1.72726
def generate_contract_factory_kwargs(
    contract_data: Dict[str, Any]
) -> Generator[Tuple[str, Any], None, None]:
    """Build a dictionary of kwargs to be passed into contract factory.

    Yields (kwarg_name, value) pairs for every asset present in
    `contract_data`: the abi, deployment bytecode (with its unlinked
    references, when present), and runtime bytecode (with its linked
    references, when present).
    """
    if "abi" in contract_data:
        yield "abi", contract_data["abi"]
    # (source key in contract_data, bytecode kwarg, link-references kwarg)
    sections = (
        ("deployment_bytecode", "bytecode", "unlinked_references"),
        ("runtime_bytecode", "bytecode_runtime", "linked_references"),
    )
    for source_key, code_kwarg, refs_kwarg in sections:
        if source_key in contract_data:
            section = contract_data[source_key]
            yield code_kwarg, section["bytecode"]
            if "link_references" in section:
                yield refs_kwarg, tuple(section["link_references"])
1.706231
1.709707
0.997967
async def post_publish(request):
    """
    ---
    tags:
    - Events
    summary: Publish event.
    description: Publish a new event message to the event bus.
    operationId: events.publish
    produces:
    - text/plain
    parameters:
    -
        in: body
        name: body
        description: Event message
        required: true
        schema:
            type: object
            properties:
                exchange:
                    type: string
                routing:
                    type: string
                message:
                    type: object
    """
    args = await request.json()
    try:
        publisher = get_publisher(request.app)
        await publisher.publish(
            args['exchange'],
            args['routing'],
            args['message']
        )
        return web.Response()
    except Exception as ex:
        # Best-effort endpoint: report the failure instead of crashing the handler.
        warnings.warn(f'Unable to publish {args}: {ex}')
        return web.Response(body='Event bus connection refused', status=500)
4.607201
4.715381
0.977058
async def post_subscribe(request):
    """
    ---
    tags:
    - Events
    summary: Subscribe to events.
    operationId: events.subscribe
    produces:
    - text/plain
    parameters:
    -
        in: body
        name: body
        description: Event message
        required: true
        schema:
            type: object
            properties:
                exchange:
                    type: string
                routing:
                    type: string
    """
    args = await request.json()
    listener = get_listener(request.app)
    listener.subscribe(args['exchange'], args['routing'])
    return web.Response()
7.386188
10.727887
0.688503
# Relays incoming messages between the queue and the user callback.
# NOTE(review): the message is acked *before* the callback runs, so a message
# whose callback fails is NOT redelivered — confirm this at-most-once
# delivery is intended.
try:
    await channel.basic_client_ack(envelope.delivery_tag)
    await self.on_message(self, envelope.routing_key, json.loads(body))
except Exception as ex:
    # Best-effort: a bad message (or failing callback) must not kill the
    # listener task; log and move on to the next message.
    LOGGER.error(f'Exception relaying message in {self}: {ex}')
async def _relay(self, channel: aioamqp.channel.Channel, body: str, envelope: aioamqp.envelope.Envelope, properties: aioamqp.properties.Properties)
Relays incoming messages between the queue and the user callback
3.395058
3.272721
1.037381
if all([ self._loop, not self.running, self._subscriptions or (self._pending and not self._pending.empty()), ]): self._task = self._loop.create_task(self._listen())
def _lazy_listen(self)
Ensures that the listener task only runs when actually needed. This function is a no-op if any of the preconditions is not met. Preconditions are: * The application is running (self._loop is set) * The task is not already running * There are subscriptions: either pending, or active
5.656684
4.40746
1.283434
def subscribe(self,
              exchange_name: str,
              routing: str,
              exchange_type: ExchangeType_ = 'topic',
              on_message: EVENT_CALLBACK_ = None
              ) -> EventSubscription:
    """Adds a new event subscription to the listener.

    Actual queue declaration to the remote message server is done when
    connected. If the listener is not currently connected, it defers
    declaration. All existing subscriptions are redeclared on the remote
    if `EventListener` loses and recreates the connection.

    Args:
        exchange_name (str): Name of the AMQP exchange.

        routing (str): Filter messages passing through the exchange.
            A routing key is a '.'-separated string, and accepts '#'
            and '*' wildcards.

        exchange_type (ExchangeType_, optional): If the exchange does not
            yet exist, it will be created with this type. Default is
            `topic`, acceptable values are `topic`, `fanout`, or `direct`.

        on_message (EVENT_CALLBACK_, optional): The function to be called
            when a new message is received. If `on_message` is none, it
            will default to logging the message.

    Returns:
        EventSubscription: The newly created subscription. This value can
            safely be discarded: EventListener keeps its own reference.
    """
    subscription = EventSubscription(
        exchange_name,
        routing,
        exchange_type,
        on_message=on_message
    )
    queue = self._pending
    if queue is None:
        # Async machinery not up yet: park the subscription until it is.
        self._pending_pre_async.append(subscription)
        LOGGER.info(f'Deferred event bus subscription: [{subscription}]')
    else:
        queue.put_nowait(subscription)
    self._lazy_listen()
    return subscription
4.987219
6.034009
0.826518
# Publish a message to `exchange` with routing key `routing`, (re)connecting
# to the broker on demand.
try:
    await self._ensure_channel()
except Exception:
    # If server has restarted since our last attempt, ensure channel will fail (old connection invalid)
    # Retry once to check whether a new connection can be made
    await self._ensure_channel()

# json.dumps() also correctly handles strings
data = json.dumps(message).encode()

# Declare is idempotent: it creates the exchange only if it does not exist.
# auto_delete=True removes the exchange once its last binding is gone.
await self._channel.exchange_declare(
    exchange_name=exchange,
    type_name=exchange_type,
    auto_delete=True
)

await self._channel.basic_publish(
    payload=data,
    exchange_name=exchange,
    routing_key=routing
)
async def publish(self, exchange: str, routing: str, message: Union[str, dict], exchange_type: ExchangeType_ = 'topic')
Publish a new event message. Connections are created automatically when calling `publish()`, and will attempt to reconnect if connection was lost. For more information on publishing AMQP messages, see https://www.rabbitmq.com/tutorials/tutorial-three-python.html Args: exchange (str): The AMQP message exchange to publish the message to. A new exchange will be created if it does not yet exist. routing (str): The routing identification with which the message should be published. Subscribers use routing information for fine-grained filtering. Routing can be expressed as a '.'-separated path. message (Union[str, dict]): The message body. It will be serialized before transmission. exchange_type (ExchangeType_, optional): When publishing to a previously undeclared exchange, it will be created. `exchange_type` defines how the exchange distributes messages between subscribers. The default is 'topic', and acceptable values are: 'topic', 'direct', or 'fanout'. Raises: aioamqp.exceptions.AioamqpException: * Failed to connect to AMQP host * Failed to send message * `exchange` already exists, but has a different `exchange_type`
5.04992
5.00207
1.009566
def is_ens_domain(authority: str) -> bool:
    """Return False if authority is not a valid ENS domain.

    A valid ENS authority ends with the '.eth' TLD and has either 2 or 3
    dot-separated labels, i.e. 'zeppelinos.eth' or 'packages.zeppelinos.eth'.
    """
    # str.endswith is clearer (and safe on short strings) vs manual slicing.
    return authority.endswith(".eth") and len(authority.split(".")) in (2, 3)
5.992643
4.895196
1.224189
def splitPrefix(name):
    """
    Split the name into a tuple (I{prefix}, I{name}).  The first element in
    the tuple is I{None} when the name doesn't have a prefix.
    @param name: A node name containing an optional prefix.
    @type name: basestring
    @return: A tuple containing the (2) parts of I{name}
    @rtype: (I{prefix}, I{name})
    """
    prefixed = isinstance(name, basestring) and ':' in name
    if prefixed:
        prefix, local = name.split(':', 1)
        return (prefix, local)
    return (None, name)
3.959623
4.823907
0.820833
def unwrap(self, d, item):
    """Translate (unwrap) using an optional wrapper function.

    Looks up a per-attribute wrapper in d.__metadata__.__print__.wrappers
    and applies it to the item's value. Any failure (missing metadata,
    wrapper error) falls back to returning the item unchanged — this is a
    deliberate best-effort used for printing.
    """
    nopt = (lambda x: x)
    try:
        md = d.__metadata__
        pmd = getattr(md, '__print__', None)
        if pmd is None:
            return item
        wrappers = getattr(pmd, 'wrappers', {})
        fn = wrappers.get(item[0], nopt)
        return (item[0], fn(item[1]))
    # `except Exception` (not bare `except:`) so KeyboardInterrupt/SystemExit
    # still propagate; everything else keeps the best-effort fallback.
    except Exception:
        return item
5.654027
5.623501
1.005428
def exclude(self, d, item):
    """Check metadata for excluded items.

    Returns True when the item's key is listed in
    d.__metadata__.__print__.excludes; any failure (missing metadata)
    means "not excluded" — a deliberate best-effort for printing.
    """
    try:
        md = d.__metadata__
        pmd = getattr(md, '__print__', None)
        if pmd is None:
            return False
        excludes = getattr(pmd, 'excludes', [])
        return (item[0] in excludes)
    # `except Exception` (not bare `except:`) so KeyboardInterrupt/SystemExit
    # still propagate; everything else keeps the best-effort fallback.
    except Exception:
        return False
5.077162
4.537495
1.118935
def mangle(self, name, x):
    """
    Mangle the name by hashing the I{name} and appending I{x}.
    @return: the mangled name.
    """
    digest = abs(hash(name))
    return '{0}-{1}'.format(digest, x)
7.977896
9.192563
0.867864