sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def getObjectFromTaskArg(theTask, strict, setAllToDefaults):
    """ Coerce *theTask* into a ConfigObjPars object.

    theTask may already be a ConfigObjPars (or subclass) instance, a .cfg
    file name, or a python package name - whatever it is, return a
    ConfigObjPars object for it.

    strict - bool - warning severity, passed to the ConfigObjPars() ctor
    setAllToDefaults - bool - if theTask is a pkg name, force all to defaults
    """
    # Case 1: already an instance of us (or of a subclass)
    if isinstance(theTask, ConfigObjPars):
        if setAllToDefaults:
            raise RuntimeError('Called getObjectFromTaskArg with existing'
                               ' object AND setAllToDefaults - is unexpected use case.')
        # Re-sync the internal param list with the ConfigObj dict, since the
        # caller may have hand-edited the dict before calling us.
        # (strict is not applied here; see original notes about validation
        # differences between IrafPar creation and the ConfigObj s/w.)
        theTask.syncParamList(False)
        return theTask

    # Case 2: a file on disk, for example a .cfg file
    if os.path.isfile(str(theTask)):
        try:
            return ConfigObjPars(theTask, strict=strict,
                                 setAllToDefaults=setAllToDefaults)
        except KeyError:
            # Might just be a local file shadowing the exact name of the
            # package we want to import.
            if theTask.find('.') > 0:  # it has an extension, like '.cfg'
                raise  # this really was an error
            # otherwise fall through and try it as a package name

    # Case 3: a python package name to load
    if isinstance(theTask, str) and setAllToDefaults:
        # NOTE how we pass the task name string in setAllToDefaults
        return ConfigObjPars('', setAllToDefaults=theTask, strict=strict)
    return getParsObjForPyPkg(theTask, strict)
Take the arg (usually called theTask), which can be either a subclass of ConfigObjPars, or a string package name, or a .cfg filename - no matter what it is - take it and return a ConfigObjPars object. strict - bool - warning severity, passed to the ConfigObjPars() ctor setAllToDefaults - bool - if theTask is a pkg name, force all to defaults
entailment
def getEmbeddedKeyVal(cfgFileName, kwdName, dflt=None):
    """ Read a config file and pull out the value of a given keyword.

    Raises KeyError when the file cannot be parsed, or when the keyword is
    absent and no dflt was supplied.
    """
    # Assume this is a ConfigObj file.  Use that s/w to quickly read it and
    # put it in dict format.  Assume kwd is at top level (not in a section).
    # The input may also be a .cfgspc file.
    #
    # Only use ConfigObj here as a tool to generate a dict from a file - do
    # not use the returned object as a ConfigObj per se.  As such, we can call
    # with "simple" format, ie. no cfgspc, no val'n, and "list_values"=False.

    def _keyErr():
        # Single source for the two (previously duplicated) error messages.
        if kwdName == TASK_NAME_KEY:
            return KeyError('Can not parse as a parameter config file: ' +
                            '\n\t' + os.path.realpath(cfgFileName))
        return KeyError('Unfound key "' + kwdName + '" while parsing: ' +
                        '\n\t' + os.path.realpath(cfgFileName))

    try:
        junkObj = configobj.ConfigObj(cfgFileName, list_values=False)
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # are no longer swallowed; callers still see a KeyError as before.
        raise _keyErr()

    if kwdName in junkObj:
        return junkObj[kwdName]
    if dflt is not None:
        return dflt
    raise _keyErr()
Read a config file and pull out the value of a given keyword.
entailment
def findCfgFileForPkg(pkgName, theExt, pkgObj=None, taskName=None):
    """ Locate the configuration files for/from/within a given python package.
    pkgName is a string python package name.  This is used unless pkgObj
    is given, in which case pkgName is taken from pkgObj.__name__.
    theExt is either '.cfg' or '.cfgspc'. If the task name is known, it is
    given as taskName, otherwise one is determined using the pkgName.
    Returns a tuple of (package-object, cfg-file-name). """
    # arg check: normalize the extension to always carry a leading dot
    ext = theExt
    if ext[0] != '.':
        ext = '.'+theExt

    # Do the import, if needed.  pkgsToTry maps pkg name -> module object.
    pkgsToTry = {}
    if pkgObj:
        pkgsToTry[pkgObj.__name__] = pkgObj
    else:
        # First try something simple like a regular or dotted import
        try:
            fl = []
            if pkgName.find('.') > 0:
                # dotted name: import with a fromlist of the parent package
                fl = [ pkgName[:pkgName.rfind('.')], ]
            pkgsToTry[str(pkgName)] = __import__(str(pkgName), fromlist=fl)
        except:
            # NOTE(review): bare "except:" also catches KeyboardInterrupt
            # and SystemExit - consider narrowing to Exception.
            throwIt = True
            # One last case to try is something like "csc_kill" from
            # "acstools.csc_kill", but this convenience capability will only be
            # allowed if the parent pkg (acstools) has already been imported.
            if isinstance(pkgName, string_types) and pkgName.find('.') < 0:
                matches = [x for x in sys.modules.keys() \
                           if x.endswith("."+pkgName)]
                if len(matches)>0:
                    throwIt = False
                    for mmm in matches:
                        pkgsToTry[mmm] = sys.modules[mmm]
            if throwIt:
                raise NoCfgFileError("Unfound package or "+ext+" file via: "+\
                                     "import "+str(pkgName))

    # Now that we have the package object (or a few of them to try), for each
    # one find the .cfg or .cfgspc file, and return
    # Return as soon as ANY match is found.
    for aPkgName in pkgsToTry:
        aPkg = pkgsToTry[aPkgName]
        path = os.path.dirname(aPkg.__file__)
        if len(path) < 1:
            path = '.'
        # recursive glob under the package directory
        flist = irafutils.rglob(path, "*"+ext)
        if len(flist) < 1:
            continue

        # Go through these and find the first one for the assumed or given task
        # name.  The task name for 'BigBlackBox.drizzle' would be 'drizzle'.
        if taskName is None:
            taskName = aPkgName.split(".")[-1]
        flist.sort()
        for f in flist:
            # A .cfg file gets checked for _task_name_=val, but a .cfgspc file
            # will have a string check function signature as the val.
            if ext == '.cfg':
                itsTask = getEmbeddedKeyVal(f, TASK_NAME_KEY, '')
            else: # .cfgspc
                sigStr = getEmbeddedKeyVal(f, TASK_NAME_KEY, '')
                # .cfgspc file MUST have an entry for TASK_NAME_KEY w/ a default
                itsTask = vtor_checks.sigStrToKwArgsDict(sigStr)['default']
            if itsTask == taskName:
                # We've found the correct file in an installation area.  Return
                # the package object and the found file.
                return aPkg, f

    # What, are you still here?  Nothing matched in any candidate package.
    raise NoCfgFileError('No valid '+ext+' files found in package: "'+ \
                         str(pkgName)+'" for task: "'+str(taskName)+'"')
Locate the configuration files for/from/within a given python package. pkgName is a string python package name. This is used unless pkgObj is given, in which case pkgName is taken from pkgObj.__name__. theExt is either '.cfg' or '.cfgspc'. If the task name is known, it is given as taskName, otherwise one is determined using the pkgName. Returns a tuple of (package-object, cfg-file-name).
entailment
def findAllCfgTasksUnderDir(aDir):
    """ Finds all installed tasks by examining any .cfg files found on disk
    at and under the given directory, as an installation might be.
    This returns a dict of { file name : task name } """
    return {fname: getEmbeddedKeyVal(fname, TASK_NAME_KEY, '')
            for fname in irafutils.rglob(aDir, '*.cfg')}
Finds all installed tasks by examining any .cfg files found on disk at and under the given directory, as an installation might be. This returns a dict of { file name : task name }
entailment
def getCfgFilesInDirForTask(aDir, aTask, recurse=False): """ This is a specialized function which is meant only to keep the same code from needlessly being much repeated throughout this application. This must be kept as fast and as light as possible. This checks a given directory for .cfg files matching a given task. If recurse is True, it will check subdirectories. If aTask is None, it returns all files and ignores aTask. """ if recurse: flist = irafutils.rglob(aDir, '*.cfg') else: flist = glob.glob(aDir+os.sep+'*.cfg') if aTask: retval = [] for f in flist: try: if aTask == getEmbeddedKeyVal(f, TASK_NAME_KEY, ''): retval.append(f) except Exception as e: print('Warning: '+str(e)) return retval else: return flist
This is a specialized function which is meant only to keep the same code from needlessly being much repeated throughout this application. This must be kept as fast and as light as possible. This checks a given directory for .cfg files matching a given task. If recurse is True, it will check subdirectories. If aTask is None, it returns all files and ignores aTask.
entailment
def getParsObjForPyPkg(pkgName, strict): """ Locate the appropriate ConfigObjPars (or subclass) within the given package. NOTE this begins the same way as getUsrCfgFilesForPyPkg(). Look for .cfg file matches in these places, in this order: 1 - any named .cfg file in current directory matching given task 2 - if there exists a ~/.teal/<taskname>.cfg file 3 - any named .cfg file in SOME*ENV*VAR directory matching given task 4 - the installed default .cfg file (with the given package) """ # Get the python package and it's .cfg file - need this no matter what installedPkg, installedFile = findCfgFileForPkg(pkgName, '.cfg') theFile = None tname = getEmbeddedKeyVal(installedFile, TASK_NAME_KEY) # See if the user has any of their own .cfg files in the cwd for this task if theFile is None: flist = getCfgFilesInDirForTask(os.getcwd(), tname) if len(flist) > 0: if len(flist) == 1: # can skip file times sort theFile = flist[0] else: # There are a few different choices. In the absence of # requirements to the contrary, just take the latest. Set up a # list of tuples of (mtime, fname) so we can sort by mtime. ftups = [ (os.stat(f)[stat.ST_MTIME], f) for f in flist] ftups.sort() theFile = ftups[-1][1] # See if the user has any of their own app-dir .cfg files for this task if theFile is None: flist = getCfgFilesInDirForTask(getAppDir(), tname) # verifies tname flist = [f for f in flist if os.path.basename(f) == tname+'.cfg'] if len(flist) > 0: theFile = flist[0] assert len(flist) == 1, str(flist) # should never happen # Add code to check an env. var defined area? (speak to users first) # Did we find one yet? If not, use the installed version useInstVer = False if theFile is None: theFile = installedFile useInstVer = True # Create a stand-in instance from this file. Force a read-only situation # if we are dealing with the installed, (expected to be) unwritable file. return ConfigObjPars(theFile, associatedPkg=installedPkg, forceReadOnly=useInstVer, strict=strict)
Locate the appropriate ConfigObjPars (or subclass) within the given package. NOTE this begins the same way as getUsrCfgFilesForPyPkg(). Look for .cfg file matches in these places, in this order: 1 - any named .cfg file in current directory matching given task 2 - if there exists a ~/.teal/<taskname>.cfg file 3 - any named .cfg file in SOME*ENV*VAR directory matching given task 4 - the installed default .cfg file (with the given package)
entailment
def getUsrCfgFilesForPyPkg(pkgName):
    """ Return the names of any of the user's own local .cfg files for this
    task, e.g. ones created automatically while saving a read-only
    package. """
    # Locate the installed python package and its bundled .cfg file
    thePkg, theFile = findCfgFileForPkg(pkgName, '.cfg')
    # Use the embedded task name to look in the user's app dir
    tname = getEmbeddedKeyVal(theFile, TASK_NAME_KEY)
    return getCfgFilesInDirForTask(getAppDir(), tname)
See if the user has one of their own local .cfg files for this task, such as might be created automatically during the save of a read-only package, and return their names.
entailment
def checkSetReadOnly(fname, raiseOnErr = False):
    """ If we unexpectedly have write access to fname, take it away. """
    if not os.access(fname, os.W_OK):
        return
    # We can write to this but it is supposed to be read-only.  Remove the
    # user-write bit, leaving group and other permissions untouched (it may
    # be simpler to just force it to r--r--r-- or r--------).
    irafutils.setWritePrivs(fname, False, ignoreErrors= not raiseOnErr)
See if we have write-privileges to this file. If we do, and we are not supposed to, then fix that case.
entailment
def flattenDictTree(aDict):
    """ Flatten a tree-shaped dict (whose values may themselves be dicts)
    into a single-level dict.  All key-vals are lifted to the top level and
    sub-section names (keys of nested dicts) are dropped.  Raises
    DuplicateKeyError on any key collision. """
    flat = {}
    for key, val in aDict.items():
        if isinstance(val, dict):
            # Recursively flatten the subtree, then merge it in, refusing
            # any key collisions with what we already hold.
            sub = flattenDictTree(val)
            clashes = set(flat.keys()).intersection(set(sub.keys()))
            if len(clashes) > 0:
                raise DuplicateKeyError("Flattened dict already has "+ \
                      "key(s): "+str(list(clashes))+" - cannot flatten this.")
            flat.update(sub)
        else:
            if key in flat:
                raise DuplicateKeyError("Flattened dict already has key: "+\
                      key+" - cannot flatten this.")
            flat[key] = val
    return flat
Takes a dict of vals and dicts (so, a tree) as input, and returns a flat dict (only one level) as output. All key-vals are moved to the top level. Sub-section dict names (keys) are ignored/dropped. If there are name collisions, an error is raised.
entailment
def countKey(theDict, name):
    """ Count how many times the given par name occurs anywhere in this
    dict-tree - the same key may appear in several (sub-)sections. """
    total = 0
    for key, val in theDict.items():
        if isinstance(val, dict):
            # descend into the sub-section
            total += countKey(val, name)
        elif key == name:
            # no early exit: sibling sub-dicts may hold further matches
            total += 1
    return total
Return the number of times the given par exists in this dict-tree, since the same key name may be used in different sections/sub-sections.
entailment
def findFirstPar(theDict, name, _depth=0):
    """ Locate the given par; return tuple (its containing (sub-)dict, its
    value).  The first match in iteration order wins - no check is made
    for uniqueness across sections.  Raises KeyError if absent. """
    for key, val in theDict.items():
        if isinstance(val, dict):
            # search this sub-section (and its children) before moving on
            found = findFirstPar(val, name, _depth=_depth+1)
            if found is not None:
                return found
        elif key == name:
            return theDict, theDict[name]
    # This whole (sub-)section and its descendants held no match.
    # Only the top-level call raises; recursive calls signal with None.
    if _depth == 0:
        raise KeyError(name)
    return None
Find the given par. Return tuple: (its own (sub-)dict, its value). Returns the first match found, without checking whether the given key name is unique or whether it is used in multiple sections.
entailment
def findScopedPar(theDict, scope, name):
    """ Return tuple (its own (sub-)dict, its value) for the par at the
    given scope.  Unlike findFirstPar this does no searching - it goes
    straight to the named sub-section and assumes the par is there. """
    # ! only goes one level deep - enhance !
    section = theDict[scope] if len(scope) else theDict
    return section, section[name]
Find the given par. Return tuple: (its own (sub-)dict, its value).
entailment
def setPar(theDict, name, value):
    """ Assign a par's value without needing to know its scope/section. """
    # findFirstPar hands back the live sub-dict (not a copy), so writing
    # into it updates theDict in place.
    section, _oldVal = findFirstPar(theDict, name)
    section[name] = value
Sets a par's value without having to give its scope/section.
entailment
def mergeConfigObj(configObj, inputDict):
    """ Merge the inputDict values into an existing given configObj instance.
    The inputDict is "flat" (no sections); configObj may have sub-sections
    nested to any depth.  Raises DuplicateKeyError if an inputDict key is
    used more than once within configObj (e.g. in two sub-sections). """
    # Expanded upon Warren's version in astrodrizzle
    # First pass: every inputDict key must be unambiguous within configObj
    for key in inputDict:
        if countKey(configObj, key) > 1:
            raise DuplicateKeyError(key)
    # Second pass: apply the values
    for key, val in inputDict.items():
        setPar(configObj, key, val)
Merge the inputDict values into an existing given configObj instance. The inputDict is a "flat" dict - it has no sections/sub-sections. The configObj may have sub-sections nested to any depth. This will raise a DuplicateKeyError if one of the inputDict keys is used more than once in configObj (e.g. within two different sub-sections).
entailment
def findTheLost(config_file, configspec_file, skipHidden=True):
    """ Find any lost/missing parameters in this cfg file, compared to what
    the .cfgspc says should be there. This method is recommended by the
    ConfigObj docs.  Returns a stringified list of item errors (or an empty
    list/str when there is nothing to report). """
    # do some sanity checking, but don't (yet) make this a serious error
    if not os.path.exists(config_file):
        print("ERROR: Config file not found: "+config_file)
        return []
    if not os.path.exists(configspec_file):
        print("ERROR: Configspec file not found: "+configspec_file)
        return []
    tmpObj = configobj.ConfigObj(config_file, configspec=configspec_file)
    simval = configobj.SimpleVal()
    test = tmpObj.validate(simval)
    # validate() returns True on full success, else a results dict
    if test is True:
        return []
    # If we get here, there is a dict returned of {key1: bool, key2: bool}
    # which matches the shape of the config obj.  We need to walk it to
    # find the Falses, since they are the missing pars.
    flattened = configobj.flatten_errors(tmpObj, test)
    # Skip/eliminate any 'hidden' items from our list, since hidden items
    # are really supposed to be missing from the .cfg file.
    if len(flattened) > 0 and skipHidden:
        keepers = []
        for tup in flattened:
            keep = True
            # hidden section
            if len(tup[0]) > 0 and isHiddenName(tup[0][-1]):
                keep = False
            # hidden par (in a section, or at the top level)
            elif tup[1] is not None and isHiddenName(tup[1]):
                keep = False
            if keep:
                keepers.append(tup)
        flattened = keepers
    # (fix: dropped a "missing" list local that was created but never used)
    return flattened2str(flattened, missing=True)
Find any lost/missing parameters in this cfg file, compared to what the .cfgspc says should be there. This method is recommended by the ConfigObj docs. Return a stringified list of item errors.
entailment
def isHiddenName(astr):
    """ Return True if this string name denotes a hidden par or section """
    # hidden names look like "_task_name_": underscore-wrapped, len > 2
    return (astr is not None and len(astr) > 2
            and astr.startswith('_') and astr.endswith('_'))
Return True if this string name denotes a hidden par or section
entailment
def flattened2str(flattened, missing=False, extra=False):
    """ Return a pretty-printed multi-line string version of the output of
    flatten_errors.  Know that flattened comes in the form of a list of
    keys that failed. Each member of the list is a tuple::

        ([list of sections...], key, result)

    so we turn that into a string. Set missing to True if all the input
    problems are from missing items.  Set extra to True if all the input
    problems are from extra items. """
    if flattened is None or len(flattened) < 1:
        return ''
    retval = ''
    for sections, key, result in flattened:
        # Name the section and item, to start the message line
        if sections is None or len(sections) == 0:
            # top-level item, no enclosing section
            retval += '\t"'+key+'"'
        elif len(sections) == 1:
            if key is None:
                # a whole section is missing at the top-level; see if hidden
                junk = sections[0]
                if isHiddenName(junk):
                    continue # this missing or extra section is not an error
                else:
                    retval += '\tSection "'+sections[0]+'"'
            else:
                retval += '\t"'+sections[0]+'.'+key+'"'
        else: # len > 1: deeply nested - show the dotted section path
            joined = '.'.join(sections)
            joined = '"'+joined+'"'
            if key is None:
                retval += '\tSection '+joined
            else:
                retval += '\t"'+key+'" from '+joined
        # End the msg line with "what seems to be the trouble" with this one
        if missing and result==False:
            retval += ' is missing.'
        elif extra:
            if result:
                retval += ' is an unexpected section. Is your file out of date?'
            else:
                retval += ' is an unexpected parameter. Is your file out of date?'
        elif isinstance(result, bool):
            retval += ' has an invalid value'
        else:
            # NOTE(review): "result.message" relies on the legacy exception
            # .message attribute (removed from BaseException in Python 3);
            # presumably configobj's validator errors still supply it - verify.
            retval += ' is invalid, '+result.message
        retval += '\n\n'
    return retval.rstrip()
Return a pretty-printed multi-line string version of the output of flatten_errors. Know that flattened comes in the form of a list of keys that failed. Each member of the list is a tuple:: ([list of sections...], key, result) so we turn that into a string. Set missing to True if all the input problems are from missing items. Set extra to True if all the input problems are from extra items.
entailment
def getDefaultSaveFilename(self, stub=False):
    """ Name of the file this task would be saved to when no files for it
    have ever been saved and the user wishes to save.  With stub=True the
    result is <dir>/<taskname>_stub.cfg instead of <dir>/<taskname>.cfg. """
    suffix = '_stub.cfg' if stub else '.cfg'
    return self._rcDir + os.sep + self.__taskName + suffix
Return name of file where we are expected to be saved if no files for this task have ever been saved, and the user wishes to save. If stub is True, the result will be <dir>/<taskname>_stub.cfg instead of <dir>/<taskname>.cfg.
entailment
def syncParamList(self, firstTime, preserve_order=True): """ Set or reset the internal param list from the dict's contents. """ # See the note in setParam about this design. # Get latest par values from dict. Make sure we do not # change the id of the __paramList pointer here. new_list = self._getParamsFromConfigDict(self, initialPass=firstTime) # dumpCfgspcTo=sys.stdout) # Have to add this odd last one for the sake of the GUI (still?) if self._forUseWithEpar: new_list.append(basicpar.IrafParS(['$nargs','s','h','N'])) if len(self.__paramList) > 0 and preserve_order: # Here we have the most up-to-date data from the actual data # model, the ConfigObj dict, and we need to use it to fill in # our param list. BUT, we need to preserve the order our list # has had up until now (by unique parameter name). namesInOrder = [p.fullName() for p in self.__paramList] assert len(namesInOrder) == len(new_list), \ 'Mismatch in num pars, had: '+str(len(namesInOrder))+ \ ', now we have: '+str(len(new_list))+', '+ \ str([p.fullName() for p in new_list]) self.__paramList[:] = [] # clear list, keep same pointer # create a flat dict view of new_list, for ease of use in next step new_list_dict = {} # can do in one step in v2.7 for par in new_list: new_list_dict[par.fullName()] = par # populate for fn in namesInOrder: self.__paramList.append(new_list_dict[fn]) else: # Here we just take the data in whatever order it came. self.__paramList[:] = new_list
Set or reset the internal param list from the dict's contents.
entailment
def getDefaultParList(self): """ Return a par list just like ours, but with all default values. """ # The code below (create a new set-to-dflts obj) is correct, but it # adds a tenth of a second to startup. Clicking "Defaults" in the # GUI does not call this. But this can be used to set the order seen. # But first check for rare case of no cfg file name if self.filename is None: # this is a .cfgspc-only kind of object so far self.filename = self.getDefaultSaveFilename(stub=True) return copy.deepcopy(self.__paramList) tmpObj = ConfigObjPars(self.filename, associatedPkg=self.__assocPkg, setAllToDefaults=True, strict=False) return tmpObj.getParList()
Return a par list just like ours, but with all default values.
entailment
def setParam(self, name, val, scope='', check=1, idxHint=None):
    """ Find the ConfigObj entry.  Update the __paramList. """
    theDict, oldVal = findScopedPar(self, scope, name)

    # Set the value, even if invalid.  It needs to be set before
    # the validation step (next).
    theDict[name] = val

    # If need be, check the proposed value.  Ideally, we'd like to
    # (somehow elegantly) only check this one item.  For now, the best
    # shortcut is to only validate this section.
    if check:
        ans=self.validate(self._vtor, preserve_errors=True, section=theDict)
        if ans != True:
            # ans is False (all bad) or a dict of per-key results
            flatStr = "All values are invalid!"
            if ans != False:
                flatStr = flattened2str(configobj.flatten_errors(self, ans))
            raise RuntimeError("Validation error: "+flatStr)

    # Note - this design needs work.  Right now there are two copies
    # of the data: the ConfigObj dict, and the __paramList ...
    # We rely on the idxHint arg so we don't have to search the __paramList
    # every time this is called, which could really slows things down.
    assert idxHint is not None, "ConfigObjPars relies on a valid idxHint"
    assert name == self.__paramList[idxHint].name, \
           'Error in setParam, name: "'+name+'" != name at idxHint: "'+\
           self.__paramList[idxHint].name+'", idxHint: '+str(idxHint)
    self.__paramList[idxHint].set(val)
Find the ConfigObj entry. Update the __paramList.
entailment
def saveParList(self, *args, **kw):
    """Write parameter data to filename (string or filehandle).

    Returns a short status string naming the number of pars written and
    the absolute file name used.  Raises ValueError when no file name can
    be determined.
    """
    # fix: the original only assigned "filename" when the kwarg was
    # present, so calling without it raised NameError instead of falling
    # back to self.getFilename().
    filename = kw.get('filename', None)
    if not filename:
        filename = self.getFilename()
    if not filename:
        raise ValueError("No filename specified to save parameters")

    if hasattr(filename, 'write'):
        # an open file-like object was passed in
        fh = filename
        absFileName = os.path.abspath(fh.name)
    else:
        absFileName = os.path.expanduser(filename)
        absDir = os.path.dirname(absFileName)
        if len(absDir) and not os.path.isdir(absDir):
            os.makedirs(absDir)
        fh = open(absFileName, 'w')
    numpars = len(self.__paramList)
    if self._forUseWithEpar:
        numpars -= 1  # don't count the artificial $nargs par
    if not self.final_comment:
        self.final_comment = [''] # force \n at EOF
    # Empty the ConfigObj version of section.defaults since that is based
    # on an assumption incorrect for us, and override with our own list.
    # THIS IS A BIT OF MONKEY-PATCHING!  WATCH FUTURE VERSION CHANGES!
    # See Trac ticket #762.
    while len(self.defaults):
        self.defaults.pop(-1) # empty it, keeping ref
    for key in self._neverWrite:
        self.defaults.append(key)
    # Note also that we are only overwriting the top/main section's
    # "defaults" list, but EVERY [sub-]section has such an attribute...

    # Now write to file, delegating work to ConfigObj (note that ConfigObj
    # write() skips any items listed by name in the self.defaults list)
    self.write(fh)
    fh.close()
    retval = str(numpars) + " parameters written to " + absFileName
    self.filename = absFileName # reset our own ConfigObj filename attr
    self.debug('Keys not written: '+str(self.defaults))
    return retval
Write parameter data to filename (string or filehandle)
entailment
def run(self, *args, **kw):
    """ This may be overridden by a subclass. """
    if self._runFunc is None:
        raise taskpars.NoExecError('No way to run task "'+self.__taskName+\
            '". You must either override the "run" method in your '+ \
            'ConfigObjPars subclass, or you must supply a "run" '+ \
            'function in your package.')
    # Drop the two kwargs sent by EditParDialog which we do not use
    for unwanted in ('mode', '_save'):
        kw.pop(unwanted, None)
    return self._runFunc(self, *args, **kw)
This may be overridden by a subclass.
entailment
def triggerLogicToStr(self):
    """ Render all the trigger/dependency/execute logic as one string. """
    try:
        import json
    except ImportError:
        return "Cannot dump triggers/dependencies/executes (need json)"
    # Three labelled json sections, then a trailing newline
    parts = ["TRIGGERS:\n" + json.dumps(self._allTriggers, indent=3),
             "\nDEPENDENCIES:\n" + json.dumps(self._allDepdcs, indent=3),
             "\nTO EXECUTE:\n" + json.dumps(self._allExecutes, indent=3),
             "\n"]
    return "".join(parts)
Print all the trigger logic to a string and return it.
entailment
def _findAssociatedConfigSpecFile(self, cfgFileName): """ Given a config file, find its associated config-spec file, and return the full pathname of the file. """ # Handle simplest 2 cases first: co-located or local .cfgspc file retval = "."+os.sep+self.__taskName+".cfgspc" if os.path.isfile(retval): return retval retval = os.path.dirname(cfgFileName)+os.sep+self.__taskName+".cfgspc" if os.path.isfile(retval): return retval # Also try the resource dir retval = self.getDefaultSaveFilename()+'spc' # .cfgspc if os.path.isfile(retval): return retval # Now try and see if there is a matching .cfgspc file in/under an # associated package, if one is defined. if self.__assocPkg is not None: x, theFile = findCfgFileForPkg(None, '.cfgspc', pkgObj = self.__assocPkg, taskName = self.__taskName) return theFile # Finally try to import the task name and see if there is a .cfgspc # file in that directory x, theFile = findCfgFileForPkg(self.__taskName, '.cfgspc', taskName = self.__taskName) if os.path.exists(theFile): return theFile # unfound raise NoCfgFileError('Unfound config-spec file for task: "'+ \ self.__taskName+'"')
Given a config file, find its associated config-spec file, and return the full pathname of the file.
entailment
def _getParamsFromConfigDict(self, cfgObj, scopePrefix='', initialPass=False, dumpCfgspcTo=None): """ Walk the given ConfigObj dict pulling out IRAF-like parameters into a list. Since this operates on a dict this can be called recursively. This is also our chance to find and pull out triggers and such dependencies. """ # init retval = [] if initialPass and len(scopePrefix) < 1: self._posArgs = [] # positional args [2-tuples]: (index,scopedName) # FOR SECURITY: the following 3 chunks of data, # _allTriggers, _allDepdcs, _allExecutes, # are collected ONLY from the .cfgspc file self._allTriggers = {} self._allDepdcs = {} self._allExecutes = {} # start walking ("tell yer story walkin, buddy") # NOTE: this relies on the "in" operator returning keys in the # order that they exist in the dict (which depends on ConfigObj keeping # the order they were found in the original file) for key in cfgObj: val = cfgObj[key] # Do we need to skip this - if not a par, like a rule or something toBeHidden = isHiddenName(key) if toBeHidden: if key not in self._neverWrite and key != TASK_NAME_KEY: self._neverWrite.append(key) # yes TASK_NAME_KEY is hidden, but it IS output to the .cfg # a section if isinstance(val, dict): if not toBeHidden: if len(list(val.keys()))>0 and len(retval)>0: # Here is where we sneak in the section comment # This is so incredibly kludgy (as the code was), it # MUST be revamped eventually! This is for the epar GUI. prevPar = retval[-1] # Use the key (or its comment?) 
as the section header prevPar.set(prevPar.get('p_prompt')+'\n\n'+key, field='p_prompt', check=0) if dumpCfgspcTo: dumpCfgspcTo.write('\n['+key+']\n') # a logical grouping (append its params) pfx = scopePrefix+'.'+key pfx = pfx.strip('.') retval = retval + self._getParamsFromConfigDict(val, pfx, initialPass, dumpCfgspcTo) # recurse else: # a param fields = [] choicesOrMin = None fields.append(key) # name dtype = 's' cspc = None if cfgObj.configspec: cspc = cfgObj.configspec.get(key) # None if not found chk_func_name = '' chk_args_dict = {} if cspc: chk_func_name = cspc[:cspc.find('(')] chk_args_dict = vtor_checks.sigStrToKwArgsDict(cspc) if chk_func_name.find('option') >= 0: dtype = 's' # convert the choices string to a list (to weed out kwds) x = cspc[cspc.find('(')+1:-1] # just the options() args # cspc e.g.: option_kw("poly5","nearest","linear", default="poly5", comment="Interpolant (poly5,nearest,linear)") x = x.split(',') # tokenize # but! comment value may have commas in it, find it # using it's equal sign, rm all after it has_eq = [i for i in x if i.find('=')>=0] if len(has_eq) > 0: x = x[: x.index(has_eq[0]) ] # rm spaces, extra quotes; rm kywd arg pairs x = [i.strip("' ") for i in x if i.find('=')<0] choicesOrMin = '|'+'|'.join(x)+'|' # IRAF format for enums elif chk_func_name.find('boolean') >= 0: dtype = 'b' elif chk_func_name.find('float_or_') >= 0: dtype = 'r' elif chk_func_name.find('float') >= 0: dtype = 'R' elif chk_func_name.find('integer_or_') >= 0: dtype = 'i' elif chk_func_name.find('integer') >= 0: dtype = 'I' elif chk_func_name.find('action') >= 0: dtype = 'z' fields.append(dtype) fields.append('a') if type(val)==bool: if val: fields.append('yes') else: fields.append('no') else: fields.append(val) fields.append(choicesOrMin) fields.append(None) # Primarily use description from .cfgspc file (0). But, allow # overrides from .cfg file (1) if different. 
dscrp0 = chk_args_dict.get('comment','').strip() # ok if missing dscrp1 = cfgObj.inline_comments[key] if dscrp1 is None: dscrp1 = '' while len(dscrp1) > 0 and dscrp1[0] in (' ','#'): dscrp1 = dscrp1[1:] # .cfg file comments start with '#' dscrp1 = dscrp1.strip() # Now, decide what to do/say about the descriptions if len(dscrp1) > 0: dscrp = dscrp0 if dscrp0 != dscrp1: # allow override if different dscrp = dscrp1+eparoption.DSCRPTN_FLAG # flag it if initialPass: if dscrp0 == '' and cspc is None: # this is a case where this par isn't in the # .cfgspc; ignore, it is caught/error later pass else: self.debug('Description of "'+key+ \ '" overridden, from: '+repr(dscrp0)+\ ' to: '+repr(dscrp1)) fields.append(dscrp) else: # set the field for the GUI fields.append(dscrp0) # ALSO set it in the dict so it is written to file later cfgObj.inline_comments[key] = '# '+dscrp0 # This little section, while never intended to be used during # normal operation, could save a lot of manual work. if dumpCfgspcTo: junk = cspc junk = key+' = '+junk.strip() if junk.find(' comment=')<0: junk = junk[:-1]+", comment="+ \ repr(irafutils.stripQuotes(dscrp1.strip()))+")" dumpCfgspcTo.write(junk+'\n') # Create the par if not toBeHidden or chk_func_name.find('action')==0: par = basicpar.parFactory(fields, True) par.setScope(scopePrefix) retval.append(par) # else this is a hidden key # The next few items require a fully scoped name absKeyName = scopePrefix+'.'+key # assumed to be unique # Check for pars marked to be positional args if initialPass: pos = chk_args_dict.get('pos') if pos: # we'll sort them later, on demand self._posArgs.append( (int(pos), scopePrefix, key) ) # Check for triggers and/or dependencies if initialPass: # What triggers what? (thats why theres an 's' in the kwd) # try "trigger" (old) if chk_args_dict.get('trigger'): print("WARNING: outdated version of .cfgspc!! 
for "+ self.__taskName+", 'trigger' unused for "+ absKeyName) # try "triggers" trgs = chk_args_dict.get('triggers') if trgs and len(trgs)>0: # eg. _allTriggers['STEP2.xy'] == ('_rule1_','_rule3_') assert absKeyName not in self._allTriggers, \ 'More than 1 of these in .cfgspc?: '+absKeyName # we force this to always be a sequence if isinstance(trgs, (list,tuple)): self._allTriggers[absKeyName] = trgs else: self._allTriggers[absKeyName] = (trgs,) # try "executes" excs = chk_args_dict.get('executes') if excs and len(excs)>0: # eg. _allExecutes['STEP2.xy'] == ('_rule1_','_rule3_') assert absKeyName not in self._allExecutes, \ 'More than 1 of these in .cfgspc?: '+absKeyName # we force this to always be a sequence if isinstance(excs, (list,tuple)): self._allExecutes[absKeyName] = excs else: self._allExecutes[absKeyName] = (excs,) # Dependencies? (besides these used here, may someday # add: 'range_from', 'warn_if', etc.) depName = None if not depName: depType = 'active_if' depName = chk_args_dict.get(depType) # e.g. =='_rule1_' if not depName: depType = 'inactive_if' depName = chk_args_dict.get(depType) if not depName: depType = 'is_set_by' depName = chk_args_dict.get(depType) if not depName: depType = 'set_yes_if' depName = chk_args_dict.get(depType) if not depName: depType = 'set_no_if' depName = chk_args_dict.get(depType) if not depName: depType = 'is_disabled_by' depName = chk_args_dict.get(depType) # NOTE - the above few lines stops at the first dependency # found (depName) for a given par. If, in the future a # given par can have >1 dependency than we need to revamp!! if depName: # Add to _allDepdcs dict: (val is dict of pars:types) # # e.g. _allDepdcs['_rule1_'] == \ # {'STEP3.ra': 'active_if', # 'STEP3.dec': 'active_if', # 'STEP3.azimuth': 'inactive_if'} if depName in self._allDepdcs: thisRulesDict = self._allDepdcs[depName] assert not absKeyName in thisRulesDict, \ 'Cant yet handle multiple actions for the '+ \ 'same par and the same rule. 
For "'+depName+ \ '" dict was: '+str(thisRulesDict)+ \ ' while trying to add to it: '+\ str({absKeyName:depType}) thisRulesDict[absKeyName] = depType else: self._allDepdcs[depName] = {absKeyName:depType} # else no dependencies found for this chk_args_dict return retval
Walk the given ConfigObj dict pulling out IRAF-like parameters into a list. Since this operates on a dict this can be called recursively. This is also our chance to find and pull out triggers and such dependencies.
entailment
def getTriggerStrings(self, parScope, parName): """ For a given item (scope + name), return all strings (in a tuple) that it is meant to trigger, if any exist. Returns None is none. """ # The data structure of _allTriggers was chosen for how easily/quickly # this particular access can be made here. fullName = parScope+'.'+parName return self._allTriggers.get(fullName)
For a given item (scope + name), return all strings (in a tuple) that it is meant to trigger, if any exist. Returns None is none.
entailment
def getExecuteStrings(self, parScope, parName): """ For a given item (scope + name), return all strings (in a tuple) that it is meant to execute, if any exist. Returns None is none. """ # The data structure of _allExecutes was chosen for how easily/quickly # this particular access can be made here. fullName = parScope+'.'+parName return self._allExecutes.get(fullName)
For a given item (scope + name), return all strings (in a tuple) that it is meant to execute, if any exist. Returns None is none.
entailment
def getPosArgs(self): """ Return a list, in order, of any parameters marked with "pos=N" in the .cfgspc file. """ if len(self._posArgs) < 1: return [] # The first item in the tuple is the index, so we now sort by it self._posArgs.sort() # Build a return list retval = [] for idx, scope, name in self._posArgs: theDict, val = findScopedPar(self, scope, name) retval.append(val) return retval
Return a list, in order, of any parameters marked with "pos=N" in the .cfgspc file.
entailment
def getKwdArgs(self, flatten = False): """ Return a dict of all normal dict parameters - that is, all parameters NOT marked with "pos=N" in the .cfgspc file. This will also exclude all hidden parameters (metadata, rules, etc). """ # Start with a full deep-copy. What complicates this method is the # idea of sub-sections. This dict can have dicts as values, and so on. dcopy = self.dict() # ConfigObj docs say this is a deep-copy # First go through the dict removing all positional args for idx,scope,name in self._posArgs: theDict, val = findScopedPar(dcopy, scope, name) # 'theDict' may be dcopy, or it may be a dict under it theDict.pop(name) # Then go through the dict removing all hidden items ('_item_name_') for k in list(dcopy.keys()): if isHiddenName(k): dcopy.pop(k) # Done with the nominal operation if not flatten: return dcopy # They have asked us to flatten the structure - to bring all parameters # up to the top level, even if they are in sub-sections. So we look # for values that are dicts. We will throw something if we end up # with name collisions at the top level as a result of this. return flattenDictTree(dcopy)
Return a dict of all normal dict parameters - that is, all parameters NOT marked with "pos=N" in the .cfgspc file. This will also exclude all hidden parameters (metadata, rules, etc).
entailment
def tryValue(self, name, val, scope=''): """ For the given item name (and scope), we are being asked to try the given value to see if it would pass validation. We are not to set it, but just try it. We return a tuple: If it fails, we return: (False, the last known valid value). On success, we return: (True, None). """ # SIMILARITY BETWEEN THIS AND setParam() SHOULD BE CONSOLIDATED! # Set the value, even if invalid. It needs to be set before # the validation step (next). theDict, oldVal = findScopedPar(self, scope, name) if oldVal == val: return (True, None) # assume oldVal is valid theDict[name] = val # Check the proposed value. Ideally, we'd like to # (somehow elegantly) only check this one item. For now, the best # shortcut is to only validate this section. ans=self.validate(self._vtor, preserve_errors=True, section=theDict) # No matter what ans is, immediately return the item to its original # value since we are only checking the value here - not setting. theDict[name] = oldVal # Now see what the validation check said errStr = '' if ans != True: flatStr = "All values are invalid!" if ans != False: flatStr = flattened2str(configobj.flatten_errors(self, ans)) errStr = "Validation error: "+flatStr # for now this info is unused # Done if len(errStr): return (False, oldVal) # was an error else: return (True, None)
For the given item name (and scope), we are being asked to try the given value to see if it would pass validation. We are not to set it, but just try it. We return a tuple: If it fails, we return: (False, the last known valid value). On success, we return: (True, None).
entailment
def listTheExtras(self, deleteAlso): """ Use ConfigObj's get_extra_values() call to find any extra/unknown parameters we may have loaded. Return a string similar to findTheLost. If deleteAlso is True, this will also delete any extra/unknown items. """ # get list of extras extras = configobj.get_extra_values(self) # extras is in format: [(sections, key), (sections, key), ] # but we need: [(sections, key, result), ...] - set all results to # a bool just to make it the right shape. BUT, since we are in # here anyway, make that bool mean something - hide info in it about # whether that extra item is a section (1) or just a single par (0) # # simplified, this is: expanded = [ (x+(abool,)) for x in extras] expanded = [ (x+ \ ( bool(len(x[0])<1 and hasattr(self[x[1]], 'keys')), ) \ ) for x in extras] retval = '' if expanded: retval = flattened2str(expanded, extra=1) # but before we return, delete them (from ourself!) if requested to if deleteAlso: for tup_to_del in extras: target = self # descend the tree to the dict where this items is located. # (this works because target is not a copy (because the dict # type is mutable)) location = tup_to_del[0] for subdict in location: target = target[subdict] # delete it target.pop(tup_to_del[1]) return retval
Use ConfigObj's get_extra_values() call to find any extra/unknown parameters we may have loaded. Return a string similar to findTheLost. If deleteAlso is True, this will also delete any extra/unknown items.
entailment
def get(self): """ Reloads the measurements from the backing store. :return: 200 if success. """ try: self._measurementController.reloadCompletedMeasurements() return None, 200 except: logger.exception("Failed to reload measurements") return str(sys.exc_info()), 500
Reloads the measurements from the backing store. :return: 200 if success.
entailment
def handle(self, data): """ Writes each sample to a csv file. :param data: the samples. :return: """ self.logger.debug("Handling " + str(len(data)) + " data items") for datum in data: if isinstance(datum, dict): # these have to wrapped in a list for python 3.4 due to a change in the implementation # of OrderedDict in python 3.5+ (which means .keys() and .values() are sequences in 3.5+) if self._first: self._csv.writerow(list(datum.keys())) self._first = False self._csv.writerow(list(datum.values())) elif isinstance(datum, list): self._csv.writerow(datum) else: self.logger.warning("Ignoring unsupported data type " + str(type(datum)) + " : " + str(datum))
Writes each sample to a csv file. :param data: the samples. :return:
entailment
def start(self, measurementId): """ Posts to the target to tell it a named measurement is starting. :param measurementId: """ self.sendURL = self.rootURL + measurementId + '/' + self.deviceName self.startResponseCode = self._doPut(self.sendURL)
Posts to the target to tell it a named measurement is starting. :param measurementId:
entailment
def handle(self, data): """ puts the data in the target. :param data: the data to post. :return: """ self.dataResponseCode.append(self._doPut(self.sendURL + '/data', data=data))
puts the data in the target. :param data: the data to post. :return:
entailment
def stop(self, measurementId, failureReason=None): """ informs the target the named measurement has completed :param measurementId: the measurement that has completed. :return: """ if failureReason is None: self.endResponseCode = self._doPut(self.sendURL + "/complete") else: self.endResponseCode = self._doPut(self.sendURL + "/failed", data={'failureReason': failureReason}) self.sendURL = None
informs the target the named measurement has completed :param measurementId: the measurement that has completed. :return:
entailment
def dottedQuadToNum(ip): """ Convert decimal dotted quad string to long integer >>> int(dottedQuadToNum('1 ')) 1 >>> int(dottedQuadToNum(' 1.2')) 16777218 >>> int(dottedQuadToNum(' 1.2.3 ')) 16908291 >>> int(dottedQuadToNum('1.2.3.4')) 16909060 >>> dottedQuadToNum('255.255.255.255') 4294967295 >>> dottedQuadToNum('255.255.255.256') Traceback (most recent call last): ValueError: Not a good dotted-quad IP: 255.255.255.256 """ # import here to avoid it when ip_addr values are not used import socket, struct try: return struct.unpack('!L', socket.inet_aton(ip.strip()))[0] except socket.error: raise ValueError('Not a good dotted-quad IP: %s' % ip) return
Convert decimal dotted quad string to long integer >>> int(dottedQuadToNum('1 ')) 1 >>> int(dottedQuadToNum(' 1.2')) 16777218 >>> int(dottedQuadToNum(' 1.2.3 ')) 16908291 >>> int(dottedQuadToNum('1.2.3.4')) 16909060 >>> dottedQuadToNum('255.255.255.255') 4294967295 >>> dottedQuadToNum('255.255.255.256') Traceback (most recent call last): ValueError: Not a good dotted-quad IP: 255.255.255.256
entailment
def numToDottedQuad(num): """ Convert long int to dotted quad string >>> numToDottedQuad(long(-1)) Traceback (most recent call last): ValueError: Not a good numeric IP: -1 >>> numToDottedQuad(long(1)) '0.0.0.1' >>> numToDottedQuad(long(16777218)) '1.0.0.2' >>> numToDottedQuad(long(16908291)) '1.2.0.3' >>> numToDottedQuad(long(16909060)) '1.2.3.4' >>> numToDottedQuad(long(4294967295)) '255.255.255.255' >>> numToDottedQuad(long(4294967296)) Traceback (most recent call last): ValueError: Not a good numeric IP: 4294967296 """ # import here to avoid it when ip_addr values are not used import socket, struct # no need to intercept here, 4294967295 is fine if num > 4294967295 or num < 0: raise ValueError('Not a good numeric IP: %s' % num) try: return socket.inet_ntoa( struct.pack('!L', long(num))) except (socket.error, struct.error, OverflowError): raise ValueError('Not a good numeric IP: %s' % num)
Convert long int to dotted quad string >>> numToDottedQuad(long(-1)) Traceback (most recent call last): ValueError: Not a good numeric IP: -1 >>> numToDottedQuad(long(1)) '0.0.0.1' >>> numToDottedQuad(long(16777218)) '1.0.0.2' >>> numToDottedQuad(long(16908291)) '1.2.0.3' >>> numToDottedQuad(long(16909060)) '1.2.3.4' >>> numToDottedQuad(long(4294967295)) '255.255.255.255' >>> numToDottedQuad(long(4294967296)) Traceback (most recent call last): ValueError: Not a good numeric IP: 4294967296
entailment
def _is_num_param(names, values, to_float=False): """ Return numbers from inputs or raise VdtParamError. Lets ``None`` pass through. Pass in keyword argument ``to_float=True`` to use float for the conversion rather than int. >>> _is_num_param(('', ''), (0, 1.0)) [0, 1] >>> _is_num_param(('', ''), (0, 1.0), to_float=True) [0.0, 1.0] >>> _is_num_param(('a'), ('a')) # doctest: +SKIP Traceback (most recent call last): VdtParamError: passed an incorrect value "a" for parameter "a". """ fun = to_float and float or int out_params = [] for (name, val) in zip(names, values): if val is None: out_params.append(val) elif isinstance(val, number_or_string_types): try: out_params.append(fun(val)) except ValueError: raise VdtParamError(name, val) else: raise VdtParamError(name, val) return out_params
Return numbers from inputs or raise VdtParamError. Lets ``None`` pass through. Pass in keyword argument ``to_float=True`` to use float for the conversion rather than int. >>> _is_num_param(('', ''), (0, 1.0)) [0, 1] >>> _is_num_param(('', ''), (0, 1.0), to_float=True) [0.0, 1.0] >>> _is_num_param(('a'), ('a')) # doctest: +SKIP Traceback (most recent call last): VdtParamError: passed an incorrect value "a" for parameter "a".
entailment
def is_integer(value, min=None, max=None): """ A check that tests that a given value is an integer (int, or long) and optionally, between bounds. A negative value is accepted, while a float will fail. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. >>> vtor = Validator() >>> vtor.check('integer', '-1') -1 >>> vtor.check('integer', '0') 0 >>> vtor.check('integer', 9) 9 >>> vtor.check('integer', 'a') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('integer', '2.2') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "2.2" is of the wrong type. >>> vtor.check('integer(10)', '20') 20 >>> vtor.check('integer(max=20)', '15') 15 >>> vtor.check('integer(10)', '9') # doctest: +SKIP Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(10)', 9) # doctest: +SKIP Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(max=20)', '35') # doctest: +SKIP Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(max=20)', 35) # doctest: +SKIP Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(0, 9)', False) 0 """ (min_val, max_val) = _is_num_param(('min', 'max'), (min, max)) if not isinstance(value, int_or_string_types): raise VdtTypeError(value) if isinstance(value, string_types): # if it's a string - does it represent an integer ? try: value = int(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value
A check that tests that a given value is an integer (int, or long) and optionally, between bounds. A negative value is accepted, while a float will fail. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. >>> vtor = Validator() >>> vtor.check('integer', '-1') -1 >>> vtor.check('integer', '0') 0 >>> vtor.check('integer', 9) 9 >>> vtor.check('integer', 'a') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('integer', '2.2') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "2.2" is of the wrong type. >>> vtor.check('integer(10)', '20') 20 >>> vtor.check('integer(max=20)', '15') 15 >>> vtor.check('integer(10)', '9') # doctest: +SKIP Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(10)', 9) # doctest: +SKIP Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(max=20)', '35') # doctest: +SKIP Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(max=20)', 35) # doctest: +SKIP Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(0, 9)', False) 0
entailment
def is_float(value, min=None, max=None): """ A check that tests that a given value is a float (an integer will be accepted), and optionally - that it is between bounds. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. This can accept negative values. >>> vtor = Validator() >>> vtor.check('float', '2') 2.0 From now on we multiply the value to avoid comparing decimals >>> vtor.check('float', '-6.8') * 10 -68.0 >>> vtor.check('float', '12.2') * 10 122.0 >>> vtor.check('float', 8.4) * 10 84.0 >>> vtor.check('float', 'a') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('float(10.1)', '10.2') * 10 102.0 >>> vtor.check('float(max=20.2)', '15.1') * 10 151.0 >>> vtor.check('float(10.0)', '9.0') # doctest: +SKIP Traceback (most recent call last): VdtValueTooSmallError: the value "9.0" is too small. >>> vtor.check('float(max=20.0)', '35.0') # doctest: +SKIP Traceback (most recent call last): VdtValueTooBigError: the value "35.0" is too big. """ (min_val, max_val) = _is_num_param( ('min', 'max'), (min, max), to_float=True) if not isinstance(value, number_or_string_types): raise VdtTypeError(value) if not isinstance(value, float): # if it's a string - does it represent a float ? try: value = float(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value
A check that tests that a given value is a float (an integer will be accepted), and optionally - that it is between bounds. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. This can accept negative values. >>> vtor = Validator() >>> vtor.check('float', '2') 2.0 From now on we multiply the value to avoid comparing decimals >>> vtor.check('float', '-6.8') * 10 -68.0 >>> vtor.check('float', '12.2') * 10 122.0 >>> vtor.check('float', 8.4) * 10 84.0 >>> vtor.check('float', 'a') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('float(10.1)', '10.2') * 10 102.0 >>> vtor.check('float(max=20.2)', '15.1') * 10 151.0 >>> vtor.check('float(10.0)', '9.0') # doctest: +SKIP Traceback (most recent call last): VdtValueTooSmallError: the value "9.0" is too small. >>> vtor.check('float(max=20.0)', '35.0') # doctest: +SKIP Traceback (most recent call last): VdtValueTooBigError: the value "35.0" is too big.
entailment
def is_boolean(value): """ Check if the value represents a boolean. >>> vtor = Validator() >>> vtor.check('boolean', 0) 0 >>> vtor.check('boolean', False) 0 >>> vtor.check('boolean', '0') 0 >>> vtor.check('boolean', 'off') 0 >>> vtor.check('boolean', 'false') 0 >>> vtor.check('boolean', 'no') 0 >>> vtor.check('boolean', 'nO') 0 >>> vtor.check('boolean', 'NO') 0 >>> vtor.check('boolean', 1) 1 >>> vtor.check('boolean', True) 1 >>> vtor.check('boolean', '1') 1 >>> vtor.check('boolean', 'on') 1 >>> vtor.check('boolean', 'true') 1 >>> vtor.check('boolean', 'yes') 1 >>> vtor.check('boolean', 'Yes') 1 >>> vtor.check('boolean', 'YES') 1 >>> vtor.check('boolean', '') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "" is of the wrong type. >>> vtor.check('boolean', 'up') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "up" is of the wrong type. """ if isinstance(value, string_types): try: return bool_dict[value.lower()] except KeyError: raise VdtTypeError(value) # we do an equality test rather than an identity test # this ensures Python 2.2 compatibilty # and allows 0 and 1 to represent True and False if value == False: return False elif value == True: return True else: raise VdtTypeError(value)
Check if the value represents a boolean. >>> vtor = Validator() >>> vtor.check('boolean', 0) 0 >>> vtor.check('boolean', False) 0 >>> vtor.check('boolean', '0') 0 >>> vtor.check('boolean', 'off') 0 >>> vtor.check('boolean', 'false') 0 >>> vtor.check('boolean', 'no') 0 >>> vtor.check('boolean', 'nO') 0 >>> vtor.check('boolean', 'NO') 0 >>> vtor.check('boolean', 1) 1 >>> vtor.check('boolean', True) 1 >>> vtor.check('boolean', '1') 1 >>> vtor.check('boolean', 'on') 1 >>> vtor.check('boolean', 'true') 1 >>> vtor.check('boolean', 'yes') 1 >>> vtor.check('boolean', 'Yes') 1 >>> vtor.check('boolean', 'YES') 1 >>> vtor.check('boolean', '') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "" is of the wrong type. >>> vtor.check('boolean', 'up') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "up" is of the wrong type.
entailment
def is_ip_addr(value): """ Check that the supplied value is an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. >>> vtor = Validator() >>> vtor.check('ip_addr', '1 ') '1' >>> vtor.check('ip_addr', ' 1.2') '1.2' >>> vtor.check('ip_addr', ' 1.2.3 ') '1.2.3' >>> vtor.check('ip_addr', '1.2.3.4') '1.2.3.4' >>> vtor.check('ip_addr', '0.0.0.0') '0.0.0.0' >>> vtor.check('ip_addr', '255.255.255.255') '255.255.255.255' >>> vtor.check('ip_addr', '255.255.255.256') # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "255.255.255.256" is unacceptable. >>> vtor.check('ip_addr', '1.2.3.4.5') # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "1.2.3.4.5" is unacceptable. >>> vtor.check('ip_addr', 0) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. """ if not isinstance(value, string_types): raise VdtTypeError(value) value = value.strip() try: dottedQuadToNum(value) except ValueError: raise VdtValueError(value) return value
Check that the supplied value is an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. >>> vtor = Validator() >>> vtor.check('ip_addr', '1 ') '1' >>> vtor.check('ip_addr', ' 1.2') '1.2' >>> vtor.check('ip_addr', ' 1.2.3 ') '1.2.3' >>> vtor.check('ip_addr', '1.2.3.4') '1.2.3.4' >>> vtor.check('ip_addr', '0.0.0.0') '0.0.0.0' >>> vtor.check('ip_addr', '255.255.255.255') '255.255.255.255' >>> vtor.check('ip_addr', '255.255.255.256') # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "255.255.255.256" is unacceptable. >>> vtor.check('ip_addr', '1.2.3.4.5') # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "1.2.3.4.5" is unacceptable. >>> vtor.check('ip_addr', 0) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type.
entailment
def is_list(value, min=None, max=None): """ Check that the value is a list of values. You can optionally specify the minimum and maximum number of members. It does no check on list members. >>> vtor = Validator() >>> vtor.check('list', ()) [] >>> vtor.check('list', []) [] >>> vtor.check('list', (1, 2)) [1, 2] >>> vtor.check('list', [1, 2]) [1, 2] >>> vtor.check('list(3)', (1, 2)) # doctest: +SKIP Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6)) # doctest: +SKIP Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4)) # doctest: +SKIP [1, 2, 3, 4] >>> vtor.check('list', 0) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('list', '12') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type. """ (min_len, max_len) = _is_num_param(('min', 'max'), (min, max)) if isinstance(value, string_types): raise VdtTypeError(value) try: num_members = len(value) except TypeError: raise VdtTypeError(value) if min_len is not None and num_members < min_len: raise VdtValueTooShortError(value) if max_len is not None and num_members > max_len: raise VdtValueTooLongError(value) return list(value)
Check that the value is a list of values. You can optionally specify the minimum and maximum number of members. It does no check on list members. >>> vtor = Validator() >>> vtor.check('list', ()) [] >>> vtor.check('list', []) [] >>> vtor.check('list', (1, 2)) [1, 2] >>> vtor.check('list', [1, 2]) [1, 2] >>> vtor.check('list(3)', (1, 2)) # doctest: +SKIP Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6)) # doctest: +SKIP Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4)) # doctest: +SKIP [1, 2, 3, 4] >>> vtor.check('list', 0) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('list', '12') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type.
entailment
def is_int_list(value, min=None, max=None):
    """
    Check that the value is a list of integers.

    Optionally specify the minimum and maximum number of members; each
    member is individually validated as an integer.
    """
    members = is_list(value, min, max)
    return [is_integer(member) for member in members]
Check that the value is a list of integers. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an integer. >>> vtor = Validator() >>> vtor.check('int_list', ()) [] >>> vtor.check('int_list', []) [] >>> vtor.check('int_list', (1, 2)) [1, 2] >>> vtor.check('int_list', [1, 2]) [1, 2] >>> vtor.check('int_list', [1, 'a']) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type.
entailment
def is_bool_list(value, min=None, max=None):
    """
    Check that the value is a list of booleans.

    Optionally specify the minimum and maximum number of members; each
    member is individually validated as a boolean.
    """
    members = is_list(value, min, max)
    return [is_boolean(member) for member in members]
Check that the value is a list of booleans. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a boolean. >>> vtor = Validator() >>> vtor.check('bool_list', ()) [] >>> vtor.check('bool_list', []) [] >>> check_res = vtor.check('bool_list', (True, False)) >>> check_res == [True, False] 1 >>> check_res = vtor.check('bool_list', [True, False]) >>> check_res == [True, False] 1 >>> vtor.check('bool_list', [True, 'a']) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type.
entailment
def is_float_list(value, min=None, max=None):
    """
    Check that the value is a list of floats.

    Optionally specify the minimum and maximum number of members; each
    member is individually validated as a float.
    """
    members = is_list(value, min, max)
    return [is_float(member) for member in members]
Check that the value is a list of floats. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a float. >>> vtor = Validator() >>> vtor.check('float_list', ()) [] >>> vtor.check('float_list', []) [] >>> vtor.check('float_list', (1, 2.0)) [1.0, 2.0] >>> vtor.check('float_list', [1, 2.0]) [1.0, 2.0] >>> vtor.check('float_list', [1, 'a']) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type.
entailment
def is_string_list(value, min=None, max=None):
    """
    Check that the value is a list of strings.

    Optionally specify the minimum and maximum number of members; each
    member is individually validated as a string.
    """
    # Reject a bare string up front so it is not treated as a sequence
    # of one-character members.
    if isinstance(value, string_types):
        raise VdtTypeError(value)
    members = is_list(value, min, max)
    return [is_string(member) for member in members]
Check that the value is a list of strings. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a string. >>> vtor = Validator() >>> vtor.check('string_list', ()) [] >>> vtor.check('string_list', []) [] >>> vtor.check('string_list', ('a', 'b')) ['a', 'b'] >>> vtor.check('string_list', ['a', 1]) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "1" is of the wrong type. >>> vtor.check('string_list', 'hello') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "hello" is of the wrong type.
entailment
def is_ip_addr_list(value, min=None, max=None):
    """
    Check that the value is a list of IP addresses.

    Optionally specify the minimum and maximum number of members; each
    member is individually validated as an IP address.
    """
    members = is_list(value, min, max)
    return [is_ip_addr(member) for member in members]
Check that the value is a list of IP addresses. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an IP address. >>> vtor = Validator() >>> vtor.check('ip_addr_list', ()) [] >>> vtor.check('ip_addr_list', []) [] >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8')) ['1.2.3.4', '5.6.7.8'] >>> vtor.check('ip_addr_list', ['a']) # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "a" is unacceptable.
entailment
def force_list(value, min=None, max=None):
    """
    Check that a value is a list, coercing a non-sequence value (such as
    a bare string) into a single-member list.

    Useful where users forget the trailing comma that turns a single
    value into a list.

    You can optionally specify the minimum and maximum number of members.
    A minimum greater than one will fail if the user only supplies a
    string.
    """
    wrapped = value if isinstance(value, (list, tuple)) else [value]
    return is_list(wrapped, min, max)
Check that a value is a list, coercing strings into a list with one member. Useful where users forget the trailing comma that turns a single value into a list. You can optionally specify the minimum and maximum number of members. A minimum of greater than one will fail if the user only supplies a string. >>> vtor = Validator() >>> vtor.check('force_list', ()) [] >>> vtor.check('force_list', []) [] >>> vtor.check('force_list', 'hello') ['hello']
entailment
def is_mixed_list(value, *args):
    """
    Check that the value is a list whose members match, positionally,
    the check names given as positional arguments.

    Each argument should be one of the following strings :
    'integer', 'float', 'ip_addr', 'string', 'boolean'

    So you can specify a list of two strings, followed by
    two integers as :

        mixed_list('string', 'string', 'integer', 'integer')

    The length of the list must match the number of positional
    arguments you supply.

    Raises ``VdtTypeError`` when ``value`` has no length,
    ``VdtValueTooShortError`` / ``VdtValueTooLongError`` on a length
    mismatch, and ``VdtParamError`` when an unknown check name is given.
    """
    try:
        length = len(value)
    except TypeError:
        raise VdtTypeError(value)
    if length < len(args):
        raise VdtValueTooShortError(value)
    elif length > len(args):
        raise VdtValueTooLongError(value)
    try:
        return [fun_dict[arg](val) for arg, val in zip(args, value)]
    except KeyError as e:
        # Unknown check name: surface it as a parameter error, chaining
        # the original KeyError for debugging context.
        raise VdtParamError('mixed_list', e) from e
Check that the value is a list. Allow specifying the type of each member. Work on lists of specific lengths. You specify each member as a positional argument specifying type Each type should be one of the following strings : 'integer', 'float', 'ip_addr', 'string', 'boolean' So you can specify a list of two strings, followed by two integers as : mixed_list('string', 'string', 'integer', 'integer') The length of the list must match the number of positional arguments you supply. >>> vtor = Validator() >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')" >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True)) >>> check_res == [1, 2.0, '1.2.3.4', 'a', True] 1 >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True')) >>> check_res == [1, 2.0, '1.2.3.4', 'a', True] 1 >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True)) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "b" is of the wrong type. >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a')) # doctest: +SKIP Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short. >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b')) # doctest: +SKIP Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long. >>> vtor.check(mix_str, 0) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. This test requires an elaborate setup, because of a change in error string output from the interpreter between Python 2.2 and 2.3 . >>> res_seq = ( ... 'passed an incorrect value "', ... 'yoda', ... '" for parameter "mixed_list".', ... ) >>> res_str = "'".join(res_seq) >>> try: ... vtor.check('mixed_list("yoda")', ('a')) ... except VdtParamError as err: ... str(err) == res_str 1
entailment
def is_option(value, *options):
    """
    This check matches the value to any of a set of options.
    Raises ``VdtTypeError`` for non-strings, ``VdtValueError`` when the
    string is not one of the allowed options.
    """
    if not isinstance(value, string_types):
        raise VdtTypeError(value)
    if value not in options:
        raise VdtValueError(value)
    return value
This check matches the value to any of a set of options. >>> vtor = Validator() >>> vtor.check('option("yoda", "jedi")', 'yoda') 'yoda' >>> vtor.check('option("yoda", "jedi")', 'jed') # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "jed" is unacceptable. >>> vtor.check('option("yoda", "jedi")', 0) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type.
entailment
def check(self, check, value, missing=False):
    """
    Usage: check(check, value)

    Arguments:
        check: string representing check to apply (including arguments)
        value: object to be checked
    Returns value, converted to correct type if necessary

    If the check fails, raises a ``ValidateError`` subclass.

    >>> vtor = Validator()
    >>> vtor.check('yoda', '')  # doctest: +SKIP
    Traceback (most recent call last):
    VdtUnknownCheckError: the check "yoda" is unknown.
    >>> vtor.check('yoda()', '')  # doctest: +SKIP
    Traceback (most recent call last):
    VdtUnknownCheckError: the check "yoda" is unknown.

    >>> vtor.check('string(default="")', '', missing=True)
    ''
    """
    # Parse (and cache) the check expression into the check-function
    # name, its positional/keyword arguments, and any declared default.
    fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
    if missing:
        if default is None:
            # no information needed here - to be handled by caller
            raise VdtMissingValue()
        # Substitute the declared default for the missing value
        # (presumably converting a literal 'None' to None).
        value = self._handle_none(default)
    if value is None:
        # None bypasses the check function entirely.
        return None
    return self._check_value(value, fun_name, fun_args, fun_kwargs)
Usage: check(check, value) Arguments: check: string representing check to apply (including arguments) value: object to be checked Returns value, converted to correct type if necessary If the check fails, raises a ``ValidateError`` subclass. >>> vtor = Validator() >>> vtor.check('yoda', '') # doctest: +SKIP Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. >>> vtor.check('yoda()', '') # doctest: +SKIP Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. >>> vtor.check('string(default="")', '', missing=True) ''
entailment
def _unquote(self, val):
    """Strip one pair of matching surrounding quotes, if present."""
    has_quote_pair = (
        len(val) >= 2
        and val[0] in ("'", '"')
        and val[-1] == val[0]
    )
    return val[1:-1] if has_quote_pair else val
Unquote a value if necessary.
entailment
def _list_handle(self, listmatch):
    """Take apart a ``keyword=list('val, 'val')`` type string."""
    name = listmatch.group(1)
    args = listmatch.group(2)
    # Unquote every member the list-member regex extracts from the
    # argument string.
    members = [self._unquote(arg) for arg in self._list_members.findall(args)]
    return name, members
Take apart a ``keyword=list('val, 'val')`` type string.
entailment
def get_default_value(self, check):
    """
    Given a check, return the default value for the check
    (converted to the right type).

    If the check doesn't specify a default value then a
    ``KeyError`` will be raised.
    """
    # Parse (and cache) the check; ``default`` is the raw, unconverted
    # default string, or None when the check declares no default.
    fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
    if default is None:
        raise KeyError('Check "%s" has no default value.' % check)
    # Presumably converts a literal 'None' default into None.
    value = self._handle_none(default)
    if value is None:
        return value
    # Run the default through the check so it comes back correctly typed.
    return self._check_value(value, fun_name, fun_args, fun_kwargs)
Given a check, return the default value for the check (converted to the right type). If the check doesn't specify a default value then a ``KeyError`` will be raised.
entailment
def match_header(cls, header):
    """A constant value HDU will only be recognized as such if the
    header contains a valid PIXVALUE and NAXIS == 0.
    """
    pixvalue = header.get('PIXVALUE')
    naxis = header.get('NAXIS', 0)

    # The base class must also accept the header; PIXVALUE must be
    # numeric (float or int) and the HDU must declare no data axes.
    return (super(_ConstantValueImageBaseHDU, cls).match_header(header) and
            (isinstance(pixvalue, float) or _is_int(pixvalue)) and
            naxis == 0)
A constant value HDU will only be recognized as such if the header contains a valid PIXVALUE and NAXIS == 0.
entailment
def _check_constant_value_data(self, data):
    """Verify that the HDU's data is a constant value array.

    Returns the constant value when every element equals the first
    element, otherwise ``None``.
    """
    candidate = data.flat[0]
    is_constant = np.all(data == candidate)
    return candidate if is_constant else None
Verify that the HDU's data is a constant value array.
entailment
def get_current_index(self):
    """ Return currently selected index (or -1) """
    # curselection() yields a tuple of stringified indices; convert the
    # first to int, or report -1 when nothing is selected.
    selection = self.__lb.curselection()
    if not selection:
        return -1
    return int(selection[0])
Return currently selected index (or -1)
entailment
def convert(input):
    """Input GEIS files "input" will be read and a HDUList object will
    be returned that matches the waiver-FITS format written out by
    'stwfits' in IRAF.  The user can use the writeto method to write
    the HDUList object to a FITS file.
    """
    global dat
    cardLen = fits.Card.length

    # input file(s) must be of the form *.??h and *.??d
    if input[-1] != 'h' or input[-4] != '.':
        # NOTE(review): raising a string is a Python 2 relic; under
        # Python 3 this raises TypeError - should be an Exception subclass.
        raise "Illegal input GEIS file name %s" % input

    data_file = input[:-1]+'d'

    _os = sys.platform
    if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
        bytes_per_line = cardLen+1
    else:
        # NOTE(review): string raise - same caveat as above.
        raise "Platform %s is not supported (yet)." % _os

    end_card = 'END'+' '* (cardLen-3)

    # open input file
    im = open(input)

    # Generate the primary HDU
    cards = []
    while 1:
        line = im.read(bytes_per_line)[:cardLen]
        line = line[:8].upper() + line[8:]
        if line == end_card:
            break
        cards.append(fits.Card.fromstring(line))

    phdr = fits.Header(cards)
    im.close()

    # Determine starting point for adding Group Parameter Block keywords to Primary header
    phdr_indx = phdr.index('PSIZE')

    _naxis0 = phdr.get('NAXIS', 0)
    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
    _naxis.insert(0, _naxis0)
    _bitpix = phdr['BITPIX']
    _psize = phdr['PSIZE']
    if phdr['DATATYPE'][:4] == 'REAL':
        # Floating-point data is flagged with a negative BITPIX (FITS convention).
        _bitpix = -_bitpix
    if _naxis0 > 0:
        size = reduce(lambda x,y:x*y, _naxis[1:])
        data_size = abs(_bitpix) * size // 8
    else:
        data_size = 0
    group_size = data_size + _psize // 8

    # decode the group parameter definitions,
    # group parameters will become extension table
    groups = phdr['GROUPS']
    gcount = phdr['GCOUNT']
    pcount = phdr['PCOUNT']

    formats = []
    bools = []
    floats = []
    cols = []       # column definitions used for extension table
    cols_dict = {}  # provides name access to Column defs
    _range = range(1, pcount+1)
    key = [phdr['PTYPE'+str(j)] for j in _range]
    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]

    # delete group parameter definition header keywords
    _list = ['PTYPE'+str(j) for j in _range] + \
            ['PDTYPE'+str(j) for j in _range] + \
            ['PSIZE'+str(j) for j in _range] + \
            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']

    # Construct record array formats for the group parameters
    # as interpreted from the Primary header file
    for i in range(1, pcount+1):
        ptype = key[i-1]
        pdtype = phdr['PDTYPE'+str(i)]
        star = pdtype.find('*')
        _type = pdtype[:star]
        _bytes = pdtype[star+1:]

        # collect boolean keywords since they need special attention later

        if _type == 'LOGICAL':
            bools.append(i)
        if pdtype == 'REAL*4':
            floats.append(i)

        # identify keywords which require conversion to special units
        if ptype in kw_DOUBLE:
            _type = 'DOUBLE'

        fmt = geis_fmt[_type] + _bytes
        formats.append((ptype,fmt))

        # Set up definitions for use in creating the group-parameter block table
        nrpt = ''
        nbits = str(int(_bytes)*8)
        if 'CHAR' in _type:
            nrpt = _bytes
            nbits = _bytes

        afmt = cols_fmt[_type]+ nbits
        if 'LOGICAL' in _type:
            afmt = cols_fmt[_type]
        cfmt = cols_pfmt[_type]+nrpt
        #print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt
        cols_dict[ptype] = fits.Column(name=ptype,format=cfmt,array=numpy.zeros(gcount,dtype=afmt))
        cols.append(cols_dict[ptype])  # This keeps the columns in order

    _shape = _naxis[1:]
    _shape.reverse()
    _code = fits.BITPIX2DTYPE[_bitpix]
    _bscale = phdr.get('BSCALE', 1)
    _bzero = phdr.get('BZERO', 0)

    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
        # Unsigned 16-bit data is stored signed with a 32768 offset.
        _uint16 = 1
        _bzero = 32768
    else:
        _uint16 = 0

    # delete from the end, so it will not conflict with previous delete
    for i in range(len(phdr)-1, -1, -1):
        if phdr.cards[i].keyword in _list:
            del phdr[i]

    # clean up other primary header keywords
    phdr['SIMPLE'] = True
    phdr['GROUPS'] = False
    _after = 'NAXIS'
    if _naxis0 > 0:
        _after += str(_naxis0)
    phdr.set('EXTEND', value=True, comment="FITS dataset may contain extensions", after=_after)

    # Use copy-on-write for all data types since byteswap may be needed
    # in some platforms.
    f1 = open(data_file, mode='rb')
    dat = f1.read()
    errormsg = ""

    # Define data array for all groups
    arr_shape = _naxis[:]
    arr_shape[0] = gcount
    arr_stack = numpy.zeros(arr_shape,dtype=_code)

    loc = 0
    for k in range(gcount):
        # NOTE(review): numpy.fromstring is deprecated in favor of
        # numpy.frombuffer - confirm before changing.
        ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
        ext_dat = ext_dat.reshape(_shape)
        if _uint16:
            ext_dat += _bzero
        # Check to see whether there are any NaN's or infs which might indicate
        # a byte-swapping problem, such as being written out on little-endian
        # and being read in on big-endian or vice-versa.
        if _code.find('float') >= 0 and \
           (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
            errormsg += "===================================\n"
            errormsg += "= WARNING: =\n"
            errormsg += "= Input image: =\n"
            errormsg += input+"[%d]\n"%(k+1)
            errormsg += "= had floating point data values =\n"
            errormsg += "= of NaN and/or Inf. =\n"
            errormsg += "===================================\n"
        elif _code.find('int') >= 0:
            # Check INT data for max values
            ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
            if ext_dat_exp.max() == int(_bitpix) - 1:
                # Potential problems with byteswapping
                errormsg += "===================================\n"
                errormsg += "= WARNING: =\n"
                errormsg += "= Input image: =\n"
                errormsg += input+"[%d]\n"%(k+1)
                errormsg += "= had integer data values =\n"
                errormsg += "= with maximum bitvalues. =\n"
                errormsg += "===================================\n"

        arr_stack[k] = ext_dat
        #ext_hdu = fits.hdu.ImageHDU(data=ext_dat)

        # The group parameter block trails each group's pixel data.
        rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats)

        loc += group_size

        # Add data from this GPB to table
        for i in range(1, pcount+1):
            val = rec[0][i-1]
            if i in bools:
                if val:
                    val = 'T'
                else:
                    val = 'F'
            cols[i-1].array[k] = val

        # Based on the first group, add GPB keywords to PRIMARY header
        if k == 0:
            # Create separate PyFITS Card objects for each entry in 'rec'
            # and update Primary HDU with these keywords after PSIZE
            for i in range(1, pcount+1):
                #val = rec.field(i-1)[0]
                val = rec[0][i-1]
                if val.dtype.kind == 'S':
                    val = val.decode('ascii')

                if i in bools:
                    if val:
                        val = True
                    else:
                        val = False
                elif i in floats:
                    # use fromstring, format in Card is deprecated in pyfits 0.9
                    _str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1])
                    _card = fits.Card.fromstring(_str)
                else:
                    _card = fits.Card(key=key[i-1], value=val, comment=comm[i-1])

                phdr.insert(phdr_indx+i, _card)

            # deal with bscale/bzero
            if (_bscale != 1 or _bzero != 0):
                phdr['BSCALE'] = _bscale
                phdr['BZERO'] = _bzero

        #hdulist.append(ext_hdu)

    # Define new table based on Column definitions
    ext_table = fits.TableHDU.from_columns(cols)
    ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS')
    # Add column descriptions to header of table extension to match stwfits output
    for i in range(len(key)):
        ext_table.header.append(fits.Card(keyword=key[i], value=comm[i]))

    if errormsg != "":
        errormsg += "===================================\n"
        errormsg += "= This file may have been =\n"
        errormsg += "= written out on a platform =\n"
        errormsg += "= with a different byte-order. =\n"
        errormsg += "= =\n"
        errormsg += "= Please verify that the values =\n"
        errormsg += "= are correct or apply the =\n"
        errormsg += "= '.byteswap()' method. =\n"
        errormsg += "===================================\n"
        print(errormsg)

    f1.close()

    hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)])
    hdulist.append(ext_table)

    stsci2(hdulist,input)
    return hdulist
Input GEIS files "input" will be read and a HDUList object will be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF. The user can use the writeto method to write the HDUList object to a FITS file.
entailment
def _loadConfig(self):
    """
    loads configuration from some predictable locations.

    :return: the config.
    """
    configPath = path.join(self._getConfigPath(), self._name + ".yml")
    if not os.path.exists(configPath):
        # No user config yet; write the defaults out so they can be edited.
        defaultConfig = self.loadDefaultConfig()
        self._storeConfig(defaultConfig, configPath)
        return defaultConfig
    self.logger.warning("Loading config from " + configPath)
    with open(configPath, 'r') as yml:
        return yaml.load(yml, Loader=yaml.FullLoader)
loads configuration from some predictable locations. :return: the config.
entailment
def _storeConfig(self, config, configPath):
    """
    Writes the config to the configPath.

    :param config: a dict of config.
    :param configPath: the path to the file to write to, intermediate
        dirs will be created as necessary.
    """
    self.logger.info("Writing to " + str(configPath))
    parent = os.path.dirname(configPath)
    os.makedirs(parent, exist_ok=True)
    with open(configPath, 'w') as yml:
        yaml.dump(config, yml, default_flow_style=False)
Writes the config to the configPath. :param config a dict of config. :param configPath the path to the file to write to, intermediate dirs will be created as necessary.
entailment
def _getConfigPath(self):
    """
    Gets the currently configured config path.

    :return: the value of VIBE_CONFIG_HOME when set, otherwise ~/.vibe
    """
    override = environ.get('VIBE_CONFIG_HOME')
    if override is not None:
        return override
    return path.join(path.expanduser("~"), '.vibe')
Gets the currently configured config path. :return: the path, raises ValueError if it doesn't exist.
entailment
def configureLogger(self):
    """
    Configures the python logging system to log to a debug file and to
    stdout for warn and above.

    :return: the base logger.
    """
    # DEBUG when debug logging is enabled, INFO otherwise.
    baseLogLevel = logging.DEBUG if self.isDebugLogging() else logging.INFO
    # create recorder app root logger
    logger = logging.getLogger(self._name)
    logger.setLevel(baseLogLevel)
    # file handler: rotate at 10 MiB, keeping 10 backups, stored
    # alongside the app config.
    fh = handlers.RotatingFileHandler(path.join(self._getConfigPath(), self._name + '.log'),
                                      maxBytes=10 * 1024 * 1024, backupCount=10)
    fh.setLevel(baseLogLevel)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARN)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
Configures the python logging system to log to a debug file and to stdout for warn and above. :return: the base logger.
entailment
def ibm_to_ieee(ibm):
    '''
    Translate IBM-format floating point numbers (as bytes) to IEEE 754
    64-bit floating point format (as Python float).
    '''
    # IBM mainframe:    sign * 0.mantissa * 16 ** (exponent - 64)
    # Python uses IEEE: sign * 1.mantissa * 2 ** (exponent - 1023)

    # Right-pad to 8 bytes; 2-8 byte truncated encodings are expected
    # input, and any bizarre size will fail inside struct.unpack anyway.
    ibm = ibm.ljust(8, b'\x00')

    # View the 8 bytes as a single big-endian unsigned integer.
    bits, = struct.unpack('>Q', ibm)

    # IBM layout: 1-bit sign, 7-bit excess-64 exponent, 56-bit mantissa.
    sign = bits & 0x8000000000000000
    exponent = (bits & 0x7f00000000000000) >> 56
    mantissa = bits & 0x00ffffffffffffff

    if mantissa == 0:
        # A zero mantissa is either a true zero or a missing-value marker.
        leading = ibm[0:1]
        if leading == b'\x00':
            return 0.0
        if leading in b'_.ABCDEFGHIJKLMNOPQRSTUVWXYZ':
            return float('nan')
        raise ValueError('Neither zero nor NaN: %r' % ibm)

    # The base-16 exponent allows up to 3 leading zero bits in the binary
    # mantissa ("wobbling precision"); align the first 1-bit for IEEE.
    if bits & 0x0080000000000000:
        shift = 3
    elif bits & 0x0040000000000000:
        shift = 2
    elif bits & 0x0020000000000000:
        shift = 1
    else:
        shift = 0
    mantissa >>= shift

    # Drop the 1 bit left of the binary point - IEEE keeps it implicit.
    mantissa &= 0xffefffffffffffff

    # Rebias: subtract 65 (excess-64 plus the implicit-bit adjustment),
    # scale base 16 -> base 2 (multiply by 4), then apply the IEEE
    # excess-1023 bias plus the alignment shifts performed above.
    exponent -= 65
    exponent <<= 2
    exponent += shift + 1023

    # IEEE layout: 1-bit sign, 11-bit exponent, 52-bit mantissa; the
    # sign bit is already in its final position.
    ieee = sign | (exponent << 52) | mantissa
    return struct.unpack(">d", struct.pack(">Q", ieee))[0]
Translate IBM-format floating point numbers (as bytes) to IEEE 754 64-bit floating point format (as Python float).
entailment
def ieee_to_ibm(ieee):
    '''
    Translate Python floating point numbers to IBM-format (as bytes).
    '''
    # Python uses IEEE: sign * 1.mantissa * 2 ** (exponent - 1023)
    # IBM mainframe:    sign * 0.mantissa * 16 ** (exponent - 64)
    if ieee == 0.0:
        return b'\x00' * 8
    if ieee is None or math.isnan(ieee):
        # Missing values are encoded with an ASCII underscore marker byte.
        return b'_' + b'\x00' * 7
    if math.isinf(ieee):
        raise NotImplementedError('Cannot convert infinity')

    # View the IEEE double as one big-endian unsigned integer and pull
    # out the three fields.
    packed, = struct.unpack('>Q', struct.pack('>d', ieee))
    sign = (packed >> 63) & 1                     # 1-bit sign
    exponent = ((packed >> 52) & 0x7ff) - 1023    # 11-bit exponent, unbiased
    mantissa = packed & 0x000fffffffffffff        # 52-bit significand

    if exponent > 248:
        msg = 'Cannot store magnitude more than ~ 16 ** 63 as IBM-format'
        raise Overflow(msg)
    if exponent < -260:
        msg = 'Cannot store magnitude less than ~ 16 ** -65 as IBM-format'
        raise Underflow(msg)

    # Restore the implicit leading 1 of the IEEE significand; the
    # exponent is incremented below to compensate for it.
    mantissa = 0x0010000000000000 | mantissa

    # Convert the base-2 exponent to base 16, folding the division
    # remainder back into the significand so no information is lost.
    exponent, remainder = divmod(exponent, 4)
    mantissa <<= remainder

    # +1 for the implicit-bit adjustment, +64 for IBM's excess-64 bias.
    exponent += 1
    exponent += 64

    # IBM layout: 1-bit sign, 7-bit exponent, 56-bit mantissa.
    return struct.pack('>Q', (sign << 63) | (exponent << 56) | mantissa)
Translate Python floating point numbers to IBM-format (as bytes).
entailment
def dumps(columns):
    '''
    Serialize ``columns`` to a JSON formatted ``bytes`` object.
    '''
    buffer = BytesIO()
    dump(columns, buffer)
    return buffer.getvalue()
Serialize ``columns`` to a JSON formatted ``bytes`` object.
entailment
def match(header):
    '''
    Parse the 3-line (240-byte) header of a SAS XPORT file.
    '''
    mo = Library.header_re.match(header)
    if mo is None:
        raise ValueError('Not a SAS Version 5 or 6 XPORT file')
    parsed = {
        'created': strptime(mo['created']),
        'modified': strptime(mo['modified']),
        'sas_version': float(mo['version']),
        'os_version': mo['os'].decode().strip(),
    }
    return parsed
Parse the 3-line (240-byte) header of a SAS XPORT file.
entailment
def header_match(cls, header):
    '''
    Parse the 4-line (320-byte) library member header.
    '''
    mo = cls.header_re.match(header)
    if mo is None:
        msg = f'Expected {cls.header_re.pattern!r}, got {header!r}'
        raise ValueError(msg)

    def text(field):
        # Named groups are raw bytes; decode and trim the padding.
        return mo[field].decode().strip()

    return {
        'name': text('name'),
        'label': text('label'),
        'type': text('type'),
        'created': strptime(mo['created']),
        'modified': strptime(mo['modified']),
        'sas_version': float(mo['version']),
        'os_version': text('os'),
    }
Parse the 4-line (320-byte) library member header.
entailment
def header_match(cls, data):
    '''
    Parse a member namestrs header (1 line, 80 bytes).
    '''
    match = cls.header_re.match(data)
    # The only field of interest is the declared variable count.
    return int(match['n_variables'])
Parse a member namestrs header (1 line, 80 bytes).
entailment
def readall(cls, member, size):
    """Parse variable metadata for a XPORT file member.

    Reads the namestr-count header line, then ``n`` fixed-size namestr
    records, then any trailing padding that rounds the records up to a
    whole record line.

    Parameters
    ----------
    member : Member
        Member whose ``library.fp`` file object supplies the bytes.
    size : int
        Size in bytes of a single namestr record.

    Returns
    -------
    list of Variable
    """
    fp = member.library.fp
    LINE = member.library.LINE
    n = cls.header_match(fp.read(LINE))
    namestrs = [fp.read(size) for i in range(n)]
    # Each namestr field is 140 bytes long, but the fields are
    # streamed together and broken in 80-byte pieces.  If the last
    # byte of the last namestr field does not fall in the last byte
    # of the 80-byte record, the record is padded with ASCII blanks
    # to 80 bytes.
    remainder = n * size % LINE
    if remainder:
        # FIX: padding was computed from a hard-coded 80 even though the
        # record length is taken from ``member.library.LINE`` everywhere
        # else in this function; use LINE consistently.
        padding = LINE - remainder
        fp.read(padding)
    info = [cls.unpack(s) for s in namestrs]
    for d in info:
        d['format'] = Format(**d['format'])
        d['iformat'] = InputFormat(**d['iformat'])
    return [Variable(**d) for d in info]
Parse variable metadata for a XPORT file member.
entailment
def to_jd(year, month, day):
    """Determine the Julian day count for an Islamic calendar date."""
    # Days contributed by completed months of this year (alternating
    # 30/29-day months -> 29.5-day average, rounded up).
    months_done = ceil(29.5 * (month - 1))
    # 354 days per completed common year, plus the intercalary leap
    # days accumulated over the 30-year cycle.
    year_days = (year - 1) * 354
    leap_days = trunc((3 + (11 * year)) / 30)
    return day + months_done + year_days + leap_days + EPOCH - 1
Determine Julian day count from Islamic date
entailment
def from_jd(jd):
    """Calculate the Islamic (year, month, day) from a Julian day count."""
    # Snap to the half-day boundary Julian days begin on.
    jd = trunc(jd) + 0.5
    elapsed = jd - EPOCH
    year = trunc((30 * elapsed + 10646) / 10631)
    # Month index from the day offset within the year; clamped to 12.
    month = min(12, ceil((jd - (29 + to_jd(year, 1, 1))) / 29.5) + 1)
    day = int(jd - to_jd(year, month, 1)) + 1
    return (year, month, day)
Calculate Islamic date from Julian day
entailment
def checkAllTriggers(self, action):
    """Notify every parameter widget that it was just edited.

    Used right after all the widgets have had their values set or
    forced (e.g. via setAllEntriesFromParList) so that each one can
    check for and fire any trigger actions it has attached.
    """
    for widget in self.entryNo:
        widget.widgetEdited(action=action, skipDups=False)
Go over all widgets and let them know they have been edited recently and they need to check for any trigger actions. This would be used right after all the widgets have their values set or forced (e.g. via setAllEntriesFromParList).
entailment
def freshenFocus(self):
    """Scroll back to the top after something warranting a new look.

    The actual scroll is deferred slightly (10 ms) so that other
    events already in the queue are processed first.
    """
    top = self.top
    top.update_idletasks()
    top.after(10, self.setViewAtTop)
Did something which requires a new look. Move scrollbar up. This often needs to be delayed a bit however, to let other events in the queue through first.
entailment
def mwl(self, event):
    """Mouse-wheel handler (tkinter seems to need Tk v8.5+ for this)."""
    canvas = self.top.f.canvas
    factor = self._tmwm
    if event.num == 4:
        # wheel-up on Linux
        amount = -factor
    elif event.num == 5:
        # wheel-down on Linux
        amount = factor
    else:
        # other platforms deliver the direction in event.delta,
        # with the sign reversed
        amount = -(event.delta) * factor
    canvas.yview_scroll(amount, 'units')
Mouse Wheel - under tkinter we seem to need Tk v8.5+ for this
entailment
def focusNext(self, event):
    """Set focus to the next widget in the Tab-traversal order."""
    try:
        event.widget.tk_focusNext().focus_set()
    except TypeError:
        # see tkinter equivalent code for tk_focusNext to see
        # commented original version
        # Fall back to invoking the Tcl-level helper directly and
        # resolving the returned widget path name ourselves.
        name = event.widget.tk.call('tk_focusNext', event.widget._w)
        event.widget._nametowidget(str(name)).focus_set()
Set focus to next item in sequence
entailment
def focusPrev(self, event):
    """Set focus to the previous widget in the Tab-traversal order."""
    try:
        event.widget.tk_focusPrev().focus_set()
    except TypeError:
        # see tkinter equivalent code for tk_focusPrev to see
        # commented original version
        # Fall back to invoking the Tcl-level helper directly and
        # resolving the returned widget path name ourselves.
        name = event.widget.tk.call('tk_focusPrev', event.widget._w)
        event.widget._nametowidget(str(name)).focus_set()
Set focus to previous item in sequence
entailment
def doScroll(self, event):
    """Scroll the panel so that the widget with focus is visible.

    Tracks the last widget that doScroll was called for and ignores
    repeated calls.  That handles the case where the focus moves not
    between parameter entries but to someplace outside the hierarchy.
    In that case the scrolling is not expected.

    Returns false if the scroll is ignored, else true.
    """
    canvas = self.top.f.canvas
    widgetWithFocus = event.widget
    if widgetWithFocus is self.lastFocusWidget:
        return FALSE
    self.lastFocusWidget = widgetWithFocus
    if widgetWithFocus is None:
        return TRUE
    # determine distance of widget from top & bottom edges of canvas
    y1 = widgetWithFocus.winfo_rooty()
    y2 = y1 + widgetWithFocus.winfo_height()
    cy1 = canvas.winfo_rooty()
    cy2 = cy1 + canvas.winfo_height()
    yinc = self.yscrollincrement
    if y1 < cy1:
        # widget is above the visible area: scroll up by just enough
        # whole scroll units to bring it into view
        # this will continue to work when integer division goes away
        sdist = int((y1 - cy1 - yinc + 1.) / yinc)
        canvas.yview_scroll(sdist, "units")
    elif cy2 < y2:
        # widget is below the visible area: scroll down
        sdist = int((y2 - cy2 + yinc - 1.) / yinc)
        canvas.yview_scroll(sdist, "units")
    return TRUE
Scroll the panel down to ensure widget with focus to be visible Tracks the last widget that doScroll was called for and ignores repeated calls. That handles the case where the focus moves not between parameter entries but to someplace outside the hierarchy. In that case the scrolling is not expected. Returns false if the scroll is ignored, else true.
entailment
def _handleParListMismatch(self, probStr, extra=False): """ Handle the situation where two par lists do not match. This is meant to allow subclasses to override. Note that this only handles "missing" pars and "extra" pars, not wrong-type pars. """ errmsg = 'ERROR: mismatch between default and current par lists ' + \ 'for task "'+self.taskName+'"' if probStr: errmsg += '\n\t'+probStr errmsg += '\n(try: "unlearn '+self.taskName+'")' print(errmsg) return False
Handle the situation where two par lists do not match. This is meant to allow subclasses to override. Note that this only handles "missing" pars and "extra" pars, not wrong-type pars.
entailment
def _setupDefaultParamList(self): """ This creates self.defaultParamList. It also does some checks on the paramList, sets its order if needed, and deletes any extra or unknown pars if found. We assume the order of self.defaultParamList is the correct order. """ # Obtain the default parameter list self.defaultParamList = self._taskParsObj.getDefaultParList() theParamList = self._taskParsObj.getParList() # Lengths are probably equal but this isn't necessarily an error # here, so we check for differences below. if len(self.defaultParamList) != len(theParamList): # whoa, lengths don't match (could be some missing or some extra) pmsg = 'Current list not same length as default list' if not self._handleParListMismatch(pmsg): return False # convert current par values to a dict of { par-fullname:par-object } # for use below ourpardict = {} for par in theParamList: ourpardict[par.fullName()] = par # Sort our paramList according to the order of the defaultParamList # and repopulate the list according to that order. Create sortednames. sortednames = [p.fullName() for p in self.defaultParamList] # Rebuild par list sorted into correct order. Also find/flag any # missing pars or any extra/unknown pars. This automatically deletes # "extras" by not adding them to the sorted list in the first place. migrated = [] newList = [] for fullName in sortednames: if fullName in ourpardict: newList.append(ourpardict[fullName]) migrated.append(fullName) # make sure all get moved over else: # this is a missing par - insert the default version theDfltVer = \ [p for p in self.defaultParamList if p.fullName()==fullName] newList.append(copy.deepcopy(theDfltVer[0])) # Update! 
Next line writes to the self._taskParsObj.getParList() obj theParamList[:] = newList # fill with newList, keep same mem pointer # See if any got left out extras = [fn for fn in ourpardict if not fn in migrated] for fullName in extras: # this is an extra/unknown par - let subclass handle it if not self._handleParListMismatch('Unexpected par: "'+\ fullName+'"', extra=True): return False print('Ignoring unexpected par: "'+p+'"') # return value indicates that all is well to continue return True
This creates self.defaultParamList. It also does some checks on the paramList, sets its order if needed, and deletes any extra or unknown pars if found. We assume the order of self.defaultParamList is the correct order.
entailment
def _toggleSectionActiveState(self, sectionName, state, skipList): """ Make an entire section (minus skipList items) either active or inactive. sectionName is the same as the param's scope. """ # Get model data, the list of pars theParamList = self._taskParsObj.getParList() # Loop over their assoc. entries for i in range(self.numParams): if theParamList[i].scope == sectionName: if skipList and theParamList[i].name in skipList: # self.entryNo[i].setActiveState(True) # these always active pass # if it started active, we don't need to reactivate it else: self.entryNo[i].setActiveState(state)
Make an entire section (minus skipList items) either active or inactive. sectionName is the same as the param's scope.
entailment
def saveAs(self, event=None):
    """Save the parameter settings to a user-specified file.

    Any changes here must be coordinated with the corresponding tpar
    save_as function.
    """
    self.debug('Clicked Save as...')
    # On Linux Pers..Dlg causes the cwd to change, so get a copy of current
    curdir = os.getcwd()

    # The user wishes to save to a different name
    writeProtChoice = self._writeProtectOnSaveAs
    if capable.OF_TKFD_IN_EPAR:
        # Prompt using native looking dialog
        fname = asksaveasfilename(parent=self.top,
                                  title='Save Parameter File As',
                                  defaultextension=self._defSaveAsExt,
                                  initialdir=os.path.dirname(self._getSaveAsFilter()))
    else:
        # Prompt. (could use tkinter's FileDialog, but this one is prettier)
        # initWProtState is only used in the 1st call of a session
        from . import filedlg
        fd = filedlg.PersistSaveFileDialog(self.top,
                                           "Save Parameter File As",
                                           self._getSaveAsFilter(),
                                           initWProtState=writeProtChoice)
        if fd.Show() != 1:
            fd.DialogCleanup()
            os.chdir(curdir)  # in case file dlg moved us
            return
        fname = fd.GetFileName()
        writeProtChoice = fd.GetWriteProtectChoice()
        fd.DialogCleanup()

    if not fname:
        return  # canceled

    # First check the child parameters, aborting save if
    # invalid entries were encountered
    if self.checkSetSaveChildren():
        os.chdir(curdir)  # in case file dlg moved us
        return

    # Run any subclass-specific steps right before the save
    self._saveAsPreSave_Hook(fname)

    # Verify all the entries (without save), keeping track of the invalid
    # entries which have been reset to their original input values
    self.badEntriesList = self.checkSetSaveEntries(doSave=False)

    # If there were invalid entries, prepare the message dialog
    if self.badEntriesList:
        ansOKCANCEL = self.processBadEntries(self.badEntriesList,
                      self.taskName)
        if not ansOKCANCEL:
            os.chdir(curdir)  # in case file dlg moved us
            return

    # If there were no invalid entries or the user says OK, finally
    # save to their stated file.  Since we have already processed the
    # bad entries, there should be none returned.
    mstr = "TASKMETA: task="+self.taskName+" package="+self.pkgName
    if self.checkSetSaveEntries(doSave=True, filename=fname, comment=mstr,
                                set_ro=writeProtChoice, overwriteRO=True):
        os.chdir(curdir)  # in case file dlg moved us
        raise Exception("Unexpected bad entries for: "+self.taskName)

    # Run any subclass-specific steps right after the save
    self._saveAsPostSave_Hook(fname)

    os.chdir(curdir)
Save the parameter settings to a user-specified file. Any changes here must be coordinated with the corresponding tpar save_as function.
entailment
def htmlHelp(self, helpString=None, title=None, istask=False, tag=None): """ Pop up the help in a browser window. By default, this tries to show the help for the current task. With the option arguments, it can be used to show any help string. """ # Check the help string. If it turns out to be a URL, launch that, # if not, dump it to a quick and dirty tmp html file to make it # presentable, and pass that file name as the URL. if not helpString: helpString = self.getHelpString(self.pkgName+'.'+self.taskName) if not title: title = self.taskName lwr = helpString.lower() if lwr.startswith("http:") or lwr.startswith("https:") or \ lwr.startswith("file:"): url = helpString if tag and url.find('#') < 0: url += '#'+tag # print('LAUNCHING: '+url) # DBG irafutils.launchBrowser(url, subj=title) else: # Write it to a temp HTML file to display (fd, fname) = tempfile.mkstemp(suffix='.html', prefix='editpar_') os.close(fd) f = open(fname, 'w') if istask and self._knowTaskHelpIsHtml: f.write(helpString) else: f.write('<html><head><title>'+title+'</title></head>\n') f.write('<body><h3>'+title+'</h3>\n') f.write('<pre>\n'+helpString+'\n</pre></body></html>') f.close() irafutils.launchBrowser("file://"+fname, subj=title)
Pop up the help in a browser window. By default, this tries to show the help for the current task. With the option arguments, it can be used to show any help string.
entailment
def _showAnyHelp(self, kind, tag=None): """ Invoke task/epar/etc. help and put the page in a window. This same logic is used for GUI help, task help, log msgs, etc. """ # sanity check assert kind in ('epar', 'task', 'log'), 'Unknown help kind: '+str(kind) #----------------------------------------- # See if they'd like to view in a browser #----------------------------------------- if self._showHelpInBrowser or (kind == 'task' and self._knowTaskHelpIsHtml): if kind == 'epar': self.htmlHelp(helpString=self._appHelpString, title='Parameter Editor Help') if kind == 'task': self.htmlHelp(istask=True, tag=tag) if kind == 'log': self.htmlHelp(helpString='\n'.join(self._msgHistory), title=self._appName+' Event Log') return #----------------------------------------- # Now try to pop up the regular Tk window #----------------------------------------- wins = {'epar':self.eparHelpWin, 'task':self.irafHelpWin, 'log': self.logHistWin, } window = wins[kind] try: if window.state() != NORMAL: window.deiconify() window.tkraise() return except (AttributeError, TclError): pass #--------------------------------------------------------- # That didn't succeed (window is still None), so build it #--------------------------------------------------------- if kind == 'epar': self.eparHelpWin = self.makeHelpWin(self._appHelpString, title='Parameter Editor Help') if kind == 'task': # Acquire the task help as a string # Need to include the package name for the task to # avoid name conflicts with tasks from other packages. WJH self.irafHelpWin = self.makeHelpWin(self.getHelpString( self.pkgName+'.'+self.taskName)) if kind == 'log': self.logHistWin = self.makeHelpWin('\n'.join(self._msgHistory), title=self._appName+' Event Log')
Invoke task/epar/etc. help and put the page in a window. This same logic is used for GUI help, task help, log msgs, etc.
entailment
def setAllEntriesFromParList(self, aParList, updateModel=False):
    """Set all the parameter entry values in the GUI to the values in
    the given par list.

    If 'updateModel' is True, the internal param list will be updated
    to the new values as well as the GUI entries (slower and not
    always necessary).  Note the corresponding TparDisplay method.
    """
    # Get model data, the list of pars
    theParamList = self._taskParsObj.getParList()  # we may modify members

    if len(aParList) != len(theParamList):
        showwarning(message="Attempting to set parameter values from a "+ \
                    "list of different length ("+str(len(aParList))+ \
                    ") than the number shown here ("+ \
                    str(len(theParamList))+"). Be aware.",
                    title="Parameter List Length Mismatch")

    # LOOP THRU GUI PAR LIST
    for i in range(self.numParams):
        par = theParamList[i]
        if par.type == "pset":
            continue  # skip PSET's for now
        gui_entry = self.entryNo[i]

        # Set the value in the paramList before setting it in the GUI
        # This may be in the form of a list, or an IrafParList (getValue)
        if isinstance(aParList, list):
            # Since "aParList" can have them in different order and number
            # than we do, we'll have to first find the matching param.
            found = False
            for newpar in aParList:
                if newpar.name == par.name and newpar.scope == par.scope:
                    par.set(newpar.value)  # same as .get(native=1,prompt=0)
                    found = True
                    break

            # Now see if newpar was found in our list
            if not found:
                pnm = par.name
                if len(par.scope):
                    pnm = par.scope+'.'+par.name
                raise UnfoundParamError('Error - Unfound Parameter! \n\n'+\
                      'Expected parameter "'+pnm+'" for task "'+ \
                      self.taskName+'". \nThere may be others...')
        else:  # assume has getValue()
            par.set(aParList.getValue(par.name, native=1, prompt=0))

        # gui holds a str, but par.value is native; conversion occurs
        gui_entry.forceValue(par.value, noteEdited=False)  # no triggers yet

    if updateModel:
        # Update the model values via checkSetSaveEntries
        self.badEntriesList = self.checkSetSaveEntries(doSave=False)

        # If there were invalid entries, prepare the message dialog
        if self.badEntriesList:
            self.processBadEntries(self.badEntriesList,
                                   self.taskName, canCancel=False)
Set all the parameter entry values in the GUI to the values in the given par list. If 'updateModel' is True, the internal param list will be updated to the new values as well as the GUI entries (slower and not always necessary). Note the corresponding TparDisplay method.
entailment
def getValue(self, name, scope=None, native=False):
    """Return the current value of one par as shown in the GUI.

    No validation is performed, and the returned value is not
    necessarily the same as the one saved in the model, which is
    always behind the GUI setting, in time.  This is NOT to be used
    to get all the values - it would not be efficient.
    """
    # Model data: the list of pars
    pars = self._taskParsObj.getParList()
    # If a par scope is given it is used; otherwise it is assumed to be
    # unneeded and the first name match is returned.
    fullName = basicpar.makeFullName(scope, name)

    for idx in range(self.numParams):
        par = pars[idx]            # IrafPar or subclass
        entry = self.entryNo[idx]  # EparOption or subclass
        matched = (par.fullName() == fullName or
                   (scope is None and par.name == name))
        if matched:
            raw = entry.choice.get()
            return entry.convertToNative(raw) if native else raw

    # The requested par was not found
    raise RuntimeError('Could not find par: "' + fullName + '"')
Return current par value from the GUI. This does not do any validation, and it is not necessarily the same value saved in the model, which is always behind the GUI setting, in time. This is NOT to be used to get all the values - it would not be efficient.
entailment
def checkSetSaveChildren(self, doSave=True):
    """Check, then set, then save the parameter settings for all
    child (pset) windows.

    Prompts if any problems are found.  Returns None on success,
    list of bad entries on failure.
    """
    if self.isChild:
        return

    # Need to get all the entries and verify them.
    # Save the children in backwards order to coincide with the
    # display of the dialogs (LIFO)
    for n in range (len(self.top.childList)-1, -1, -1):
        self.badEntriesList = self.top.childList[n]. \
                              checkSetSaveEntries(doSave=doSave)
        if self.badEntriesList:
            ansOKCANCEL = self.processBadEntries(self.badEntriesList,
                          self.top.childList[n].taskName)
            if not ansOKCANCEL:
                return self.badEntriesList
        # If there were no invalid entries or the user says OK,
        # close down the child and increment to the next child
        self.top.childList[n].top.focus_set()
        self.top.childList[n].top.withdraw()
        del self.top.childList[n]

    # all windows saved successfully
    return
Check, then set, then save the parameter settings for all child (pset) windows. Prompts if any problems are found. Returns None on success, list of bad entries on failure.
entailment
def _pushMessages(self): """ Internal callback used to make sure the msg list keeps moving. """ # This continues to get itself called until no msgs are left in list. self.showStatus('') if len(self._statusMsgsToShow) > 0: self.top.after(200, self._pushMessages)
Internal callback used to make sure the msg list keeps moving.
entailment
def showStatus(self, msg, keep=0, cat=None): """ Show the given status string, but not until any given delay from the previous message has expired. keep is a time (secs) to force the message to remain without being overwritten or cleared. cat is a string category used only in the historical log. """ # prep it, space-wise msg = msg.strip() if len(msg) > 0: # right here is the ideal place to collect a history of messages forhist = msg if cat: forhist = '['+cat+'] '+msg forhist = time.strftime("%a %H:%M:%S")+': '+forhist self._msgHistory.append(forhist) # now set the spacing msg = ' '+msg # stop here if it is a category not shown in the GUI if cat == DBG: return # see if we can show it now = time.time() if now >= self._leaveStatusMsgUntil: # we are clear, can show a msg # first see if this msg is '' - if so we will show an important # waiting msg instead of the '', and then pop it off our list if len(msg) < 1 and len(self._statusMsgsToShow) > 0: msg, keep = self._statusMsgsToShow[0] # overwrite both args del self._statusMsgsToShow[0] # now actuall print the status out to the status widget self.top.status.config(text = msg) # reset our delay flag self._leaveStatusMsgUntil = 0 if keep > 0: self._leaveStatusMsgUntil = now + keep else: # there is a previous message still up, is this one important? if len(msg) > 0 and keep > 0: # Uh-oh, this is an important message that we don't want to # simply skip, but on the other hand we can't show it yet... # So we add it to _statusMsgsToShow and show it later (asap) if (msg,keep) not in self._statusMsgsToShow: if len(self._statusMsgsToShow) < 7: self._statusMsgsToShow.append( (msg,keep) ) # tuple # kick off timer loop to get this one pushed through if len(self._statusMsgsToShow) == 1: self._pushMessages() else: # should never happen, but just in case print("Lost message!: "+msg+" (too far behind...)")
Show the given status string, but not until any given delay from the previous message has expired. keep is a time (secs) to force the message to remain without being overwritten or cleared. cat is a string category used only in the historical log.
entailment
def teal(theTask, parent=None, loadOnly=False, returnAs="dict",
         canExecute=True, strict=False, errorsToTerm=False,
         autoClose=True, defaults=False):
#        overrides=None):
    """Start the GUI session, or simply load a task's ConfigObj.

    Parameters
    ----------
    theTask : object, str
        A ConfigObjPars instance, a package name, or a .cfg file name.
    loadOnly : bool
        Skip the GUI entirely and just return the loaded object
        (forces returnAs="dict" semantics).
    returnAs : str or None
        "dict" returns the task pars object (None if canceled);
        "status" returns -1 canceled / 0 closed / 1 executed;
        None returns nothing.
    errorsToTerm : bool
        Print errors to the terminal instead of popping up a dialog.
    """
    if loadOnly:  # this forces returnAs="dict"
        obj = None
        try:
            obj = cfgpars.getObjectFromTaskArg(theTask, strict, defaults)
#           obj.strictUpdate(overrides) # ! would need to re-verify after this !
        except Exception as re:  # catches RuntimeError and KeyError and ...
            # Since we are loadOnly, don't pop up the GUI for this
            if strict:
                raise
            else:
                # BUG FIX: Python 3 exceptions have no ``.message``
                # attribute (removed with PEP 352); use str(re).
                print(str(re).replace('\n\n','\n'))
        return obj
    else:
        assert returnAs in ("dict", "status", None), \
               "Invalid value for returnAs arg: "+str(returnAs)
        dlg = None
        try:
            # if setting to all defaults, go ahead and load it here, pre-GUI
            if defaults:
                theTask = cfgpars.getObjectFromTaskArg(theTask, strict, True)
            # now create/run the dialog
            dlg = ConfigObjEparDialog(theTask, parent=parent,
                                      autoClose=autoClose, strict=strict,
                                      canExecute=canExecute)
#                                     overrides=overrides)
        except cfgpars.NoCfgFileError as ncf:
            log_last_error()
            if errorsToTerm:
                print(str(ncf).replace('\n\n','\n'))
            else:
                popUpErr(parent=parent, message=str(ncf),
                         title="Unfound Task")
        except Exception as re:  # catches RuntimeError and KeyError and ...
            log_last_error()
            # BUG FIX: same ``.message`` issue as above, in both branches
            if errorsToTerm:
                print(str(re).replace('\n\n','\n'))
            else:
                popUpErr(parent=parent, message=str(re),
                         title="Bad Parameters")

        # Return, depending on the mode in which we are operating
        if returnAs is None:
            return
        if returnAs == "dict":
            if dlg is None or dlg.canceled():
                return None
            else:
                return dlg.getTaskParsObj()
        # else, returnAs == "status"
        if dlg is None or dlg.canceled():
            return -1
        if dlg.executed():
            return 1
        return 0
Start the GUI session, or simply load a task's ConfigObj.
entailment
def load(theTask, canExecute=True, strict=True, defaults=False):
    """Shortcut to load TEAL .cfg files for non-GUI access.

    Equivalent to calling teal() with loadOnly=True and terminal
    error reporting.
    """
    return teal(theTask,
                parent=None,
                loadOnly=True,
                returnAs="dict",
                errorsToTerm=True,
                canExecute=canExecute,
                strict=strict,
                defaults=defaults)
Shortcut to load TEAL .cfg files for non-GUI access where loadOnly=True.
entailment
def unlearn(taskPkgName, deleteAll=False):
    """Delete user-owned .cfg files for the named task.

    Finds the task named taskPkgName and removes any/all user-owned
    .cfg files in the user's resource directory which apply to it.
    Like a unix utility, returns 0 on success (no files found, or one
    found and deleted).  When multiple files are found: with deleteAll
    True they are all removed (returns 0); with deleteAll False
    nothing is deleted and the file-name list is returned to flag the
    problem - the caller MUST check the return value.  This never
    prompts the user or prints to the screen.
    """
    # this WILL throw an exception if the taskPkgName isn't found
    flist = cfgpars.getUsrCfgFilesForPyPkg(taskPkgName)  # can raise
    if not flist:
        return 0
    if len(flist) == 1 or deleteAll:
        for fname in flist:
            os.remove(fname)
        return 0
    # multiple matches and deleteAll is False: report, delete nothing
    return flist
Find the task named taskPkgName, and delete any/all user-owned .cfg files in the user's resource directory which apply to that task. Like a unix utility, this returns 0 on success (no files found or only 1 found but deleted). For multiple files found, this uses deleteAll, returning the file-name-list if deleteAll is False (to indicate the problem) and without deleting any files. MUST check return value. This does not prompt the user or print to the screen.
entailment
def diffFromDefaults(theTask, report=False): """ Load the given file (or existing object), and return a dict of its values which are different from the default values. If report is set, print to stdout the differences. """ # get the 2 dicts (trees: dicts of dicts) defaultTree = load(theTask, canExecute=False, strict=True, defaults=True) thisTree = load(theTask, canExecute=False, strict=True, defaults=False) # they must be flattenable defaultFlat = cfgpars.flattenDictTree(defaultTree) thisFlat = cfgpars.flattenDictTree(thisTree) # use the "set" operations till there is a dict.diff() # thanks to: http://stackoverflow.com/questions/715234 diffFlat = dict( set(thisFlat.items()) - \ set(defaultFlat.items()) ) if report: defaults_of_diffs_only = {} # { k:defaultFlat[k] for k in diffFlat.keys() } for k in diffFlat: defaults_of_diffs_only[k] = defaultFlat[k] msg = 'Non-default values of "'+str(theTask)+'":\n'+ \ _flat2str(diffFlat)+ \ '\n\nDefault values:\n'+ \ _flat2str(defaults_of_diffs_only) print(msg) return diffFlat
Load the given file (or existing object), and return a dict of its values which are different from the default values. If report is set, print to stdout the differences.
entailment
def _isInstalled(fullFname): """ Return True if the given file name is located in an installed area (versus a user-owned file) """ if not fullFname: return False if not os.path.exists(fullFname): return False instAreas = [] try: import site instAreas = site.getsitepackages() except: pass # python 2.6 and lower don't have site.getsitepackages() if len(instAreas) < 1: instAreas = [ os.path.dirname(os.__file__) ] for ia in instAreas: if fullFname.find(ia) >= 0: return True return False
Return True if the given file name is located in an installed area (versus a user-owned file)
entailment
def execEmbCode(SCOPE, NAME, VAL, TEAL, codeStr):
    """Execute .cfgspc embedded code in a relatively confined space.

    The variables available to the executed code are: SCOPE, NAME,
    VAL, PARENT, TEAL.  The code string itself is expected to set a
    var named OUT, whose value is returned.
    """
    # In Python 3 (& 2.x) be explicit about the namespace exec writes
    # into: http://bugs.python.org/issue4831
    PARENT = TEAL.top if TEAL else None
    namespace = {
        'SCOPE': SCOPE,
        'NAME': NAME,
        'VAL': VAL,
        'TEAL': TEAL,
        'PARENT': PARENT,
        'OUT': None,
    }
    exec(codeStr, globals(), namespace)
    return namespace['OUT']
.cfgspc embedded code execution is done here, in a relatively confined space. The variables available to the code to be executed are: SCOPE, NAME, VAL, PARENT, TEAL The code string itself is expected to set a var named OUT
entailment
def print_tasknames(pkgName, aDir, term_width=80, always=False, hidden=None): """ Print a message listing TEAL-enabled tasks available under a given installation directory (where pkgName resides). If always is True, this will always print when tasks are found; otherwise it will only print found tasks when in interactive mode. The parameter 'hidden' supports a list of input tasknames that should not be reported even though they still exist. """ # See if we can bail out early if not always: # We can't use the sys.ps1 check if in PyRAF since it changes sys if 'pyraf' not in sys.modules: # sys.ps1 is only defined in interactive mode if not hasattr(sys, 'ps1'): return # leave here, we're in someone's script # Check for tasks taskDict = cfgpars.findAllCfgTasksUnderDir(aDir) tasks = [x for x in taskDict.values() if len(x) > 0] if hidden: # could even account for a single taskname as input here if needed for x in hidden: if x in tasks: tasks.remove(x) # only be verbose if there something found if len(tasks) > 0: sortedUniqTasks = sorted(set(tasks)) if len(sortedUniqTasks) == 1: tlines = 'The following task in the '+pkgName+\ ' package can be run with TEAL:\n' else: tlines = 'The following tasks in the '+pkgName+\ ' package can be run with TEAL:\n' tlines += printColsAuto(sortedUniqTasks, term_width=term_width, min_pad=2) print(tlines)
Print a message listing TEAL-enabled tasks available under a given installation directory (where pkgName resides). If always is True, this will always print when tasks are found; otherwise it will only print found tasks when in interactive mode. The parameter 'hidden' supports a list of input tasknames that should not be reported even though they still exist.
entailment
def getHelpFileAsString(taskname, taskpath):
    """Return help text read from "<taskname>.help" in the task's
    installed directory, or an empty string if no such file is found.

    Notes
    -----
    The location of the actual help file will be found under the
    task's installed directory using 'irafutils.rglob' to search all
    sub-dirs to find the file.  This allows the help file to be either
    in the task's installed directory or in any sub-directory, such as
    a "help/" directory.

    Parameters
    ----------
    taskname: string
        Value of `__taskname__` for a module/task
    taskpath: string
        Value of `__file__` for an installed module which defines
        the task

    Returns
    -------
    helpString: string
        multi-line string read from the file '<taskname>.help'
    """
    # get the local library directory where the code is stored
    pathsplit = os.path.split(taskpath)  # taskpath should be task's __file__
    if taskname.find('.') > -1:
        # taskname given as package.taskname; keep only the task part
        helpname = taskname.split(".")[1]
    else:
        helpname = taskname
    localdir = pathsplit[0]
    if localdir == '':
        localdir = '.'
    # BUG FIX: rglob may find nothing, in which case the old code's
    # unconditional [0] index raised IndexError even though the
    # documented contract is to return an empty string.
    matches = rglob(localdir, helpname + ".help")
    if not matches:
        return ''
    helpfile = matches[0]
    if not os.access(helpfile, os.R_OK):
        return ''
    # use a context manager so the handle is closed even on error;
    # reading the whole file is equivalent to joining readlines()
    with open(helpfile, 'r') as fh:
        return fh.read()
This functions will return useful help as a string read from a file in the task's installed directory called "<module>.help". If no such file can be found, it will simply return an empty string. Notes ----- The location of the actual help file will be found under the task's installed directory using 'irafutils.rglob' to search all sub-dirs to find the file. This allows the help file to be either in the tasks installed directory or in any sub-directory, such as a "help/" directory. Parameters ---------- taskname: string Value of `__taskname__` for a module/task taskpath: string Value of `__file__` for an installed module which defines the task Returns ------- helpString: string multi-line string read from the file '<taskname>.help'
entailment
def cfgGetBool(theObj, name, dflt):
    """Fetch a stringified value from a ConfigObj object and coerce it
    to bool, returning ``dflt`` when the key is absent."""
    strval = theObj.get(name, None)
    if strval is None:
        return dflt
    # only the (case-insensitive, whitespace-tolerant) word 'true'
    # counts as True; anything else is False
    return strval.strip().lower() == 'true'
Get a stringified val from a ConfigObj obj and return it as bool
entailment