| sentence1 (string, length 52-3.87M) | sentence2 (string, length 1-47.2k) | label (1 class: entailment) |
|---|---|---|
def _overrideMasterSettings(self):
""" Override so that we can run in a different mode. """
# config-obj dict of defaults
cod = self._getGuiSettings()
# our own GUI setup
self._appName = APP_NAME
self._appHelpString = tealHelpString
self._useSimpleAutoClose = self._do_usac
self._showExtraHelpButton = False
self._saveAndCloseOnExec = cfgGetBool(cod, 'saveAndCloseOnExec', True)
self._showHelpInBrowser = cfgGetBool(cod, 'showHelpInBrowser', False)
self._writeProtectOnSaveAs = cfgGetBool(cod, 'writeProtectOnSaveAsOpt', True)
self._flagNonDefaultVals = cfgGetBool(cod, 'flagNonDefaultVals', None)
self._optFile = APP_NAME.lower()+".optionDB"
# our own colors
# prmdrss teal: #00ffaa, pure cyan (teal) #00ffff (darker) #008080
# "#aaaaee" is a darker but good blue, but "#bbbbff" pops
ltblu = "#ccccff" # light blue
drktl = "#008888" # darkish teal
self._frmeColor = cod.get('frameColor', drktl)
self._taskColor = cod.get('taskBoxColor', ltblu)
self._bboxColor = cod.get('buttonBoxColor', ltblu)
self._entsColor = cod.get('entriesColor', ltblu)
self._flagColor = cod.get('flaggedColor', 'brown')
# double check _canExecute, but only if it is still set to the default
if self._canExecute and self._taskParsObj: # default _canExecute=True
self._canExecute = self._taskParsObj.canExecute()
self._showExecuteButton = self._canExecute
# check on the help string - just to see if it is HTML
# (could use HTMLParser here if need be, but keep it quick and simple)
hhh = self.getHelpString(self.pkgName+'.'+self.taskName)
if hhh:
hhh = hhh.lower()
if hhh.find('<html') >= 0 or hhh.find('</html>') >= 0:
self._knowTaskHelpIsHtml = True
elif hhh.startswith('http:') or hhh.startswith('https:'):
self._knowTaskHelpIsHtml = True
elif hhh.startswith('file:') and \
(hhh.endswith('.htm') or hhh.endswith('.html')):
self._knowTaskHelpIsHtml = True
|
Override so that we can run in a different mode.
|
entailment
|
def _doActualSave(self, fname, comment, set_ro=False, overwriteRO=False):
""" Override this so we can handle case of file not writable, as
well as to make our _lastSavedState copy. """
self.debug('Saving, file name given: '+str(fname)+', set_ro: '+\
str(set_ro)+', overwriteRO: '+str(overwriteRO))
cantWrite = False
inInstArea = False
if fname in (None, ''): fname = self._taskParsObj.getFilename()
# now do some final checks then save
try:
if _isInstalled(fname): # check: may be installed but not read-only
inInstArea = cantWrite = True
else:
# in case of save-as, allow overwrite of read-only file
if overwriteRO and os.path.exists(fname):
setWritePrivs(fname, True, True) # try make writable
# do the save
rv=self._taskParsObj.saveParList(filename=fname,comment=comment)
except IOError:
cantWrite = True
# User does not have privs to write to this file. Get name of local
# choice and try to use that.
if cantWrite:
fname = self._taskParsObj.getDefaultSaveFilename()
# Tell them the context is changing, and where we are saving
msg = 'Read-only config file for task "'
if inInstArea:
msg = 'Installed config file for task "'
msg += self._taskParsObj.getName()+'" is not to be overwritten.'+\
' Values will be saved to: \n\n\t"'+fname+'".'
showwarning(message=msg, title="Will not overwrite!")
# Try saving to their local copy
rv=self._taskParsObj.saveParList(filename=fname, comment=comment)
# Treat like a save-as (update title for ALL save ops)
self._saveAsPostSave_Hook(fname)
# Limit write privs if requested (only if not in the rc dir)
if set_ro and os.path.dirname(os.path.abspath(fname)) != \
os.path.abspath(self._rcDir):
cfgpars.checkSetReadOnly(fname)
# Before returning, make a copy so we know what was last saved.
# The dict() method returns a deep-copy dict of the keyvals.
self._lastSavedState = self._taskParsObj.dict()
return rv
|
Override this so we can handle case of file not writable, as
well as to make our _lastSavedState copy.
|
entailment
|
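The read-only fallback in `_doActualSave` is a reusable pattern: attempt the requested location and, on an IOError, retry at a user-writable path. A minimal standalone sketch of that pattern (function and parameter names are illustrative, not TEAL's API):

```python
def save_with_fallback(save_func, fname, fallback_fname):
    """Try to save to fname; on permission problems, retry at fallback_fname.

    save_func is any callable accepting a filename keyword; both file names
    here are hypothetical stand-ins for getFilename()/getDefaultSaveFilename().
    """
    try:
        return save_func(filename=fname), fname
    except IOError:
        # No write privileges at the requested location: fall back to the
        # user-local copy, mirroring _doActualSave above.
        return save_func(filename=fallback_fname), fallback_fname
```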
def hasUnsavedChanges(self):
""" Determine if there are any edits in the GUI that have not yet been
saved (e.g. to a file). """
# Sanity check - this case shouldn't occur
assert self._lastSavedState is not None, \
"BUG: Please report this as it should never occur."
# Force the current GUI values into our model in memory, but don't
# change anything. Don't save to file, don't even convert bad
# values to their previous state in the gui. Note that this can
# leave the GUI in a half-saved state, but since we are about to exit
# this is OK. We only want prompting to occur if they decide to save.
badList = self.checkSetSaveEntries(doSave=False, fleeOnBadVals=True,
allowGuiChanges=False)
if badList:
return True
# Then compare our data to the last known saved state. MAKE SURE
# the LHS is the actual dict (and not 'self') to invoke the dict
# comparison only.
return self._lastSavedState != self._taskParsObj
|
Determine if there are any edits in the GUI that have not yet been
saved (e.g. to a file).
|
entailment
|
def _defineEditedCallbackObjectFor(self, parScope, parName):
""" Override to allow us to use an edited callback. """
# We know that the _taskParsObj is a ConfigObjPars
triggerStrs = self._taskParsObj.getTriggerStrings(parScope, parName)
# Some items will have a trigger, but likely most won't
if triggerStrs and len(triggerStrs) > 0:
return self
else:
return None
|
Override to allow us to use an edited callback.
|
entailment
|
def updateTitle(self, atitle):
""" Override so we can append read-only status. """
if atitle and os.path.exists(atitle):
if _isInstalled(atitle):
atitle += ' [installed]'
elif not os.access(atitle, os.W_OK):
atitle += ' [read only]'
super(ConfigObjEparDialog, self).updateTitle(atitle)
|
Override so we can append read-only status.
|
entailment
|
def edited(self, scope, name, lastSavedVal, newVal, action):
""" This is the callback function invoked when an item is edited.
This is only called for those items which were previously
specified to use this mechanism. We do not turn this on for
all items because the performance might be prohibitive.
This kicks off any previously registered triggers. """
# Get name(s) of any triggers that this par triggers
triggerNamesTup = self._taskParsObj.getTriggerStrings(scope, name)
assert triggerNamesTup is not None and len(triggerNamesTup) > 0, \
'Empty trigger name for: "'+name+'", consult the .cfgspc file.'
# Loop through all trigger names - each one is a trigger to kick off -
# in the order that they appear in the tuple we got. Most cases will
# probably only have a single trigger in the tuple.
for triggerName in triggerNamesTup:
# First handle the known/canned trigger names
# print (scope, name, newVal, action, triggerName) # DBG: debug line
# _section_switch_
if triggerName == '_section_switch_':
# Try to uniformly handle all possible par types here, not
# just boolean (e.g. str, int, float, etc.)
# Also, see logic in _BooleanMixin._coerceOneValue()
state = newVal not in self.FALSEVALS
self._toggleSectionActiveState(scope, state, (name,))
continue
# _2_section_switch_ (see notes above in _section_switch_)
if triggerName == '_2_section_switch_':
state = newVal not in self.FALSEVALS
# toggle most of 1st section (as usual) and ALL of next section
self._toggleSectionActiveState(scope, state, (name,))
# get first par of next section (fpons) - is a tuple
fpons = self.findNextSection(scope, name)
nextSectScope = fpons[0]
if nextSectScope:
self._toggleSectionActiveState(nextSectScope, state, None)
continue
# Now handle rules with embedded code (eg. triggerName=='_rule1_')
if '_RULES_' in self._taskParsObj and \
triggerName in self._taskParsObj['_RULES_'].configspec:
# Get codeStr to execute it, but before we do so, check 'when' -
# make sure this is an action that is allowed to cause a trigger
ruleSig = self._taskParsObj['_RULES_'].configspec[triggerName]
chkArgsDict = vtor_checks.sigStrToKwArgsDict(ruleSig)
codeStr = chkArgsDict.get('code') # or None if didn't specify
when2run = chkArgsDict.get('when') # or None if didn't specify
greenlight = False # do we have a green light to eval the rule?
if when2run is None:
greenlight = True # means run rule for any possible action
else: # 'when' was set to something so we need to check action
# check value of action (poor man's enum)
assert action in editpar.GROUP_ACTIONS, \
"Unknown action: "+str(action)+', expected one of: '+ \
str(editpar.GROUP_ACTIONS)
# check value of 'when' (allow them to use comma-sep'd str)
# (readers be aware that values must be those possible for
# 'action', and 'always' is also allowed)
whenlist = when2run.split(',')
# warn for invalid values
for w in whenlist:
if w not in editpar.GROUP_ACTIONS and w != 'always':
print('WARNING: skipping bad value for when kwd: "'+\
w+'" in trigger/rule: '+triggerName)
# finally, do the correlation
greenlight = 'always' in whenlist or action in whenlist
# SECURITY NOTE: because this part executes arbitrary code, that
# code string must always be found only in the configspec file,
# which is intended to only ever be root-installed w/ the pkg.
if codeStr:
if not greenlight:
continue # not an error, just skip this one
self.showStatus("Evaluating "+triggerName+' ...') #dont keep
self.top.update_idletasks() #allow msg to draw prior to exec
# execute it and retrieve the outcome
try:
outval = execEmbCode(scope, name, newVal, self, codeStr)
except Exception as ex:
outval = 'ERROR in '+triggerName+': '+str(ex)
print(outval)
msg = outval+':\n'+('-'*99)+'\n'+traceback.format_exc()
msg += 'CODE: '+codeStr+'\n'+'-'*99+'\n'
self.debug(msg)
self.showStatus(outval, keep=1)
# Leave this debug line in until it annoys someone
msg = 'Value of "'+name+'" triggered "'+triggerName+'"'
stroutval = str(outval)
if len(stroutval) < 30: msg += ' --> "'+stroutval+'"'
self.showStatus(msg, keep=0)
# Now that we have triggerName evaluated to outval, we need
# to look through all the parameters and see if there are
# any items to be affected by triggerName (e.g. '_rule1_')
self._applyTriggerValue(triggerName, outval)
continue
# If we get here, we have an unknown/unusable trigger
raise RuntimeError('Unknown trigger for: "'+name+'", named: "'+ \
str(triggerName)+'". Please consult the .cfgspc file.')
|
This is the callback function invoked when an item is edited.
This is only called for those items which were previously
specified to use this mechanism. We do not turn this on for
all items because the performance might be prohibitive.
This kicks off any previously registered triggers.
|
entailment
|
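The 'when' gating inside `edited` reduces to: run the rule if no 'when' was given, or if the comma-separated 'when' list names 'always' or the current action. A self-contained sketch of that gate (the GROUP_ACTIONS values are stand-ins, not necessarily editpar's):

```python
GROUP_ACTIONS = ('init', 'defaults', 'fopen', 'entry')  # stand-in values

def rule_should_run(when2run, action):
    """Return True if a trigger rule may run for this action."""
    if when2run is None:
        return True  # no 'when' given: run for any action
    whenlist = [w.strip() for w in when2run.split(',')]
    for w in whenlist:
        if w not in GROUP_ACTIONS and w != 'always':
            print('WARNING: skipping bad value for when kwd: "' + w + '"')
    return 'always' in whenlist or action in whenlist

assert rule_should_run(None, 'fopen')
assert rule_should_run('always', 'entry')
assert not rule_should_run('init,defaults', 'entry')
```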
def findNextSection(self, scope, name):
""" Starts with given par (scope+name) and looks further down the list
of parameters until one of a different non-null scope is found. Upon
success, returns the (scope, name) tuple, otherwise (None, None). """
# first find index of starting point
plist = self._taskParsObj.getParList()
start = 0
for i in range(len(plist)):
if scope == plist[i].scope and name == plist[i].name:
start = i
break
else:
print('WARNING: could not find starting par: '+scope+'.'+name)
return (None, None)
# now find first different (non-null) scope in a par, after start
for i in range(start, len(plist)):
if len(plist[i].scope) > 0 and plist[i].scope != scope:
return (plist[i].scope, plist[i].name)
# else didn't find it
return (None, None)
|
Starts with given par (scope+name) and looks further down the list
of parameters until one of a different non-null scope is found. Upon
success, returns the (scope, name) tuple, otherwise (None, None).
|
entailment
|
def _setTaskParsObj(self, theTask):
""" Overridden version for ConfigObj. theTask can be either
a .cfg file name or a ConfigObjPars object. """
# Create the ConfigObjPars obj
self._taskParsObj = cfgpars.getObjectFromTaskArg(theTask,
self._strict, False)
# Tell it that we can be used for catching debug lines
self._taskParsObj.setDebugLogger(self)
# Immediately make a copy of its un-tampered internal dict.
# The dict() method returns a deep-copy dict of the keyvals.
self._lastSavedState = self._taskParsObj.dict()
|
Overridden version for ConfigObj. theTask can be either
a .cfg file name or a ConfigObjPars object.
|
entailment
|
def _getSaveAsFilter(self):
""" Return a string to be used as the filter arg to the save file
dialog during Save-As. """
# figure the dir to use, start with the one from the file
absRcDir = os.path.abspath(self._rcDir)
thedir = os.path.abspath(os.path.dirname(self._taskParsObj.filename))
# skip if not writeable, or if is _rcDir
if thedir == absRcDir or not os.access(thedir, os.W_OK):
thedir = os.path.abspath(os.path.curdir)
# create save-as filter string
filt = thedir+'/*.cfg'
envVarName = APP_NAME.upper()+'_CFG'
if envVarName in os.environ:
upx = os.environ[envVarName]
if len(upx) > 0: filt = upx+"/*.cfg"
# done
return filt
|
Return a string to be used as the filter arg to the save file
dialog during Save-As.
|
entailment
|
def _getOpenChoices(self):
""" Go through all possible sites to find applicable .cfg files.
Return as an iterable. """
tsk = self._taskParsObj.getName()
taskFiles = set()
dirsSoFar = [] # this helps speed this up (skip unneeded globs)
# last dir
aDir = os.path.dirname(self._taskParsObj.filename)
if len(aDir) < 1: aDir = os.curdir
dirsSoFar.append(aDir)
taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))
# current dir
aDir = os.getcwd()
if aDir not in dirsSoFar:
dirsSoFar.append(aDir)
taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))
# task's python pkg dir (if tsk == python pkg name)
try:
x, pkgf = cfgpars.findCfgFileForPkg(tsk, '.cfg', taskName=tsk,
pkgObj=self._taskParsObj.getAssocPkg())
taskFiles.update( (pkgf,) )
except cfgpars.NoCfgFileError:
pass # no big deal - maybe there is no python package
# user's own resourceDir
aDir = self._rcDir
if aDir not in dirsSoFar:
dirsSoFar.append(aDir)
taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))
# extra loc - see if they used the app's env. var
aDir = dirsSoFar[0] # flag to skip this if no env var found
envVarName = APP_NAME.upper()+'_CFG'
if envVarName in os.environ: aDir = os.environ[envVarName]
if aDir not in dirsSoFar:
dirsSoFar.append(aDir)
taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))
# At the very end, add an option which we will later interpret to mean
# to open the file dialog.
taskFiles = list(taskFiles) # so as to keep next item at end of seq
taskFiles.sort()
taskFiles.append("Other ...")
return taskFiles
|
Go through all possible sites to find applicable .cfg files.
Return as an iterable.
|
entailment
|
def pfopen(self, event=None):
""" Load the parameter settings from a user-specified file. """
# Get the selected file name
fname = self._openMenuChoice.get()
# Also allow them to simply find any file - do not check _task_name_...
# (could use tkinter's FileDialog, but this one is prettier)
if fname[-3:] == '...':
if capable.OF_TKFD_IN_EPAR:
fname = askopenfilename(title="Load Config File",
parent=self.top)
else:
from . import filedlg
fd = filedlg.PersistLoadFileDialog(self.top,
"Load Config File",
self._getSaveAsFilter())
if fd.Show() != 1:
fd.DialogCleanup()
return
fname = fd.GetFileName()
fd.DialogCleanup()
if not fname: return # canceled
self.debug('Loading from: '+fname)
# load it into a tmp object (use associatedPkg if we have one)
try:
tmpObj = cfgpars.ConfigObjPars(fname, associatedPkg=\
self._taskParsObj.getAssocPkg(),
strict=self._strict)
except Exception as ex:
showerror(message=str(ex), title='Error in '+os.path.basename(fname))
self.debug('Error in '+os.path.basename(fname))
self.debug(traceback.format_exc())
return
# check it to make sure it is a match
if not self._taskParsObj.isSameTaskAs(tmpObj):
msg = 'The current task is "'+self._taskParsObj.getName()+ \
'", but the selected file is for task "'+ \
str(tmpObj.getName())+'". This file was not loaded.'
showerror(message=msg, title="Error in "+os.path.basename(fname))
self.debug(msg)
self.debug(traceback.format_exc())
return
# Set the GUI entries to these values (let the user Save after)
newParList = tmpObj.getParList()
try:
self.setAllEntriesFromParList(newParList, updateModel=True)
# go ahead and updateModel, even though it will take longer,
# we need it updated for the copy of the dict we make below
except editpar.UnfoundParamError as pe:
showwarning(message=str(pe), title="Error in "+os.path.basename(fname))
# trip any triggers
self.checkAllTriggers('fopen')
# This new fname is our current context
self.updateTitle(fname)
self._taskParsObj.filename = fname # !! maybe try setCurrentContext() ?
self.freshenFocus()
self.showStatus("Loaded values from: "+fname, keep=2)
# Since we are in a new context (and have made no changes yet), make
# a copy so we know what the last state was.
# The dict() method returns a deep-copy dict of the keyvals.
self._lastSavedState = self._taskParsObj.dict()
|
Load the parameter settings from a user-specified file.
|
entailment
|
def _handleParListMismatch(self, probStr, extra=False):
""" Override to include ConfigObj filename and specific errors.
Note that this only handles "missing" pars and "extra" pars, not
wrong-type pars. So it isn't that big of a deal. """
# keep down the duplicate errors
if extra:
return True # the base class is already stating it will be ignored
# find the actual errors, and then add that to the generic message
errmsg = 'Warning: '
if self._strict:
errmsg = 'ERROR: '
errmsg = errmsg+'mismatch between default and current par lists ' + \
'for task "'+self.taskName+'".'
if probStr:
errmsg += '\n\t'+probStr
errmsg += '\nTry editing/deleting: "' + \
self._taskParsObj.filename+'" (or, if in PyRAF: "unlearn ' + \
self.taskName+'").'
print(errmsg)
return True
|
Override to include ConfigObj filename and specific errors.
Note that this only handles "missing" pars and "extra" pars, not
wrong-type pars. So it isn't that big of a deal.
|
entailment
|
def _setToDefaults(self):
""" Load the default parameter settings into the GUI. """
# Create an empty object, where every item is set to its default value
try:
tmpObj = cfgpars.ConfigObjPars(self._taskParsObj.filename,
associatedPkg=\
self._taskParsObj.getAssocPkg(),
setAllToDefaults=self.taskName,
strict=False)
except Exception as ex:
msg = "Error Determining Defaults"
showerror(message=msg+'\n\n'+str(ex), title="Error Determining Defaults")
return
# Set the GUI entries to these values (let the user Save after)
tmpObj.filename = self._taskParsObj.filename = '' # name it later
newParList = tmpObj.getParList()
try:
self.setAllEntriesFromParList(newParList) # needn't updateModel yet
self.checkAllTriggers('defaults')
self.updateTitle('')
self.showStatus("Loaded default "+self.taskName+" values via: "+ \
os.path.basename(tmpObj._original_configspec), keep=1)
except editpar.UnfoundParamError as pe:
showerror(message=str(pe), title="Error Setting to Default Values")
|
Load the default parameter settings into the GUI.
|
entailment
|
def getDict(self):
""" Retrieve the current parameter settings from the GUI."""
# We are going to have to return the dict so let's
# first make sure all of our models are up to date with the values in
# the GUI right now.
badList = self.checkSetSaveEntries(doSave=False)
if badList:
self.processBadEntries(badList, self.taskName, canCancel=False)
return self._taskParsObj.dict()
|
Retrieve the current parameter settings from the GUI.
|
entailment
|
def loadDict(self, theDict):
""" Load the parameter settings from a given dict into the GUI. """
# We are going to have to merge this info into ourselves so let's
# first make sure all of our models are up to date with the values in
# the GUI right now.
badList = self.checkSetSaveEntries(doSave=False)
if badList:
if not self.processBadEntries(badList, self.taskName):
return
# now, self._taskParsObj is up-to-date
# So now we update _taskParsObj with the input dict
cfgpars.mergeConfigObj(self._taskParsObj, theDict)
# now sync the _taskParsObj dict with its par list model
# '\n'.join([str(jj) for jj in self._taskParsObj.getParList()])
self._taskParsObj.syncParamList(False)
# Set the GUI entries to these values (let the user Save after)
try:
self.setAllEntriesFromParList(self._taskParsObj.getParList(),
updateModel=True)
self.checkAllTriggers('fopen')
self.freshenFocus()
self.showStatus('Loaded '+str(len(theDict))+ \
' user par values for: '+self.taskName, keep=1)
except Exception as ex:
showerror(message=str(ex), title="Error Setting to Loaded Values")
|
Load the parameter settings from a given dict into the GUI.
|
entailment
|
def _getGuiSettings(self):
""" Return a dict (ConfigObj) of all user settings found in rcFile. """
# Put the settings into a ConfigObj dict (don't use a config-spec)
rcFile = self._rcDir+os.sep+APP_NAME.lower()+'.cfg'
if os.path.exists(rcFile):
try:
return configobj.ConfigObj(rcFile)
except Exception:
raise RuntimeError('Error parsing: '+os.path.realpath(rcFile))
# tho, for simple types, unrepr=True eliminates need for .cfgspc
# also, if we turn unrepr on, we don't need cfgGetBool
else:
return {}
|
Return a dict (ConfigObj) of all user settings found in rcFile.
|
entailment
|
def _saveGuiSettings(self):
""" The base class doesn't implement this, so we will - save settings
(only GUI stuff, not task related) to a file. """
# Put the settings into a ConfigObj dict (don't use a config-spec)
rcFile = self._rcDir+os.sep+APP_NAME.lower()+'.cfg'
#
if os.path.exists(rcFile): os.remove(rcFile)
co = configobj.ConfigObj(rcFile) # can skip try-block, won't read file
co['showHelpInBrowser'] = self._showHelpInBrowser
co['saveAndCloseOnExec'] = self._saveAndCloseOnExec
co['writeProtectOnSaveAsOpt'] = self._writeProtectOnSaveAs
co['flagNonDefaultVals'] = self._flagNonDefaultVals
co['frameColor'] = self._frmeColor
co['taskBoxColor'] = self._taskColor
co['buttonBoxColor'] = self._bboxColor
co['entriesColor'] = self._entsColor
co['flaggedColor'] = self._flagColor
co.initial_comment = ['Automatically generated by '+\
APP_NAME+'. All edits will eventually be overwritten.']
co.initial_comment.append('To use platform default colors, delete each color line below.')
co.final_comment = [''] # ensure \n at EOF
co.write()
|
The base class doesn't implement this, so we will - save settings
(only GUI stuff, not task related) to a file.
|
entailment
|
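As the comments in `_getGuiSettings` note, reading with `unrepr=True` would remove the need for cfgGetBool-style coercion. A small configobj round trip illustrating the difference (file name hypothetical):

```python
import configobj

co = configobj.ConfigObj('example_gui.cfg')  # hypothetical rc file
co['saveAndCloseOnExec'] = True
co['frameColor'] = '#008888'
co.write()

# Default parsing returns strings ('True'); with unrepr=True, simple Python
# types round-trip, so no boolean coercion helper is needed.
as_strings = configobj.ConfigObj('example_gui.cfg')
as_types = configobj.ConfigObj('example_gui.cfg', unrepr=True)
assert as_strings['saveAndCloseOnExec'] == 'True'
assert as_types['saveAndCloseOnExec'] is True
```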
def _applyTriggerValue(self, triggerName, outval):
""" Here we look through the entire .cfgspc to see if any parameters
are affected by this trigger. For those that are, we apply the action
to the GUI widget. The action is specified by depType. """
# First find which items are dependent upon this trigger (cached)
# e.g. { scope1.name1 : dep'cy-type, scope2.name2 : dep'cy-type, ... }
depParsDict = self._taskParsObj.getParsWhoDependOn(triggerName)
if not depParsDict: return
if 0: print("Dependent parameters:\n"+str(depParsDict)+"\n")
# Get model data, the list of pars
theParamList = self._taskParsObj.getParList()
# Then go through the dependent pars and apply the trigger to them
settingMsg = ''
for absName in depParsDict:
used = False
# For each dep par, loop to find the widget for that scope.name
for i in range(self.numParams):
scopedName = theParamList[i].scope+'.'+theParamList[i].name # diff from makeFullName!!
if absName == scopedName: # a match was found
depType = depParsDict[absName]
if depType == 'active_if':
self.entryNo[i].setActiveState(outval)
elif depType == 'inactive_if':
self.entryNo[i].setActiveState(not outval)
elif depType == 'is_set_by':
self.entryNo[i].forceValue(outval, noteEdited=True)
# WARNING! noteEdited=True may start recursion!
if len(settingMsg) > 0: settingMsg += ", "
settingMsg += '"'+theParamList[i].name+'" to "'+\
outval+'"'
elif depType in ('set_yes_if', 'set_no_if'):
if bool(outval):
newval = 'yes'
if depType == 'set_no_if': newval = 'no'
self.entryNo[i].forceValue(newval, noteEdited=True)
# WARNING! noteEdited=True may start recursion!
if len(settingMsg) > 0: settingMsg += ", "
settingMsg += '"'+theParamList[i].name+'" to "'+\
newval+'"'
else:
if len(settingMsg) > 0: settingMsg += ", "
settingMsg += '"'+theParamList[i].name+\
'" (no change)'
elif depType == 'is_disabled_by':
# this one is only used with boolean types
on = self.entryNo[i].convertToNative(outval)
if on:
# do not activate whole section or change
# any values, only activate this one
self.entryNo[i].setActiveState(True)
else:
# for off, set the bool par AND grey WHOLE section
self.entryNo[i].forceValue(outval, noteEdited=True)
self.entryNo[i].setActiveState(False)
# we'd need this if the par had no _section_switch_
# self._toggleSectionActiveState(
# theParamList[i].scope, False, None)
if len(settingMsg) > 0: settingMsg += ", "
settingMsg += '"'+theParamList[i].name+'" to "'+\
outval+'"'
else:
raise RuntimeError('Unknown dependency: "'+depType+ \
'" for par: "'+scopedName+'"')
used = True
break
# Or maybe it is a whole section
if absName.endswith('._section_'):
scope = absName[:-10]
depType = depParsDict[absName]
if depType == 'active_if':
self._toggleSectionActiveState(scope, outval, None)
elif depType == 'inactive_if':
self._toggleSectionActiveState(scope, not outval, None)
used = True
# Help to debug the .cfgspc rules
if not used:
raise RuntimeError('UNUSED "'+triggerName+'" dependency: '+ \
str({absName:depParsDict[absName]}))
if len(settingMsg) > 0:
# why ?! self.freshenFocus()
self.showStatus('Automatically set '+settingMsg, keep=1)
|
Here we look through the entire .cfgspc to see if any parameters
are affected by this trigger. For those that are, we apply the action
to the GUI widget. The action is specified by depType.
|
entailment
|
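The per-widget branch in `_applyTriggerValue` is essentially a dispatch on depType; condensed into one function it reads as follows (assuming a widget object with the same setActiveState/forceValue/convertToNative methods as the entries above):

```python
def apply_dep(widget, dep_type, outval):
    """Condensed sketch of the depType dispatch in _applyTriggerValue."""
    if dep_type == 'active_if':
        widget.setActiveState(outval)
    elif dep_type == 'inactive_if':
        widget.setActiveState(not outval)
    elif dep_type == 'is_set_by':
        widget.forceValue(outval, noteEdited=True)
    elif dep_type in ('set_yes_if', 'set_no_if'):
        if bool(outval):  # only acts when the trigger evaluated truthy
            newval = 'no' if dep_type == 'set_no_if' else 'yes'
            widget.forceValue(newval, noteEdited=True)
    elif dep_type == 'is_disabled_by':
        if widget.convertToNative(outval):
            widget.setActiveState(True)   # activate only this widget
        else:
            widget.forceValue(outval, noteEdited=True)
            widget.setActiveState(False)  # set the bool AND grey it out
    else:
        raise RuntimeError('Unknown dependency: "' + dep_type + '"')
```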
def readASNTable(fname, output=None, prodonly=False):
"""
Given a FITS filename representing an association table, reads in the table as a
dictionary which can be used by pydrizzle and multidrizzle.
An association table is a FITS binary table with two required columns: 'MEMNAME' and
'MEMTYPE'. It checks the 'MEMPRSNT' column and removes all files for which its value is 'no'.
Parameters
----------
fname : str
name of association table
output : str
name of output product - if not specified by the user,
the first PROD-DTH name is used if present,
if not, the first PROD-RPT name is used if present,
if not, the rootname of the input association table is used.
prodonly : bool
what files should be considered as input
if True - select only MEMTYPE=PROD* as input
if False - select only MEMTYPE=EXP as input
Returns
-------
asndict : dict
A dictionary-like object with all the association information.
Examples
--------
An association table can be read from a file using the following commands::
>>> from stsci.tools import asnutil
>>> asntab = asnutil.readASNTable('j8bt06010_shifts_asn.fits', prodonly=False) # doctest: +SKIP
The `asntab` object can now be passed to other code to provide relationships
between input and output images defined by the association table.
"""
try:
f = fits.open(fu.osfn(fname))
except Exception:
raise IOError("Can't open file %s" % fname)
colnames = f[1].data.names
try:
colunits = f[1].data.units
except AttributeError:
colunits = None
hdr = f[0].header
if 'MEMNAME' not in colnames or 'MEMTYPE' not in colnames:
msg = 'Association table incomplete: required column(s) MEMNAME/MEMTYPE NOT found!'
raise ValueError(msg)
d = {}
for n in colnames:
d[n]=f[1].data.field(n)
f.close()
valid_input = d['MEMPRSNT'].copy()
memtype = d['MEMTYPE'].copy()
prod_dth = (memtype.find('PROD-DTH')==0).nonzero()[0]
prod_rpt = (memtype.find('PROD-RPT')==0).nonzero()[0]
prod_crj = (memtype.find('PROD-CRJ')==0).nonzero()[0]
# set output name
if output is None:
if prod_dth:
output = d['MEMNAME'][prod_dth[0]]
elif prod_rpt:
output = d['MEMNAME'][prod_rpt[0]]
elif prod_crj:
output = d['MEMNAME'][prod_crj[0]]
else:
output = fname.split('_')[0]
if prodonly:
input = d['MEMTYPE'].find('PROD')==0
if prod_dth:
input[prod_dth] = False
else:
input = (d['MEMTYPE'].find('EXP')==0)
valid_input *= input
for k in d:
d[k] = d[k][valid_input]
infiles = list(d['MEMNAME'].lower())
if not infiles:
print("No valid input specified")
return None
if ('XOFFSET' in colnames and d['XOFFSET'].any()) or ('YOFFSET' in colnames and d['YOFFSET'].any()):
abshift = True
dshift = False
try:
units = colunits[colnames.index('XOFFSET')]
except (TypeError, ValueError):
units = 'pixels'
xshifts = list(d['XOFFSET'])
yshifts = list(d['YOFFSET'])
elif ('XDELTA' in colnames and d['XDELTA'].any()) or ('YDELTA' in colnames and d['YDELTA'].any()):
abshift = False
dshift = True
try:
units = colunits[colnames.index('XDELTA')]
except (TypeError, ValueError):
units = 'pixels'
xshifts = list(d['XDELTA'])
yshifts = list(d['YDELTA'])
else:
abshift = False
dshift = False
members = {}
if not abshift and not dshift:
asndict = ASNTable(infiles,output=output)
asndict.create()
return asndict
else:
try:
refimage = hdr['refimage']
except KeyError: refimage = None
try:
frame = hdr['shframe']
except KeyError: frame = 'input'
rots = []
scales = []
if 'ROTATION' in colnames:
rots = list(d['ROTATION'])
if 'SCALE' in colnames:
scales = list(d['SCALE'])
for r in range(len(infiles)):
row = r
xshift = xshifts[r]
yshift = yshifts[r]
# default to identity values when the ROTATION/SCALE columns are absent
rot = rots[r] if rots else 0.0
scale = scales[r] if scales else 1.0
members[infiles[r]] = ASNMember(row=row, dshift=dshift, abshift=abshift, rot=rot, xshift=xshift,
yshift=yshift, scale=scale, refimage=refimage, shift_frame=frame,
shift_units=units)
asndict= ASNTable(infiles, output=output)
asndict.create()
asndict['members'].update(members)
return asndict
|
Given a FITS filename representing an association table, reads in the table as a
dictionary which can be used by pydrizzle and multidrizzle.
An association table is a FITS binary table with two required columns: 'MEMNAME' and
'MEMTYPE'. It checks the 'MEMPRSNT' column and removes all files for which its value is 'no'.
Parameters
----------
fname : str
name of association table
output : str
name of output product - if not specified by the user,
the first PROD-DTH name is used if present,
if not, the first PROD-RPT name is used if present,
if not, the rootname of the input association table is used.
prodonly : bool
what files should be considered as input
if True - select only MEMTYPE=PROD* as input
if False - select only MEMTYPE=EXP as input
Returns
-------
asndict : dict
A dictionary-like object with all the association information.
Examples
--------
An association table can be read from a file using the following commands::
>>> from stsci.tools import asnutil
>>> asntab = asnutil.readASNTable('j8bt06010_shifts_asn.fits', prodonly=False) # doctest: +SKIP
The `asntab` object can now be passed to other code to provide relationships
between input and output images defined by the association table.
|
entailment
|
def write(self, output=None):
"""
Write association table to a file.
"""
if not output:
outfile = self['output']+'_asn.fits'
output = self['output']
else:
outfile = output
# Warn if the file already exists; it will be overwritten below.
if os.path.exists(outfile):
warningmsg = "\n#########################################\n"
warningmsg += "#                                       #\n"
warningmsg += "# WARNING:                              #\n"
warningmsg += "#  The existing association table,      #\n"
warningmsg += "           " + str(outfile) + '\n'
warningmsg += "#  is being replaced.                   #\n"
warningmsg += "#                                       #\n"
warningmsg += "#########################################\n\n"
print(warningmsg)
fasn = fits.HDUList()
# Compute maximum length of MEMNAME for table column definition
_maxlen = 0
for _fname in self['order']:
if len(_fname) > _maxlen: _maxlen = len(_fname)
# Enforce a minimum size of 24
if _maxlen < 24: _maxlen = 24
namelen_str = str(_maxlen+2)+'A'
self.buildPrimary(fasn, output=output)
mname = self['order'][:]
mname.append(output)
mtype = ['EXP-DTH' for l in self['order']]
mtype.append('PROD-DTH')
mprsn = [True for l in self['order']]
mprsn.append(False)
xoff = [self['members'][l]['xoff'] for l in self['order']]
xoff.append(0.0)
yoff = [self['members'][l]['yoff'] for l in self['order']]
yoff.append(0.0)
xsh = [self['members'][l]['xshift'] for l in self['order']]
xsh.append(0.0)
ysh = [self['members'][l]['yshift'] for l in self['order']]
ysh.append(0.0)
rot = [self['members'][l]['rot'] for l in self['order']]
rot.append(0.0)
scl = [self['members'][l]['scale'] for l in self['order']]
scl.append(1.0)
memname = fits.Column(name='MEMNAME',format=namelen_str,array=N.char.array(mname))
memtype = fits.Column(name='MEMTYPE',format='14A',array=N.char.array(mtype))
memprsn = fits.Column(name='MEMPRSNT', format='L', array=N.array(mprsn).astype(N.uint8))
xoffset = fits.Column(name='XOFFSET', format='E', array=N.array(xoff))
yoffset = fits.Column(name='YOFFSET', format='E', array=N.array(yoff))
xdelta = fits.Column(name='XDELTA', format='E', array=N.array(xsh))
ydelta = fits.Column(name='YDELTA', format='E', array=N.array(ysh))
rotation = fits.Column(name='ROTATION', format='E', array=N.array(rot))
scale = fits.Column(name='SCALE', format='E', array=N.array(scl))
cols = fits.ColDefs([memname,memtype,memprsn,xoffset,yoffset,xdelta,ydelta,rotation,scale])
hdu = fits.BinTableHDU.from_columns(cols)
fasn.append(hdu)
if ASTROPY_VER_GE13:
fasn.writeto(outfile, overwrite=True)
else:
fasn.writeto(outfile, clobber=True)
fasn.close()
mem0 = self['order'][0]
refimg = self['members'][mem0]['refimage']
if refimg is not None:
whdu = wcsutil.WCSObject(refimg)
whdu.createReferenceWCS(outfile,overwrite=False)
ftab = fits.open(outfile)
ftab['primary'].header['refimage'] = outfile+"[wcs]"
ftab.close()
del whdu
|
Write association table to a file.
|
entailment
|
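The column-building in `write` is standard astropy.io.fits usage; a minimal sketch producing a two-member association table (member names made up; overwrite= assumes astropy >= 1.3, as the ASTROPY_VER_GE13 branch above suggests):

```python
import numpy as N
from astropy.io import fits

mname = ['j8bt06nyq', 'j8bt06010']   # hypothetical member names
mtype = ['EXP-DTH', 'PROD-DTH']      # exposures plus the dither product
mprsn = [True, False]

cols = fits.ColDefs([
    fits.Column(name='MEMNAME', format='26A', array=N.char.array(mname)),
    fits.Column(name='MEMTYPE', format='14A', array=N.char.array(mtype)),
    fits.Column(name='MEMPRSNT', format='L', array=N.array(mprsn).astype(N.uint8)),
])
hdu = fits.BinTableHDU.from_columns(cols)
fits.HDUList([fits.PrimaryHDU(), hdu]).writeto('example_asn.fits', overwrite=True)
```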
def readShiftFile(self, filename):
"""
Reads a shift file from disk and populates a dictionary.
"""
order = []
fshift = open(filename,'r')
flines = fshift.readlines()
fshift.close()
common = [f.strip('#').strip() for f in flines if f.startswith('#')]
c=[line.split(': ') for line in common]
# Remove any line comments in the shift file - lines starting with '#'
# but not part of the common block.
c = [l for l in c if l[0] in ['frame', 'refimage', 'form', 'units']]
for line in c: line[1] = line[1].strip()
self.update(c)
files = [f.strip().split(' ',1) for f in flines if not (f.startswith('#') or f.strip() == '')]
for f in files:
order.append(f[0])
self['order'] = order
for f in files:
# Check to see if filename provided is a full filename that corresponds
# to a file on the path. If not, try to convert given rootname into
# a valid filename based on available files. This may or may not
# define the correct filename, which is why it prints out what it is
# doing, so that the user can verify and edit the shiftfile if needed.
#NOTE:
# Supporting the specification of only rootnames in the shiftfile with this
# filename expansion is NOT to be documented, but provided solely as
# an undocumented, dangerous and not fully supported helper function for
# some backwards compatibility.
if not os.path.exists(f[0]):
f[0] = fu.buildRootname(f[0])
print('Defining filename in shiftfile as: ', f[0])
f[1] = f[1].split()
try:
f[1] = [float(s) for s in f[1]]
except ValueError:
msg = 'Cannot read in ' + str(f[1]) + ' from shiftfile ' + filename + ' as float numbers'
raise ValueError(msg)
msg = "At least 2 and at most 4 shift values should be provided in a shiftfile"
if len(f[1]) < 2:
raise ValueError(msg)
elif len(f[1]) == 3:
f[1].append(1.0)
elif len(f[1]) == 2:
f[1].extend([0.0, 1.0])
elif len(f[1]) > 4:
raise ValueError(msg)
fdict = dict(files)
self.update(fdict)
|
Reads a shift file from disk and populates a dictionary.
|
entailment
|
def writeShiftFile(self, filename="shifts.txt"):
"""
Writes a shift file object to a file on disk using the convention for shift file format.
"""
lines = ['# frame: ', self['frame'], '\n',
'# refimage: ', self['refimage'], '\n',
'# form: ', self['form'], '\n',
'# units: ', self['units'], '\n']
for o in self['order']:
ss = " "
for shift in self[o]:
ss += str(shift) + " "
line = str(o) + ss + "\n"
lines.append(line)
fshifts= open(filename, 'w')
fshifts.writelines(lines)
fshifts.close()
|
Writes a shift file object to a file on disk using the convention for shift file format.
|
entailment
|
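Together, readShiftFile and writeShiftFile imply a simple text format: a commented common block (frame, refimage, form, units) followed by one line per file carrying 2-4 shift values. A small hand-rolled round trip over that format (file name and values made up):

```python
content = """# frame: input
# refimage: None
# form: absolute
# units: pixels
j8bt06nyq_flt.fits 0.12 -0.34 0.0 1.0
j8bt06nzq_flt.fits 0.56 -0.78
"""
with open('example_shifts.txt', 'w') as fh:
    fh.write(content)

# Parse just the per-file rows, mirroring the non-comment path above.
with open('example_shifts.txt') as fh:
    rows = [ln.split() for ln in fh if ln.strip() and not ln.startswith('#')]
shifts = {r[0]: [float(v) for v in r[1:]] for r in rows}
assert shifts['j8bt06nzq_flt.fits'] == [0.56, -0.78]
```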
def weave(target, aspects, **options):
"""
Weave an aspect (or list of aspects) into the given target.
Args:
target (string, class, instance, function or builtin):
The object to weave.
aspects (:py:obj:`aspectlib.Aspect`, function decorator or list of):
The aspects to apply to the object.
subclasses (bool):
If ``True``, subclasses of target are weaved. *Only available for classes*
aliases (bool):
If ``True``, aliases of target are replaced.
lazy (bool):
If ``True`` only target's ``__init__`` method is patched, the rest of the methods are patched after
``__init__`` is called. *Only available for classes*.
methods (list or regex or string):
Methods from target to patch. *Only available for classes*
Returns:
aspectlib.Rollback: An object that can rollback the patches.
Raises:
TypeError: If target is an unacceptable object, or the specified options are not available for that type of
object.
.. versionchanged:: 0.4.0
Replaced `only_methods`, `skip_methods`, `skip_magicmethods` options with `methods`.
Renamed `on_init` option to `lazy`.
Added `aliases` option.
Replaced `skip_subclasses` option with `subclasses`.
"""
if not callable(aspects):
if not hasattr(aspects, '__iter__'):
raise ExpectedAdvice('%s must be an `Aspect` instance, a callable or an iterable of them.' % aspects)
for obj in aspects:
if not callable(obj):
raise ExpectedAdvice('%s must be an `Aspect` instance or a callable.' % obj)
assert target, "Can't weave falsy value %r." % target
logdebug("weave (target=%s, aspects=%s, **options=%s)", target, aspects, options)
bag = options.setdefault('bag', ObjectBag())
if isinstance(target, (list, tuple)):
return Rollback([
weave(item, aspects, **options) for item in target
])
elif isinstance(target, basestring):
parts = target.split('.')
for part in parts:
_check_name(part)
if len(parts) == 1:
return weave_module(_import_module(part), aspects, **options)
for pos in reversed(range(1, len(parts))):
owner, name = '.'.join(parts[:pos]), '.'.join(parts[pos:])
try:
owner = _import_module(owner)
except ImportError:
continue
else:
break
else:
raise ImportError("Could not import %r. Last try was for %s" % (target, owner))
if '.' in name:
path, name = name.rsplit('.', 1)
path = deque(path.split('.'))
while path:
owner = getattr(owner, path.popleft())
logdebug("@ patching %s from %s ...", name, owner)
obj = getattr(owner, name)
if isinstance(obj, (type, ClassType)):
logdebug(" .. as a class %r.", obj)
return weave_class(
obj, aspects,
owner=owner, name=name, **options
)
elif callable(obj): # or isinstance(obj, FunctionType) ??
logdebug(" .. as a callable %r.", obj)
if bag.has(obj):
return Nothing
return patch_module_function(owner, obj, aspects, force_name=name, **options)
else:
return weave(obj, aspects, **options)
name = getattr(target, '__name__', None)
if name and getattr(__builtin__, name, None) is target:
if bag.has(target):
return Nothing
return patch_module_function(__builtin__, target, aspects, **options)
elif PY3 and ismethod(target):
if bag.has(target):
return Nothing
inst = target.__self__
name = target.__name__
logdebug("@ patching %r (%s) as instance method.", target, name)
func = target.__func__
setattr(inst, name, _checked_apply(aspects, func).__get__(inst, type(inst)))
return Rollback(lambda: delattr(inst, name))
elif PY3 and isfunction(target):
if bag.has(target):
return Nothing
owner = _import_module(target.__module__)
path = deque(target.__qualname__.split('.')[:-1])
while path:
owner = getattr(owner, path.popleft())
name = target.__name__
logdebug("@ patching %r (%s) as a property.", target, name)
func = owner.__dict__[name]
return patch_module(owner, name, _checked_apply(aspects, func), func, **options)
elif PY2 and isfunction(target):
if bag.has(target):
return Nothing
return patch_module_function(_import_module(target.__module__), target, aspects, **options)
elif PY2 and ismethod(target):
if target.im_self:
if bag.has(target):
return Nothing
inst = target.im_self
name = target.__name__
logdebug("@ patching %r (%s) as instance method.", target, name)
func = target.im_func
setattr(inst, name, _checked_apply(aspects, func).__get__(inst, type(inst)))
return Rollback(lambda: delattr(inst, name))
else:
klass = target.im_class
name = target.__name__
return weave(klass, aspects, methods='%s$' % name, **options)
elif isclass(target):
return weave_class(target, aspects, **options)
elif ismodule(target):
return weave_module(target, aspects, **options)
elif type(target).__module__ not in ('builtins', '__builtin__') or InstanceType and isinstance(target, InstanceType):
return weave_instance(target, aspects, **options)
else:
raise UnsupportedType("Can't weave object %s of type %s" % (target, type(target)))
|
Weave an aspect (or list of aspects) into the given target.
Args:
target (string, class, instance, function or builtin):
The object to weave.
aspects (:py:obj:`aspectlib.Aspect`, function decorator or list of):
The aspects to apply to the object.
subclasses (bool):
If ``True``, subclasses of target are weaved. *Only available for classes*
aliases (bool):
If ``True``, aliases of target are replaced.
lazy (bool):
If ``True`` only target's ``__init__`` method is patched, the rest of the methods are patched after
``__init__`` is called. *Only available for classes*.
methods (list or regex or string):
Methods from target to patch. *Only available for classes*
Returns:
aspectlib.Rollback: An object that can rollback the patches.
Raises:
TypeError: If target is an unacceptable object, or the specified options are not available for that type of
object.
.. versionchanged:: 0.4.0
Replaced `only_methods`, `skip_methods`, `skip_magicmethods` options with `methods`.
Renamed `on_init` option to `lazy`.
Added `aliases` option.
Replaced `skip_subclasses` option with `subclasses`.
|
entailment
|
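A typical call to weave uses an Aspect as a context manager so the patches roll back automatically on exit; a short sketch in aspectlib's documented generator-advice style (the Greeter class is made up):

```python
import aspectlib

@aspectlib.Aspect
def log_calls(*args, **kwargs):
    print('calling with', args, kwargs)
    result = yield aspectlib.Proceed      # run the original function
    print('returned', result)
    yield aspectlib.Return(result)        # pass its result through

class Greeter(object):
    def hello(self, name):
        return 'hello ' + name

with aspectlib.weave(Greeter, log_calls, methods='hello$'):
    Greeter().hello('world')  # advice runs while the weave is active
Greeter().hello('world')      # rolled back: plain call again
```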
def weave_instance(instance, aspect, methods=NORMAL_METHODS, lazy=False, bag=BrokenBag, **options):
"""
Low-level weaver for instances.
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
"""
if bag.has(instance):
return Nothing
entanglement = Rollback()
method_matches = make_method_matcher(methods)
logdebug("weave_instance (module=%r, aspect=%s, methods=%s, lazy=%s, **options=%s)",
instance, aspect, methods, lazy, options)
def fixup(func):
return func.__get__(instance, type(instance))
fixed_aspect = list(aspect) + [fixup] if isinstance(aspect, (list, tuple)) else [aspect, fixup]
for attr in dir(instance):
func = getattr(instance, attr)
if method_matches(attr):
if ismethod(func):
if hasattr(func, '__func__'):
realfunc = func.__func__
else:
realfunc = func.im_func
entanglement.merge(
patch_module(instance, attr, _checked_apply(fixed_aspect, realfunc, module=None), **options)
)
return entanglement
|
Low-level weaver for instances.
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
|
entailment
|
def weave_module(module, aspect, methods=NORMAL_METHODS, lazy=False, bag=BrokenBag, **options):
"""
Low-level weaver for "whole module weaving".
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
"""
if bag.has(module):
return Nothing
entanglement = Rollback()
method_matches = make_method_matcher(methods)
logdebug("weave_module (module=%r, aspect=%s, methods=%s, lazy=%s, **options=%s)",
module, aspect, methods, lazy, options)
for attr in dir(module):
func = getattr(module, attr)
if method_matches(attr):
if isroutine(func):
entanglement.merge(patch_module_function(module, func, aspect, force_name=attr, **options))
elif isclass(func):
entanglement.merge(
weave_class(func, aspect, owner=module, name=attr, methods=methods, lazy=lazy, bag=bag, **options),
# it's not consistent with the other ways of weaving a class (it's never weaved as a routine).
# therefore it's disabled until it's considered useful.
# #patch_module_function(module, getattr(module, attr), aspect, force_name=attr, **options),
)
return entanglement
|
Low-level weaver for "whole module weaving".
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
|
entailment
|
def weave_class(klass, aspect, methods=NORMAL_METHODS, subclasses=True, lazy=False,
owner=None, name=None, aliases=True, bases=True, bag=BrokenBag):
"""
Low-level weaver for classes.
.. warning:: You should not use this directly.
"""
assert isclass(klass), "Can't weave %r. Must be a class." % klass
if bag.has(klass):
return Nothing
entanglement = Rollback()
method_matches = make_method_matcher(methods)
logdebug("weave_class (klass=%r, methods=%s, subclasses=%s, lazy=%s, owner=%s, name=%s, aliases=%s, bases=%s)",
klass, methods, subclasses, lazy, owner, name, aliases, bases)
if subclasses and hasattr(klass, '__subclasses__'):
sub_targets = klass.__subclasses__()
if sub_targets:
logdebug("~ weaving subclasses: %s", sub_targets)
for sub_class in sub_targets:
if not issubclass(sub_class, Fabric):
entanglement.merge(weave_class(sub_class, aspect,
methods=methods, subclasses=subclasses, lazy=lazy, bag=bag))
if lazy:
def __init__(self, *args, **kwargs):
super(SubClass, self).__init__(*args, **kwargs)
for attr in dir(self):
func = getattr(self, attr, None)
if method_matches(attr) and attr not in wrappers and isroutine(func):
setattr(self, attr, _checked_apply(aspect, force_bind(func)).__get__(self, SubClass))
wrappers = {
'__init__': _checked_apply(aspect, __init__) if method_matches('__init__') else __init__
}
for attr, func in klass.__dict__.items():
if method_matches(attr):
if ismethoddescriptor(func):
wrappers[attr] = _rewrap_method(func, klass, aspect)
logdebug(" * creating subclass with attributes %r", wrappers)
name = name or klass.__name__
SubClass = type(name, (klass, Fabric), wrappers)
SubClass.__module__ = klass.__module__
module = owner or _import_module(klass.__module__)
entanglement.merge(patch_module(module, name, SubClass, original=klass, aliases=aliases))
else:
original = {}
for attr, func in klass.__dict__.items():
if method_matches(attr):
if isroutine(func):
logdebug("@ patching attribute %r (original: %r).", attr, func)
setattr(klass, attr, _rewrap_method(func, klass, aspect))
else:
continue
original[attr] = func
entanglement.merge(lambda: deque((
setattr(klass, attr, func) for attr, func in original.items()
), maxlen=0))
if bases:
super_original = set()
for sklass in _find_super_classes(klass):
if sklass is not object:
for attr, func in sklass.__dict__.items():
if method_matches(attr) and attr not in original and attr not in super_original:
if isroutine(func):
logdebug("@ patching attribute %r (from superclass: %s, original: %r).",
attr, sklass.__name__, func)
setattr(klass, attr, _rewrap_method(func, sklass, aspect))
else:
continue
super_original.add(attr)
entanglement.merge(lambda: deque((
delattr(klass, attr) for attr in super_original
), maxlen=0))
return entanglement
|
Low-level weaver for classes.
.. warning:: You should not use this directly.
|
entailment
|
def patch_module(module, name, replacement, original=UNSPECIFIED, aliases=True, location=None, **_bogus_options):
"""
Low-level attribute patcher.
:param module module: Object to patch.
:param str name: Attribute to patch
:param replacement: The replacement value.
:param original: The original value (in case the object being patched uses descriptors or is plain weird).
:param bool aliases: If ``True`` patch all the attributes that have the same original value.
:returns: An :obj:`aspectlib.Rollback` object.
"""
rollback = Rollback()
seen = False
original = getattr(module, name) if original is UNSPECIFIED else original
location = module.__name__ if hasattr(module, '__name__') else type(module).__module__
target = module.__name__ if hasattr(module, '__name__') else type(module).__name__
try:
replacement.__module__ = location
except (TypeError, AttributeError):
pass
for alias in dir(module):
logdebug("alias:%s (%s)", alias, name)
if hasattr(module, alias):
obj = getattr(module, alias)
logdebug("- %s:%s (%s)", obj, original, obj is original)
if obj is original:
if aliases or alias == name:
logdebug("= saving %s on %s.%s ...", replacement, target, alias)
setattr(module, alias, replacement)
rollback.merge(lambda alias=alias: setattr(module, alias, original))
if alias == name:
seen = True
elif alias == name:
if ismethod(obj):
logdebug("= saving %s on %s.%s ...", replacement, target, alias)
setattr(module, alias, replacement)
rollback.merge(lambda alias=alias: setattr(module, alias, original))
seen = True
else:
raise AssertionError("%s.%s = %s is not %s." % (module, alias, obj, original))
if not seen:
warnings.warn('Setting %s.%s to %s. There was no previous definition, probably patching the wrong module.' % (
target, name, replacement
))
logdebug("= saving %s on %s.%s ...", replacement, target, name)
setattr(module, name, replacement)
rollback.merge(lambda: setattr(module, name, original))
return rollback
|
Low-level attribute patcher.
:param module module: Object to patch.
:param str name: Attribute to patch
:param replacement: The replacement value.
:param original: The original value (in case the object being patched uses descriptors or is plain weird).
:param bool aliases: If ``True`` patch all the attributes that have the same original value.
:returns: An :obj:`aspectlib.Rollback` object.
|
entailment
|
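The aliases behavior of patch_module (patch every attribute bound to the same original object) can be shown in isolation; a minimal sketch, not aspectlib's actual implementation:

```python
import types

def patch_with_aliases(module, name, replacement):
    """Patch module.name and any aliases of the same object; return an undo."""
    original = getattr(module, name)
    patched = []
    for alias in dir(module):
        if getattr(module, alias, None) is original:
            setattr(module, alias, replacement)
            patched.append(alias)
    return lambda: [setattr(module, a, original) for a in patched]

mod = types.ModuleType('example')
mod.f = mod.g = lambda: 'original'  # g is an alias of f
undo = patch_with_aliases(mod, 'f', lambda: 'patched')
assert mod.g() == 'patched'         # the alias was patched too
undo()
assert mod.f() == 'original'
```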
def patch_module_function(module, target, aspect, force_name=None, bag=BrokenBag, **options):
"""
Low-level patcher for one function from a specified module.
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
"""
logdebug("patch_module_function (module=%s, target=%s, aspect=%s, force_name=%s, **options=%s",
module, target, aspect, force_name, options)
name = force_name or target.__name__
return patch_module(module, name, _checked_apply(aspect, target, module=module), original=target, **options)
|
Low-level patcher for one function from a specified module.
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
|
entailment
|
def unframe(packet):
"""
Strip leading DLE and trailing DLE/ETX from packet.
:param packet: TSIP packet with leading DLE and trailing DLE/ETX.
:type packet: Binary string.
:return: TSIP packet with leading DLE and trailing DLE/ETX removed.
:raise: ``ValueError`` if `packet` does not start with DLE and end in DLE/ETX.
"""
if is_framed(packet):
# Slice off the single leading DLE and the trailing DLE/ETX pair; the
# strip()-based removal used previously could also eat stuffed DLE bytes
# at the edges of the payload.
return packet[1:-2]
else:
raise ValueError('packet does not contain leading DLE and trailing DLE/ETX')
|
Strip leading DLE and trailing DLE/ETX from packet.
:param packet: TSIP packet with leading DLE and trailing DLE/ETX.
:type packet: Binary string.
:return: TSIP packet with leading DLE and trailing DLE/ETX removed.
:raise: ``ValueError`` if `packet` does not start with DLE and end in DLE/ETX.
|
entailment
|
def stuff(packet):
"""
Add byte stuffing to TSIP packet.
:param packet: TSIP packet with byte stuffing. The packet must already
have been stripped or `ValueError` will be raised.
:type packet: Binary string.
:return: Packet with byte stuffing.
"""
if is_framed(packet):
raise ValueError('packet contains leading DLE and trailing DLE/ETX')
else:
return packet.replace(CHR_DLE, CHR_DLE + CHR_DLE)
|
Add byte stuffing to TSIP packet.
:param packet: TSIP packet with byte stuffing. The packet must already
have been stripped or `ValueError` will be raised.
:type packet: Binary string.
:return: Packet with byte stuffing.
|
entailment
|
def unstuff(packet):
"""
Remove byte stuffing from a TSIP packet.
:param packet: TSIP packet with byte stuffing. The packet must already
have been stripped or `ValueError` will be raised.
:type packet: Binary string.
:return: Packet without byte stuffing.
"""
if is_framed(packet):
raise ValueError('packet contains leading DLE and trailing DLE/ETX')
else:
return packet.replace(CHR_DLE + CHR_DLE, CHR_DLE)
|
Remove byte stuffing from a TSIP packet.
:param packet: TSIP packet with byte stuffing. The packet must already
have been stripped or `ValueError` will be raised.
:type packet: Binary string.
:return: Packet without byte stuffing.
|
entailment
|
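The framing and stuffing helpers above compose into a simple round trip, using the usual TSIP control bytes (DLE=0x10, ETX=0x03); a self-contained sketch over bytes:

```python
CHR_DLE = b'\x10'
CHR_ETX = b'\x03'

payload = b'\x8f\x10\x20'  # note the embedded DLE byte
stuffed = payload.replace(CHR_DLE, CHR_DLE + CHR_DLE)   # stuff()
framed = CHR_DLE + stuffed + CHR_DLE + CHR_ETX          # add framing

# Reverse: drop the frame bytes, then collapse doubled DLEs (unstuff()).
unframed = framed[1:-2]
assert unframed.replace(CHR_DLE + CHR_DLE, CHR_DLE) == payload
```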
def parseFilename(filename):
"""
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
Modified from 'pydrizzle.fileutil' to allow this
module to be independent of PyDrizzle/MultiDrizzle.
"""
# Parse out any extension specified in filename
_indx = filename.find('[')
if _indx > 0:
# Read extension name provided
_fname = filename[:_indx]
extn = filename[_indx+1:-1]
# An extension was provided, so parse it out...
if repr(extn).find(',') > 1:
_extns = extn.split(',')
# Two values given for extension:
# for example, 'sci,1' or 'dq,1'
_extn = [_extns[0],int(_extns[1])]
elif repr(extn).find('/') > 1:
# We are working with GEIS group syntax
_indx = str(extn[:extn.find('/')])
_extn = [int(_indx)]
elif isinstance(extn, str):
# Only one extension value specified...
if extn.isdigit():
# We only have an extension number specified as a string...
_nextn = int(extn)
else:
# We only have EXTNAME specified...
_nextn = extn
_extn = [_nextn]
else:
# Only integer extension number given, or default of 0 is used.
_extn = [int(extn)]
else:
_fname = filename
_extn = None
return _fname,_extn
|
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
Modified from 'pydrizzle.fileutil' to allow this
module to be independent of PyDrizzle/MultiDrizzle.
|
entailment
|
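parseFilename handles the bracketed extension syntaxes common in FITS tooling; a few illustrative calls with their expected results (file names made up):

```python
parseFilename('test.fits')           # -> ('test.fits', None)
parseFilename('test.fits[1]')        # -> ('test.fits', [1])
parseFilename('test.fits[sci,1]')    # -> ('test.fits', ['sci', 1])
parseFilename('test.hhh[2/4]')       # -> ('test.hhh', [2])  GEIS group syntax
```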
def _shape(self):
""" Returns the shape of the data array associated with this file."""
hdu = self.open()
_shape = hdu.shape
if not self.inmemory:
self.close()
del hdu
return _shape
|
Returns the shape of the data array associated with this file.
|
entailment
|
def _data(self):
""" Returns the data array associated with this file/extenstion."""
hdu = self.open()
_data = hdu.data.copy()
if not self.inmemory:
self.close()
del hdu
return _data
|
Returns the data array associated with this file/extension.
|
entailment
|
def type(self):
""" Returns the shape of the data array associated with this file."""
hdu = self.open()
_type = hdu.data.dtype.name
if not self.inmemory:
self.close()
del hdu
return _type
|
Returns the data type of the data array associated with this file.
|
entailment
|
def open(self):
""" Opens the file for subsequent access. """
if self.handle is None:
self.handle = fits.open(self.fname, mode='readonly')
if self.extn:
if len(self.extn) == 1:
hdu = self.handle[self.extn[0]]
else:
hdu = self.handle[self.extn[0],self.extn[1]]
else:
hdu = self.handle[0]
if isinstance(hdu,fits.hdu.compressed.CompImageHDU):
self.compress = True
return hdu
|
Opens the file for subsequent access.
|
entailment
|
def overlapsWith(self, targetStartTime, duration):
"""
Tests if the given times overlap with this measurement.
:param targetStartTime: the target start time.
:param duration: the duration
:return: true if the given times overlap with this measurement.
"""
targetEndTime = targetStartTime + datetime.timedelta(days=0, seconds=duration)
return (self.startTime <= targetStartTime <= self.endTime) \
or (targetStartTime <= self.startTime <= targetEndTime)
|
Tests if the given times overlap with this measurement.
:param targetStartTime: the target start time.
:param duration: the duration
:return: true if the given times overlap with this measurement.
|
entailment
|
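The two-sided check in overlapsWith covers both orderings (the measurement starts first, or the target does); a quick worked example with datetimes:

```python
import datetime

start = datetime.datetime(2024, 1, 1, 12, 0, 0)
end = start + datetime.timedelta(seconds=60)        # measurement 12:00:00-12:01:00

target = datetime.datetime(2024, 1, 1, 12, 0, 30)   # target 12:00:30-12:01:30
target_end = target + datetime.timedelta(seconds=60)

# Same expression as overlapsWith:
assert (start <= target <= end) or (target <= start <= target_end)
```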
def updateDeviceStatus(self, deviceName, state, reason=None):
"""
Updates the current device status.
:param deviceName: the device name.
:param state: the state.
:param reason: the reason for the change.
:return:
"""
logger.info('Updating recording device state for ' + deviceName + ' to ' + state.name +
('' if reason is None else ' [reason: ' + reason + ']'))
currentState = self.recordingDevices.get(deviceName)
count = 0
if currentState is not None:
if currentState['state'] == MeasurementStatus.RECORDING.name:
count = currentState['count']
self.recordingDevices[deviceName] = {
'state': state.name,
'reason': reason,
'time': datetime.datetime.utcnow().strftime(DATETIME_FORMAT),
'count': count
}
|
Updates the current device status.
:param deviceName: the device name.
:param state: the state.
:param reason: the reason for the change.
:return:
|
entailment
|
def stillRecording(self, deviceId, dataCount):
"""
For a device that is recording, updates the last timestamp so we know when we last received data.
:param deviceId: the device id.
:param dataCount: the no of items of data recorded in this batch.
:return:
"""
status = self.recordingDevices[deviceId]
if status is not None:
if status['state'] == MeasurementStatus.RECORDING.name:
status['last'] = datetime.datetime.utcnow().strftime(DATETIME_FORMAT)
status['count'] = status['count'] + dataCount
|
For a device that is recording, updates the last timestamp so we know when we last received data.
:param deviceId: the device id.
:param dataCount: the no of items of data recorded in this batch.
:return:
|
entailment
|
def inflate(self):
"""
Loads the recording data into memory.
:return: True if the data was loaded, False otherwise.
"""
if self.measurementParameters['accelerometerEnabled']:
if len(self.data) == 0:
logger.info('Loading measurement data for ' + self.name)
self.data = {name: self._loadXYZ(name) for name, value in self.recordingDevices.items()}
return True
else:
# TODO error handling
return False
|
Loads the recording data into memory.
:return: True if the data was loaded, False otherwise.
|
entailment
|
def _sweep(self):
"""
        Checks the state of each active measurement: if it is now complete it is moved to the completed set,
        if it has failed it is moved to the failed set, and if it has failed and is old it is evicted.
:return:
"""
while self.running:
for am in list(self.activeMeasurements):
now = datetime.datetime.utcnow()
# devices were allocated and have completed == complete
recordingDeviceCount = len(am.recordingDevices)
if recordingDeviceCount > 0:
if all(entry['state'] == RecordStatus.COMPLETE.name for entry in am.recordingDevices.values()):
logger.info("Detected completedmeasurement " + am.id)
self._moveToComplete(am)
# we have reached the end time and we have either all failed devices or no devices == kill
if now > (am.endTime + datetime.timedelta(days=0, seconds=1)):
allFailed = all(entry['state'] == RecordStatus.FAILED.name
for entry in am.recordingDevices.values())
if (recordingDeviceCount > 0 and allFailed) or recordingDeviceCount == 0:
logger.warning("Detected failed measurement " + am.id + " with " + str(recordingDeviceCount)
+ " devices, allFailed: " + str(allFailed))
self._moveToFailed(am)
# we are well past the end time and we have failed devices or an ongoing recording == kill or deathbed
if now > (am.endTime + datetime.timedelta(days=0, seconds=self.maxTimeTilDeathbedSeconds)):
if any(entry['state'] == RecordStatus.FAILED.name for entry in am.recordingDevices.values()):
logger.warning("Detected failed and incomplete measurement " + am.id + ", assumed dead")
self._moveToFailed(am)
elif all(entry['state'] == RecordStatus.RECORDING.name for entry in am.recordingDevices.values()):
self._handleDeathbed(am)
time.sleep(0.1)
logger.warning("MeasurementCaretaker is now shutdown")
|
Checks the state of each active measurement: if it is now complete it is moved to the completed set,
if it has failed it is moved to the failed set, and if it has failed and is old it is evicted.
:return:
|
entailment
|
def schedule(self, name, duration, startTime, description=None):
"""
Schedules a new measurement with the given name.
:param name:
:param duration:
:param startTime:
:param description:
:return: a tuple
boolean: measurement was scheduled if true
message: description, generally only used as an error code
"""
if self._clashes(startTime, duration):
return False, MEASUREMENT_TIMES_CLASH
else:
am = ActiveMeasurement(name, startTime, duration, self.targetStateProvider.state, description=description)
logger.info("Scheduling measurement " + am.id + " for " + str(duration) + "s")
self.activeMeasurements.append(am)
devices = self.deviceController.scheduleMeasurement(am.id, am.duration, am.startTime)
anyFail = False
for device, status in devices.items():
if status == 200:
deviceStatus = RecordStatus.SCHEDULED
else:
deviceStatus = RecordStatus.FAILED
anyFail = True
am.updateDeviceStatus(device.deviceId, deviceStatus)
if anyFail:
am.status = MeasurementStatus.FAILED
else:
if am.status is MeasurementStatus.NEW:
am.status = MeasurementStatus.SCHEDULED
return True, None
|
Schedules a new measurement with the given name.
:param name:
:param duration:
:param startTime:
:param description:
:return: a tuple
boolean: measurement was scheduled if true
message: description, generally only used as an error code
|
entailment
|
def _clashes(self, startTime, duration):
"""
        Finds any already-scheduled measurements that clash with the proposed times.
        :param startTime: the start time.
        :param duration: the duration.
        :return: the clashing measurements (truthy if there is a clash, falsy if the measurement is allowed).
"""
return [m for m in self.activeMeasurements if m.overlapsWith(startTime, duration)]
|
Finds any already-scheduled measurements that clash with the proposed times.
:param startTime: the start time.
:param duration: the duration.
:return: the clashing measurements (truthy if there is a clash, falsy if the measurement is allowed).
|
entailment
|
def startMeasurement(self, measurementId, deviceId):
"""
Starts the measurement for the device.
:param deviceId: the device that is starting.
:param measurementId: the measurement that is started.
:return: true if it started (i.e. device and measurement exists).
"""
am, handler = self.getDataHandler(measurementId, deviceId)
if am is not None:
am.status = MeasurementStatus.RECORDING
am.updateDeviceStatus(deviceId, RecordStatus.RECORDING)
handler.start(am.idAsPath)
return True
else:
return False
|
Starts the measurement for the device.
:param deviceId: the device that is starting.
:param measurementId: the measurement that is started.
:return: true if it started (i.e. device and measurement exists).
|
entailment
|
def getDataHandler(self, measurementId, deviceId):
"""
finds the handler.
:param measurementId: the measurement
:param deviceId: the device.
:return: active measurement and handler
"""
am = next((m for m in self.activeMeasurements if m.id == measurementId), None)
if am is None:
return None, None
else:
device = self.deviceController.getDevice(deviceId)
if device is None:
return None, None
else:
return am, device.dataHandler
|
finds the handler.
:param measurementId: the measurement
:param deviceId: the device.
:return: active measurement and handler
|
entailment
|
def recordData(self, measurementId, deviceId, data):
"""
Passes the data to the handler.
:param deviceId: the device the data comes from.
:param measurementId: the measurement id.
:param data: the data.
:return: true if the data was handled.
"""
am, handler = self.getDataHandler(measurementId, deviceId)
if handler is not None:
am.stillRecording(deviceId, len(data))
handler.handle(data)
return True
else:
logger.error('Received data for unknown handler ' + deviceId + '/' + measurementId)
return False
|
Passes the data to the handler.
:param deviceId: the device the data comes from.
:param measurementId: the measurement id.
:param data: the data.
:return: true if the data was handled.
|
entailment
|
def completeMeasurement(self, measurementId, deviceId):
"""
Completes the measurement session.
:param deviceId: the device id.
:param measurementId: the measurement id.
:return: true if it was completed.
"""
am, handler = self.getDataHandler(measurementId, deviceId)
if handler is not None:
handler.stop(measurementId)
am.updateDeviceStatus(deviceId, RecordStatus.COMPLETE)
return True
else:
return False
|
Completes the measurement session.
:param deviceId: the device id.
:param measurementId: the measurement id.
:return: true if it was completed.
|
entailment
|
def failMeasurement(self, measurementId, deviceName, failureReason=None):
"""
Fails the measurement session.
:param deviceName: the device name.
:param measurementId: the measurement name.
:param failureReason: why it failed.
:return: true if it was completed.
"""
am, handler = self.getDataHandler(measurementId, deviceName)
if handler is not None:
am.updateDeviceStatus(deviceName, RecordStatus.FAILED, reason=failureReason)
handler.stop(measurementId)
return True
else:
return False
|
Fails the measurement session.
:param deviceName: the device name.
:param measurementId: the measurement name.
:param failureReason: why it failed.
:return: true if it was completed.
|
entailment
|
def _deleteCompletedMeasurement(self, measurementId):
"""
Deletes the named measurement from the completed measurement store if it exists.
:param measurementId:
:return:
String: error messages
Integer: count of measurements deleted
"""
message, count, deleted = self.deleteFrom(measurementId, self.completeMeasurements)
        if count == 0:
message, count, deleted = self.deleteFrom(measurementId, self.failedMeasurements)
return message, count, deleted
|
Deletes the named measurement from the completed measurement store if it exists.
:param measurementId:
:return:
String: error messages
Integer: count of measurements deleted
|
entailment
|
def reloadCompletedMeasurements(self):
"""
Reloads the completed measurements from the backing store.
"""
from pathlib import Path
reloaded = [self.load(x.resolve()) for x in Path(self.dataDir).glob('*/*/*') if x.is_dir()]
logger.info('Reloaded ' + str(len(reloaded)) + ' completed measurements')
self.completeMeasurements = [x for x in reloaded if x is not None and x.status == MeasurementStatus.COMPLETE]
self.failedMeasurements = [x for x in reloaded if x is not None and x.status == MeasurementStatus.FAILED]
|
Reloads the completed measurements from the backing store.
|
entailment
|
def getMeasurements(self, measurementStatus=None):
"""
Gets all available measurements.
        :param measurementStatus: return only the measurements in the given state.
:return:
"""
if measurementStatus is None:
return self.activeMeasurements + self.completeMeasurements + self.failedMeasurements
elif measurementStatus == MeasurementStatus.COMPLETE:
return self.completeMeasurements
elif measurementStatus == MeasurementStatus.FAILED:
return self.failedMeasurements
elif measurementStatus == MeasurementStatus.DYING:
return list(self.deathBed.keys())
else:
return [x for x in self.activeMeasurements if x.status == measurementStatus]
|
Gets all available measurements.
:param measurementStatus: return only the measurements in the given state.
:return:
|
entailment
|
def getMeasurement(self, measurementId, measurementStatus=None):
"""
Gets the measurement with the given id.
:param measurementId: the id.
:param measurementStatus: the status of the requested measurement.
:return: the matching measurement or none if it doesn't exist.
"""
return next((x for x in self.getMeasurements(measurementStatus) if x.id == measurementId), None)
|
Gets the measurement with the given id.
:param measurementId: the id.
:param measurementStatus: the status of the requested measurement.
:return: the matching measurement or none if it doesn't exist.
|
entailment
|
def store(self, measurement):
"""
Writes the measurement metadata to disk on completion.
        :param measurement: the measurement that has completed.
:returns the persisted metadata.
"""
os.makedirs(self._getPathToMeasurementMetaDir(measurement.idAsPath), exist_ok=True)
output = marshal(measurement, measurementFields)
with open(self._getPathToMeasurementMetaFile(measurement.idAsPath), 'w') as outfile:
json.dump(output, outfile)
return output
|
Writes the measurement metadata to disk on completion.
:param measurement: the measurement that has completed.
:returns the persisted metadata.
|
entailment
|
def load(self, path):
"""
        Loads a CompletedMeasurement from the path.
:param path: the path at which the data is found.
:return: the measurement
"""
meta = self._loadMetaFromJson(path)
return CompleteMeasurement(meta, self.dataDir) if meta is not None else None
|
Loads a CompletedMeasurement from the path.
:param path: the path at which the data is found.
:return: the measurement
|
entailment
|
def _loadMetaFromJson(self, path):
"""
Reads the json meta into memory.
:return: the meta.
"""
try:
with (path / 'metadata.json').open() as infile:
return json.load(infile)
except FileNotFoundError:
logger.error('Metadata does not exist at ' + str(path))
return None
|
Reads the json meta into memory.
:return: the meta.
|
entailment
|
def editMeasurement(self, measurementId, data):
"""
Edits the specified measurement with the provided data.
:param measurementId: the measurement id.
:param data: the data to update.
:return: true if the measurement was edited
"""
oldMeasurement = self.getMeasurement(measurementId, measurementStatus=MeasurementStatus.COMPLETE)
if oldMeasurement:
import copy
newMeasurement = copy.deepcopy(oldMeasurement)
deleteOld = False
createdFilteredCopy = False
newName = data.get('name', None)
newDesc = data.get('description', None)
newStart = float(data.get('start', 0))
newEnd = float(data.get('end', oldMeasurement.duration))
newDuration = newEnd - newStart
newDevices = data.get('devices', None)
if newName:
logger.info('Updating name from ' + oldMeasurement.name + ' to ' + newName)
newMeasurement.updateName(newName)
createdFilteredCopy = True
deleteOld = True
if newDesc:
logger.info('Updating description from ' + str(oldMeasurement.description) + ' to ' + str(newDesc))
newMeasurement.description = newDesc
if newDuration != oldMeasurement.duration:
                logger.info('Copying measurement to support the new duration ' + str(newDuration))
if oldMeasurement.name == newMeasurement.name:
newMeasurement.updateName(newMeasurement.name + '-' + str(int(time.time())))
newMeasurement.duration = newDuration
createdFilteredCopy = True
if createdFilteredCopy:
logger.info('Copying measurement data from ' + oldMeasurement.idAsPath + ' to ' + newMeasurement.idAsPath)
newMeasurementPath = self._getPathToMeasurementMetaDir(newMeasurement.idAsPath)
dataSearchPattern = self._getPathToMeasurementMetaDir(oldMeasurement.idAsPath) + '/**/data.out'
newDataCountsByDevice = [self._filterCopy(dataFile, newStart, newEnd, newMeasurementPath)
for dataFile in glob.glob(dataSearchPattern)]
for device, count in newDataCountsByDevice:
newMeasurement.recordingDevices.get(device)['count'] = count
self.store(newMeasurement)
if newDevices:
for renames in newDevices:
logger.info('Updating device name from ' + str(renames[0]) + ' to ' + str(renames[1]))
deviceState = newMeasurement.recordingDevices.get(renames[0])
newMeasurement.recordingDevices[renames[1]] = deviceState
del newMeasurement.recordingDevices[renames[0]]
os.rename(os.path.join(self._getPathToMeasurementMetaDir(newMeasurement.idAsPath), renames[0]),
os.path.join(self._getPathToMeasurementMetaDir(newMeasurement.idAsPath), renames[1]))
self.store(newMeasurement)
if deleteOld or createdFilteredCopy or newDevices:
self.completeMeasurements.append(newMeasurement)
if deleteOld:
self.delete(oldMeasurement.id)
return True
else:
return False
|
Edits the specified measurement with the provided data.
:param measurementId: the measurement id.
:param data: the data to update.
:return: true if the measurement was edited
|
entailment
|
def _filterCopy(self, dataFile, newStart, newEnd, newDataDir):
"""
Copies the data file to a new file in the tmp dir, filtering it according to newStart and newEnd and adjusting
the times as appropriate so it starts from 0.
:param dataFile: the input file.
:param newStart: the new start time.
:param newEnd: the new end time.
:param newDataDir: the tmp dir to write to.
:return: the device name & no of rows in the data.
"""
import csv
pathToData = os.path.split(dataFile)
dataFileName = pathToData[1]
dataDeviceName = os.path.split(pathToData[0])[1]
os.makedirs(os.path.join(newDataDir, dataDeviceName), exist_ok=True)
outputFile = os.path.join(newDataDir, dataDeviceName, dataFileName)
dataCount = 0
rowNum = 0
with open(dataFile, mode='rt', newline='') as dataIn, open(outputFile, mode='wt', newline='') as dataOut:
writer = csv.writer(dataOut, delimiter=',')
for row in csv.reader(dataIn, delimiter=','):
if len(row) > 0:
time = float(row[0])
if newStart <= time <= newEnd:
newRow = row[:]
if newStart > 0:
newRow[0] = "{0:.3f}".format(time - newStart)
writer.writerow(newRow)
dataCount += 1
else:
logger.warning('Ignoring empty row ' + str(rowNum) + ' in ' + str(dataFile))
rowNum += 1
return dataDeviceName, dataCount
|
Copies the data file to a new file in the tmp dir, filtering it according to newStart and newEnd and adjusting
the times as appropriate so it starts from 0.
:param dataFile: the input file.
:param newStart: the new start time.
:param newEnd: the new end time.
:param newDataDir: the tmp dir to write to.
:return: the device name & no of rows in the data.
|
entailment
|
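A worked example of the filtering and rebasing rule applied by _filterCopy above: rows outside [newStart, newEnd] are dropped, and surviving timestamps are shifted so the copy starts from 0. The row values here are invented for illustration:

rows = [['0.000', '1'], ['1.500', '2'], ['3.000', '3'], ['4.500', '4']]
newStart, newEnd = 1.0, 4.0
# Keep rows inside the window and rebase their timestamps to start at 0.
kept = [["{0:.3f}".format(float(r[0]) - newStart)] + r[1:]
        for r in rows if newStart <= float(r[0]) <= newEnd]
assert kept == [['0.500', '2'], ['2.000', '3']]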
def checkFiles(filelist,ivmlist = None):
"""
    - Converts WAIVER FITS science and data quality files to MEF format
- Converts GEIS science and data quality files to MEF format
- Checks for stis association tables and splits them into single imsets
- Removes files with EXPTIME=0 and the corresponding ivm files
- Removes files with NGOODPIX == 0 (to exclude saturated images)
- Removes files with missing PA_V3 keyword
The list of science files should match the list of ivm files at the end.
"""
toclose = False
if isinstance(filelist[0], str):
toclose = True
newfilelist, ivmlist = checkFITSFormat(filelist, ivmlist)
# check for STIS association files. This must be done before
# the other checks in order to handle correctly stis
# assoc files
newfilelist, ivmlist = checkStisFiles(newfilelist, ivmlist)
if newfilelist == []:
return [], []
removed_expt_files = check_exptime(newfilelist)
newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_expt_files)
if newfilelist == []:
return [], []
removed_ngood_files = checkNGOODPIX(newfilelist)
newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_ngood_files)
if newfilelist == []:
return [], []
removed_pav3_files = checkPA_V3(newfilelist)
newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_pav3_files)
newfilelist, ivmlist = update_input(newfilelist, ivmlist,[])
if newfilelist == []:
return [], []
if toclose:
newfilelist = [hdul.filename() for hdul in newfilelist]
return newfilelist, ivmlist
|
- Converts WAIVER FITS science and data quality files to MEF format
- Converts GEIS science and data quality files to MEF format
- Checks for stis association tables and splits them into single imsets
- Removes files with EXPTIME=0 and the corresponding ivm files
- Removes files with NGOODPIX == 0 (to exclude saturated images)
- Removes files with missing PA_V3 keyword
The list of science files should match the list of ivm files at the end.
|
entailment
|
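checkFiles above leans on a helper update_input that is not shown in this excerpt. A plausible minimal sketch, offered only as an assumption about its contract (drop each removed science file together with its IVM partner so the two lists stay in step):

def update_input(filelist, ivmlist, removed_files):
    # Assumed contract: filter both lists in parallel so science files
    # and their IVM partners stay aligned after removals.
    newfilelist = []
    newivmlist = []
    for f, ivm in zip(filelist, ivmlist):
        if f not in removed_files:
            newfilelist.append(f)
            newivmlist.append(ivm)
    return newfilelist, newivmlist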
def checkFITSFormat(filelist, ivmlist=None):
"""
This code will check whether or not files are GEIS or WAIVER FITS and
convert them to MEF if found. It also keeps the IVMLIST consistent with
the input filelist, in the case that some inputs get dropped during
the check/conversion.
"""
if ivmlist is None:
ivmlist = [None for l in filelist]
sci_ivm = list(zip(filelist, ivmlist))
removed_files, translated_names, newivmlist = convert2fits(sci_ivm)
newfilelist, ivmlist = update_input(filelist, ivmlist, removed_files)
if newfilelist == [] and translated_names == []:
return [], []
elif translated_names != []:
newfilelist.extend(translated_names)
ivmlist.extend(newivmlist)
return newfilelist, ivmlist
|
This code will check whether or not files are GEIS or WAIVER FITS and
convert them to MEF if found. It also keeps the IVMLIST consistent with
the input filelist, in the case that some inputs get dropped during
the check/conversion.
|
entailment
|
def check_exptime(filelist):
"""
Removes files with EXPTIME==0 from filelist.
"""
toclose = False
removed_files = []
for f in filelist:
if isinstance(f, str):
f = fits.open(f)
toclose = True
try:
exptime = f[0].header['EXPTIME']
except KeyError:
removed_files.append(f)
print("Warning: There are files without keyword EXPTIME")
continue
if exptime <= 0:
removed_files.append(f)
print("Warning: There are files with zero exposure time: keyword EXPTIME = 0.0")
if removed_files != []:
print("Warning: Removing the following files from input list")
for f in removed_files:
print('\t',f.filename() or "")
return removed_files
|
Removes files with EXPTIME==0 from filelist.
|
entailment
|
def checkNGOODPIX(filelist):
"""
Only for ACS, WFC3 and STIS, check NGOODPIX
If all pixels are 'bad' on all chips, exclude this image
from further processing.
    Similar checks that require comparing 'driz_sep_bits' against
    WFPC2 c1f.fits arrays and NICMOS DQ arrays will need to be
    done separately (and later).
"""
toclose = False
removed_files = []
supported_instruments = ['ACS','STIS','WFC3']
for inputfile in filelist:
if isinstance(inputfile, str):
if fileutil.getKeyword(inputfile,'instrume') in supported_instruments:
inputfile = fits.open(inputfile)
toclose = True
elif inputfile[0].header['instrume'] not in supported_instruments:
continue
ngood = 0
for extn in inputfile:
if 'EXTNAME' in extn.header and extn.header['EXTNAME'] == 'SCI':
ngood += extn.header['NGOODPIX']
if (ngood == 0):
removed_files.append(inputfile)
if toclose:
inputfile.close()
if removed_files != []:
print("Warning: Files without valid pixels detected: keyword NGOODPIX = 0.0")
print("Warning: Removing the following files from input list")
for f in removed_files:
print('\t',f.filename() or "")
return removed_files
|
Only for ACS, WFC3 and STIS, check NGOODPIX
If all pixels are 'bad' on all chips, exclude this image
from further processing.
Similar checks that require comparing 'driz_sep_bits' against
WFPC2 c1f.fits arrays and NICMOS DQ arrays will need to be
done separately (and later).
|
entailment
|
def stisObsCount(input):
"""
Input: A stis multiextension file
Output: Number of stis science extensions in input
"""
count = 0
toclose = False
if isinstance(input, str):
input = fits.open(input)
toclose = True
for ext in input:
if 'extname' in ext.header:
if (ext.header['extname'].upper() == 'SCI'):
count += 1
if toclose:
input.close()
return count
|
Input: A stis multiextension file
Output: Number of stis science extensions in input
|
entailment
|
def splitStis(stisfile, sci_count):
"""
Split a STIS association file into multiple imset MEF files.
Split the corresponding spt file if present into single spt files.
If an spt file can't be split or is missing a Warning is printed.
Returns
-------
names: list
a list with the names of the new flt files.
"""
newfiles = []
toclose = False
if isinstance(stisfile, str):
f = fits.open(stisfile)
toclose = True
else:
f = stisfile
hdu0 = f[0].copy()
    stisfilename = f.filename()
for count in range(1,sci_count+1):
fitsobj = fits.HDUList()
fitsobj.append(hdu0)
hdu = f[('sci',count)].copy()
fitsobj.append(hdu)
rootname = hdu.header['EXPNAME']
newfilename = fileutil.buildNewRootname(rootname, extn='_flt.fits')
try:
# Verify error array exists
if f[('err', count)].data is None:
raise ValueError
# Verify dq array exists
if f[('dq', count)].data is None:
raise ValueError
# Copy the err extension
hdu = f[('err',count)].copy()
fitsobj.append(hdu)
# Copy the dq extension
hdu = f[('dq',count)].copy()
fitsobj.append(hdu)
fitsobj[1].header['EXTVER'] = 1
fitsobj[2].header['EXTVER'] = 1
fitsobj[3].header['EXTVER'] = 1
except ValueError:
print('\nWarning:')
            print('Extension version %d of the input file %s does not' % (count, stisfilename))
            print('contain all required image extensions. Each must contain')
            print('populated SCI, ERR and DQ arrays.')
continue
# Determine if the file you wish to create already exists on the disk.
# If the file does exist, replace it.
if (os.path.exists(newfilename)):
os.remove(newfilename)
print(" Replacing "+newfilename+"...")
# Write out the new file
fitsobj.writeto(newfilename)
        # Ensure returned HDUList is associated with a file
fitsobj.close()
fitsobj = fits.open(newfilename, mode='update')
newfiles.append(fitsobj) # Return HDUList, not filename
f.close()
sptfilename = fileutil.buildNewRootname(stisfilename, extn='_spt.fits')
try:
sptfile = fits.open(sptfilename)
except IOError:
print('SPT file not found %s \n' % sptfilename)
return newfiles
if sptfile:
hdu0 = sptfile[0].copy()
try:
for count in range(1,sci_count+1):
fitsobj = fits.HDUList()
fitsobj.append(hdu0)
hdu = sptfile[count].copy()
fitsobj.append(hdu)
rootname = hdu.header['EXPNAME']
newfilename = fileutil.buildNewRootname(rootname, extn='_spt.fits')
fitsobj[1].header['EXTVER'] = 1
if (os.path.exists(newfilename)):
os.remove(newfilename)
print(" Replacing "+newfilename+"...")
# Write out the new file
fitsobj.writeto(newfilename)
        except Exception:
print("Warning: Unable to split spt file %s " % sptfilename)
if toclose:
sptfile.close()
return newfiles
|
Split a STIS association file into multiple imset MEF files.
Split the corresponding spt file if present into single spt files.
If an spt file can't be split or is missing a Warning is printed.
Returns
-------
names: list
a list with the names of the new flt files.
|
entailment
|
def stisExt2PrimKw(stisfiles):
"""
    Several keywords which are usually in the primary header
    are in the extension header for STIS. They are copied to
    the primary header for convenience.
    List of keywords:
'DATE-OBS', 'EXPEND', 'EXPSTART', 'EXPTIME'
"""
kw_list = ['DATE-OBS', 'EXPEND', 'EXPSTART', 'EXPTIME']
for sfile in stisfiles:
toclose = False
if isinstance(sfile, str):
sfile = fits.open(sfile, mode='update')
toclose = True
for k in kw_list:
sfile[0].header[k] = sfile[1].header[k]
sfile[0].header.comments[k] = "Copied from extension header"
if toclose:
sfile.close()
|
Several keywords which are usually in the primary header
are in the extension header for STIS. They are copied to
the primary header for convenience.
List of keywords:
'DATE-OBS', 'EXPEND', 'EXPSTART', 'EXPTIME'
|
entailment
|
def convert2fits(sci_ivm):
"""
    Checks if a file is in WAIVER or GEIS format and converts it to MEF.
"""
removed_files = []
translated_names = []
newivmlist = []
for file in sci_ivm:
#find out what the input is
# if science file is not found on disk, add it to removed_files for removal
try:
imgfits,imgtype = fileutil.isFits(file[0])
except IOError:
print("Warning: File %s could not be found" %file[0])
print("Warning: Removing file %s from input list" %file[0])
removed_files.append(file[0])
continue
# Check for existence of waiver FITS input, and quit if found.
# Or should we print a warning and continue but not use that file
if imgfits and imgtype == 'waiver':
newfilename = waiver2mef(file[0], convert_dq=True)
if newfilename is None:
print("Removing file %s from input list - could not convert WAIVER format to MEF\n" %file[0])
removed_files.append(file[0])
else:
removed_files.append(file[0])
translated_names.append(newfilename)
newivmlist.append(file[1])
# If a GEIS image is provided as input, create a new MEF file with
# a name generated using 'buildFITSName()'
# Convert the corresponding data quality file if present
if not imgfits:
newfilename = geis2mef(file[0], convert_dq=True)
if newfilename is None:
print("Removing file %s from input list - could not convert GEIS format to MEF\n" %file[0])
removed_files.append(file[0])
else:
removed_files.append(file[0])
translated_names.append(newfilename)
newivmlist.append(file[1])
return removed_files, translated_names, newivmlist
|
Checks if a file is in WAIVER or GEIS format and converts it to MEF.
|
entailment
|
def waiver2mef(sciname, newname=None, convert_dq=True, writefits=True):
"""
    Converts a WAIVERED FITS science file and its corresponding
    data quality file (if present) to MEF format.
    Writes out both files to disk.
    Returns a handle to the converted science image.
"""
if isinstance(sciname, fits.HDUList):
filename = sciname.filename()
else:
filename = sciname
try:
clobber = True
fimg = convertwaiveredfits.convertwaiveredfits(filename)
#check for the existence of a data quality file
_dqname = fileutil.buildNewRootname(filename, extn='_c1f.fits')
dqexists = os.path.exists(_dqname)
if convert_dq and dqexists:
try:
dqfile = convertwaiveredfits.convertwaiveredfits(_dqname)
dqfitsname = fileutil.buildNewRootname(_dqname, extn='_c1h.fits')
except Exception:
print("Could not read data quality file %s" % _dqname)
if writefits:
# User wants to make a FITS copy and update it
# using the filename they have provided
rname = fileutil.buildNewRootname(filename)
fitsname = fileutil.buildNewRootname(rname, extn='_c0h.fits')
            # Write out WAIVERED image as multi-extension FITS.
fexists = os.path.exists(fitsname)
if (fexists and clobber) or not fexists:
print('Writing out WAIVERED as MEF to ', fitsname)
if ASTROPY_VER_GE13:
fimg.writeto(fitsname, overwrite=clobber)
else:
fimg.writeto(fitsname, clobber=clobber)
if dqexists:
print('Writing out WAIVERED as MEF to ', dqfitsname)
if ASTROPY_VER_GE13:
dqfile.writeto(dqfitsname, overwrite=clobber)
else:
dqfile.writeto(dqfitsname, clobber=clobber)
            # Now close the input image, and open a writable
            # handle to the output FITS image instead...
fimg.close()
del fimg
fimg = fits.open(fitsname, mode='update', memmap=False)
return fimg
except IOError:
print('Warning: File %s could not be found' % sciname)
return None
|
Converts a WAIVERED FITS science file and its corresponding
data quality file (if present) to MEF format.
Writes out both files to disk.
Returns a handle to the converted science image.
|
entailment
|
def geis2mef(sciname, convert_dq=True):
"""
    Converts a GEIS science file and its corresponding
    data quality file (if present) to MEF format.
    Writes out both files to disk.
    Returns a handle to the converted science image.
"""
clobber = True
mode = 'update'
memmap = True
# Input was specified as a GEIS image, but no FITS copy
# exists. Read it in with 'readgeis' and make a copy
# then open the FITS copy...
try:
# Open as a GEIS image for reading only
fimg = readgeis.readgeis(sciname)
except Exception:
raise IOError("Could not open GEIS input: %s" % sciname)
#check for the existence of a data quality file
_dqname = fileutil.buildNewRootname(sciname, extn='.c1h')
dqexists = os.path.exists(_dqname)
if dqexists:
try:
dqfile = readgeis.readgeis(_dqname)
dqfitsname = fileutil.buildFITSName(_dqname)
except Exception:
print("Could not read data quality file %s" % _dqname)
# Check to see if user wanted to update GEIS header.
# or write out a multi-extension FITS file and return a handle to it
# User wants to make a FITS copy and update it
# using the filename they have provided
fitsname = fileutil.buildFITSName(sciname)
# Write out GEIS image as multi-extension FITS.
fexists = os.path.exists(fitsname)
if (fexists and clobber) or not fexists:
print('Writing out GEIS as MEF to ', fitsname)
if ASTROPY_VER_GE13:
fimg.writeto(fitsname, overwrite=clobber)
else:
fimg.writeto(fitsname, clobber=clobber)
if dqexists:
print('Writing out GEIS as MEF to ', dqfitsname)
if ASTROPY_VER_GE13:
dqfile.writeto(dqfitsname, overwrite=clobber)
else:
dqfile.writeto(dqfitsname, clobber=clobber)
# Now close input GEIS image, and open writable
# handle to output FITS image instead...
fimg.close()
del fimg
fimg = fits.open(fitsname, mode=mode, memmap=memmap)
return fimg
|
Converts a GEIS science file and its corresponding
data quality file (if present) to MEF format.
Writes out both files to disk.
Returns a handle to the converted science image.
|
entailment
|
def journals(self):
"""
        Retrieve the journals attribute for this Issue.
"""
try:
target = self._item_path
json_data = self._redmine.get(target % str(self.id),
parms={'include': 'journals'})
data = self._redmine.unwrap_json(None, json_data)
journals = [Journal(redmine=self._redmine,
data=journal,
type='issue_journal')
for journal in data['issue']['journals']]
return journals
except Exception:
return []
|
Retrieve the journals attribute for this Issue.
|
entailment
|
def save(self, notes=None):
'''Save all changes back to Redmine with optional notes.'''
# Capture the notes if given
if notes:
self._changes['notes'] = notes
# Call the base-class save function
super(Issue, self).save()
|
Save all changes back to Redmine with optional notes.
|
entailment
|
def set_status(self, new_status, notes=None):
'''Save all changes and set to the given new_status'''
self.status_id = new_status
try:
self.status['id'] = self.status_id
# We don't have the id to name mapping, so blank the name
self.status['name'] = None
        except Exception:
pass
self.save(notes)
|
Save all changes and set to the given new_status
|
entailment
|
def resolve(self, notes=None):
'''Save all changes and resolve this issue'''
self.set_status(self._redmine.ISSUE_STATUS_ID_RESOLVED, notes=notes)
|
Save all changes and resolve this issue
|
entailment
|
def close(self, notes=None):
'''Save all changes and close this issue'''
self.set_status(self._redmine.ISSUE_STATUS_ID_CLOSED, notes=notes)
|
Save all changes and close this issue
|
entailment
|
def _objectify(self, json_data=None, data={}):
'''Return an object derived from the given json data.'''
if json_data:
# Parse the data
try:
data = json.loads(json_data)
except ValueError:
# If parsing failed, then raise the string which likely
# contains an error message instead of data
raise RedmineError(json_data)
# Check to see if there is a data wrapper
# Some replies will have {'issue':{<data>}} instead of just {<data>}
try:
data = data[self._item_type]
except KeyError:
pass
# If there's no ID but a source path
if ('id' not in data) and ('_source_path' in data):
# use the path between /projects/ and .json as the ID
data['id'] = data['_source_path']\
.partition('/projects/')[2]\
.partition('.json')[0]
# Call the base class objectify method
return super(Redmine_Wiki_Pages_Manager, self)._objectify(data=data)
|
Return an object derived from the given json data.
|
entailment
|
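A tiny demonstration of the ID-recovery rule used in _objectify above: the text between '/projects/' and '.json' in the source path becomes the id. The example path is invented:

source_path = '/projects/my-wiki-page.json'
# partition('/projects/')[2] keeps everything after the marker;
# partition('.json')[0] keeps everything before the extension.
page_id = source_path.partition('/projects/')[2].partition('.json')[0]
assert page_id == 'my-wiki-page'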
def new(self, page_name, **dict):
'''
Create a new item with the provided dict information
at the given page_name. Returns the new item.
As of version 2.2 of Redmine, this doesn't seem to function.
'''
self._item_new_path = '/projects/%s/wiki/%s.json' % \
(self._project.identifier, page_name)
# Call the base class new method
return super(Redmine_Wiki_Pages_Manager, self).new(**dict)
|
Create a new item with the provided dict information
at the given page_name. Returns the new item.
As of version 2.2 of Redmine, this doesn't seem to function.
|
entailment
|
def _set_version(self, version):
'''
Set up this object based on the capabilities of the
known versions of Redmine
'''
# Store the version we are evaluating
self.version = version or None
# To evaluate the version capabilities,
# assume the best-case if no version is provided.
version_check = version or 9999.0
if version_check < 1.0:
raise RedmineError('This library will only work with '
'Redmine version 1.0 and higher.')
## SECURITY AUGMENTATION
# All versions support the key in the request
# (http://server/stuff.json?key=blah)
# But versions 1.1 and higher can put the key in a header field
# for better security.
        # If no version was provided, assume we should
        # send the key with the request rather than in a header.
        # Versions 1.1 and higher put the key in a header; 1.0 does not.
        self.key_in_header = (version or 0.0) >= 1.1
self.impersonation_supported = version_check >= 2.2
self.has_project_memberships = version_check >= 1.4
self.has_project_versions = version_check >= 1.3
self.has_wiki_pages = version_check >= 2.2
## ITEM MANAGERS
        # Step through all the item managers by version
        # and instantiate an item manager for each item.
for manager_version in self._item_managers_by_version:
if version_check >= manager_version:
managers = self._item_managers_by_version[manager_version]
                for attribute_name, item in managers.items():
setattr(self, attribute_name,
Redmine_Items_Manager(self, item))
|
Set up this object based on the capabilities of the
known versions of Redmine
|
entailment
|
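The version-gating pattern in _set_version above reduces to comparing a best-case fallback against per-feature thresholds. A minimal standalone illustration; the function and key names are invented for the example, while the thresholds come from the code above:

def capabilities(version=None):
    version_check = version or 9999.0  # assume best-case when version is unknown
    return {
        'impersonation_supported': version_check >= 2.2,
        'has_project_memberships': version_check >= 1.4,
        'has_wiki_pages': version_check >= 2.2,
    }

assert capabilities(1.3) == {'impersonation_supported': False,
                             'has_project_memberships': False,
                             'has_wiki_pages': False}
assert capabilities()['has_wiki_pages']  # no version => best-case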
def loadTargetState(targetStateConfig, existingTargetState=None):
"""
extracts a new TargetState object from the specified configuration
:param targetStateConfig: the config dict.
:param existingTargetState: the existing state
:return:
"""
from analyser.common.targetstatecontroller import TargetState
targetState = TargetState() if existingTargetState is None else existingTargetState
# FIXFIX validate
if targetStateConfig is not None:
val = targetStateConfig.get('fs')
if val is not None:
targetState.fs = val
val = targetStateConfig.get('samplesPerBatch')
if val is not None:
targetState.samplesPerBatch = val
val = targetStateConfig.get('gyroEnabled')
if val is not None:
targetState.gyroEnabled = val
val = targetStateConfig.get('gyroSens')
if val is not None:
targetState.gyroSens = val
val = targetStateConfig.get('accelerometerEnabled')
if val is not None:
targetState.accelerometerEnabled = val
val = targetStateConfig.get('accelerometerSens')
if val is not None:
targetState.accelerometerSens = val
return targetState
|
extracts a new TargetState object from the specified configuration
:param targetStateConfig: the config dict.
:param existingTargetState: the existing state
:return:
|
entailment
|
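The repeated copy-if-present blocks in loadTargetState above can be collapsed into a loop over field names. An equivalent sketch, using only the field names visible in the original code:

TARGET_STATE_FIELDS = ('fs', 'samplesPerBatch', 'gyroEnabled', 'gyroSens',
                       'accelerometerEnabled', 'accelerometerSens')

def apply_target_state_config(targetState, targetStateConfig):
    # Copy each configured value onto the target state, skipping absent keys.
    if targetStateConfig is not None:
        for field in TARGET_STATE_FIELDS:
            val = targetStateConfig.get(field)
            if val is not None:
                setattr(targetState, field, val)
    return targetState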
def interpret_bit_flags(bit_flags, flip_bits=None):
"""
Converts input bit flags to a single integer value (bitmask) or `None`.
When input is a list of flags (either a Python list of integer flags or a
    string of comma- or '+'-separated list of flags), the returned bitmask
is obtained by summing input flags.
.. note::
In order to flip the bits of the returned bitmask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bitmask or a Python list of bit flags, set
`flip_bits` for `True` in order to flip the bits of the returned
bitmask.
Parameters
----------
bit_flags : int, str, list, None
An integer bitmask or flag, `None`, a string of comma- or
'+'-separated list of integer bit flags, or a Python list of integer
bit flags. If `bit_flags` is a `str` and if it is prepended with '~',
then the output bitmask will have its bits flipped (compared to simple
sum of input flags). For input `bit_flags` that is already a bitmask
or a Python list of bit flags, bit-flipping can be controlled through
`flip_bits` parameter.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bitmask
obtained from input bit flags. This parameter must be set to `None`
when input `bit_flags` is either `None` or a Python list of flags.
Returns
-------
bitmask : int or None
        Returns an integer bit mask formed from the input bit flags
or `None` if input `bit_flags` parameter is `None` or an empty string.
If input string value was prepended with '~' (or `flip_bits` was
set to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from stsci.tools.bitmask import interpret_bit_flags
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
"""
has_flip_bits = flip_bits is not None
flip_bits = bool(flip_bits)
allow_non_flags = False
if _is_int(bit_flags):
return (~int(bit_flags) if flip_bits else int(bit_flags))
elif bit_flags is None:
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' must be set to 'None' when "
"input 'bit_flags' is None."
)
return None
elif isinstance(bit_flags, six.string_types):
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' is not permitted for "
"comma-separated string lists of bit flags. Prepend '~' to "
"the string to indicate bit-flipping."
)
bit_flags = str(bit_flags).strip()
if bit_flags.upper() in ['', 'NONE', 'INDEF']:
return None
# check whether bitwise-NOT is present and if it is, check that it is
# in the first position:
bitflip_pos = bit_flags.find('~')
if bitflip_pos == 0:
flip_bits = True
bit_flags = bit_flags[1:].lstrip()
else:
if bitflip_pos > 0:
raise ValueError("Bitwise-NOT must precede bit flag list.")
flip_bits = False
# basic check for correct use of parenthesis:
while True:
nlpar = bit_flags.count('(')
nrpar = bit_flags.count(')')
if nlpar == 0 and nrpar == 0:
break
if nlpar != nrpar:
raise ValueError("Unbalanced parantheses in bit flag list.")
lpar_pos = bit_flags.find('(')
rpar_pos = bit_flags.rfind(')')
if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
raise ValueError("Incorrect syntax (incorrect use of "
"parenthesis) in bit flag list.")
bit_flags = bit_flags[1:-1].strip()
if ',' in bit_flags:
bit_flags = bit_flags.split(',')
elif '+' in bit_flags:
bit_flags = bit_flags.split('+')
else:
if bit_flags == '':
raise ValueError(
"Empty bit flag lists not allowed when either bitwise-NOT "
"or parenthesis are present."
)
bit_flags = [bit_flags]
allow_non_flags = len(bit_flags) == 1
elif hasattr(bit_flags, '__iter__'):
if not all([_is_int(flag) for flag in bit_flags]):
raise TypeError("Each bit flag in a list must be an integer.")
else:
raise TypeError("Unsupported type for argument 'bit_flags'.")
bitset = set(map(int, bit_flags))
if len(bitset) != len(bit_flags):
warnings.warn("Duplicate bit flags will be ignored")
bitmask = 0
for v in bitset:
if not is_bit_flag(v) and not allow_non_flags:
raise ValueError("Input list contains invalid (not powers of two) "
"bit flags")
bitmask += v
if flip_bits:
bitmask = ~bitmask
return bitmask
|
Converts input bit flags to a single integer value (bitmask) or `None`.
When input is a list of flags (either a Python list of integer flags or a
string of comma- or '+'-separated list of flags), the returned bitmask
is obtained by summing input flags.
.. note::
In order to flip the bits of the returned bitmask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bitmask or a Python list of bit flags, set
`flip_bits` for `True` in order to flip the bits of the returned
bitmask.
Parameters
----------
bit_flags : int, str, list, None
An integer bitmask or flag, `None`, a string of comma- or
'+'-separated list of integer bit flags, or a Python list of integer
bit flags. If `bit_flags` is a `str` and if it is prepended with '~',
then the output bitmask will have its bits flipped (compared to simple
sum of input flags). For input `bit_flags` that is already a bitmask
or a Python list of bit flags, bit-flipping can be controlled through
`flip_bits` parameter.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bitmask
obtained from input bit flags. This parameter must be set to `None`
when input `bit_flags` is either `None` or a Python list of flags.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit flags
or `None` if input `bit_flags` parameter is `None` or an empty string.
If input string value was prepended with '~' (or `flip_bits` was
set to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from stsci.tools.bitmask import interpret_bit_flags
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
|
entailment
|
def bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None,
good_mask_value=True, dtype=np.bool_):
"""
    bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, \
                             good_mask_value=True, dtype=numpy.bool\_)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bitmask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : numpy.ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (Default = 0)
An integer bitmask, a Python list of bit flags, a comma- or
'+'-separated string list of integer bit flags that indicate what
bits in the input ``bitfield`` should be *ignored* (i.e., zeroed), or
`None`.
| Setting ``ignore_flags`` to `None` effectively will make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bitmask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
interpreted as a "bad" in the output boolean mask and it will be
interpreted as "good" otherwise. ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bitmask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
Setting ``ignore_flags`` to 0 effectively will assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
    | When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bitmask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bitmask, use ``flip_bits`` parameter.
| Alternatively, ``ignore_flags`` may be a string of comma- or
'+'-separated list of integer bit flags that should be added together
to create an integer bitmask. For example, both ``'4,8'`` and
``'4+8'`` are equivalent and indicate that bit flags 4 and 8 in
the input ``bitfield`` array should be ignored when generating
boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpretted as an
integer bitmask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (Default = None)
Specifies whether or not to invert the bits of the bitmask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposite to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bitmask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bitmask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (Default = True)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or `True` then values in the output
boolean mask array corresponding to "good" bit fields in ``bitfield``
will be `True` (if ``dtype`` is `numpy.bool_`) or 1 (if ``dtype`` is
of numerical type) and values of corresponding to "bad" flags will be
`False` (or 0). When ``good_mask_value`` is zero or `False` then the
values in the output boolean mask array corresponding to "good" bit
fields in ``bitfield`` will be `False` (if ``dtype`` is `numpy.bool_`)
or 0 (if ``dtype`` is of numerical type) and values of corresponding
to "bad" flags will be `True` (or 1).
dtype : data-type (Default = numpy.bool\_)
The desired data-type for the output binary mask array.
Returns
-------
mask : numpy.ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., `True` or `False` (or 1 or 0 for integer ``dtype``) according to
values of to the input ``bitfield`` elements, ``ignore_flags``
parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from stsci.tools import bitmask
>>> import numpy as np
>>> dqbits = np.asarray([[0,0,1,2,0,8,12,0],[10,4,0,0,0,16,6,0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0, dtype=int)
array([[1, 1, 0, 0, 1, 0, 0, 1],
[0, 0, 1, 1, 1, 0, 0, 1]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0, dtype=bool)
array([[ True, True, False, False, True, False, False, True],
[False, False, True, True, True, False, False, True]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6, good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=~6, good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6, flip_bits=True, good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags='~(2+4)', good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=[2, 4], flip_bits=True, good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
"""
bitfield = np.asarray(bitfield)
if not np.issubdtype(bitfield.dtype, np.integer):
raise TypeError("Input bitfield array must be of integer type.")
ignore_mask = interpret_bit_flags(ignore_flags, flip_bits=flip_bits)
if ignore_mask is None:
if good_mask_value:
mask = np.ones_like(bitfield, dtype=dtype)
else:
mask = np.zeros_like(bitfield, dtype=dtype)
return mask
# filter out bits beyond the maximum supported by the data type:
ignore_mask = ignore_mask & SUPPORTED_FLAGS
# invert the "ignore" mask:
ignore_mask = np.bitwise_not(ignore_mask, dtype=bitfield.dtype,
casting='unsafe')
mask = np.empty_like(bitfield, dtype=np.bool_)
np.bitwise_and(bitfield, ignore_mask, out=mask, casting='unsafe')
if good_mask_value:
np.logical_not(mask, out=mask)
return mask.astype(dtype=dtype, subok=False, copy=False)
|
bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, \
good_mask_value=True, dtype=numpy.bool\_)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bitmask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : numpy.ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (Default = 0)
An integer bitmask, a Python list of bit flags, a comma- or
'+'-separated string list of integer bit flags that indicate what
bits in the input ``bitfield`` should be *ignored* (i.e., zeroed), or
`None`.
| Setting ``ignore_flags`` to `None` effectively will make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bitmask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
interpreted as a "bad" in the output boolean mask and it will be
interpreted as "good" otherwise. ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bitmask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
Setting ``ignore_flags`` to 0 effectively will assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bitmask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bitmask, use ``flip_bits`` parameter.
| Alternatively, ``ignore_flags`` may be a string of comma- or
'+'-separated list of integer bit flags that should be added together
to create an integer bitmask. For example, both ``'4,8'`` and
``'4+8'`` are equivalent and indicate that bit flags 4 and 8 in
the input ``bitfield`` array should be ignored when generating
boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpreted as an
integer bitmask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (Default = None)
Specifies whether or not to invert the bits of the bitmask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposed to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bitmask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bitmask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (Default = True)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or `True` then values in the output
boolean mask array corresponding to "good" bit fields in ``bitfield``
will be `True` (if ``dtype`` is `numpy.bool_`) or 1 (if ``dtype`` is
of numerical type) and values of corresponding to "bad" flags will be
`False` (or 0). When ``good_mask_value`` is zero or `False` then the
values in the output boolean mask array corresponding to "good" bit
fields in ``bitfield`` will be `False` (if ``dtype`` is `numpy.bool_`)
or 0 (if ``dtype`` is of numerical type) and values of corresponding
to "bad" flags will be `True` (or 1).
dtype : data-type (Default = numpy.bool\_)
The desired data-type for the output binary mask array.
Returns
-------
mask : numpy.ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., `True` or `False` (or 1 or 0 for integer ``dtype``) according to
the values of the input ``bitfield`` elements, ``ignore_flags``
parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from stsci.tools import bitmask
>>> import numpy as np
>>> dqbits = np.asarray([[0,0,1,2,0,8,12,0],[10,4,0,0,0,16,6,0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0, dtype=int)
array([[1, 1, 0, 0, 1, 0, 0, 1],
[0, 0, 1, 1, 1, 0, 0, 1]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=0, dtype=bool)
array([[ True, True, False, False, True, False, False, True],
[False, False, True, True, True, False, False, True]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6, good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=~6, good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=6, flip_bits=True, good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags='~(2+4)', good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqbits, ignore_flags=[2, 4], flip_bits=True, good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
|
entailment
|
def interpret_bits_value(val):
"""
Converts input bits value from string to a single integer value or None.
If a comma- or '+'-separated set of values is provided, the values are summed.
.. note::
In order to flip the bits of the final result (after summation),
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag!
Parameters
----------
val : int, str, None
An integer bit mask or flag, `None`, or a comma- or '+'-separated
string list of integer bit values. If `val` is a `str` and if
it is prepended with '~', then the output bit mask will have its
bits flipped (compared to simple sum of input val).
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value
or `None` if input `val` parameter is `None` or an empty string.
If input string value was prepended with '~', then returned
value will have its bits flipped (inverse mask).
"""
if isinstance(val, int) or val is None:
return val
else:
val = str(val).strip()
if val.startswith('~'):
flip_bits = True
val = val[1:].lstrip()
else:
flip_bits = False
if val.startswith('('):
if val.endswith(')'):
val = val[1:-1].strip()
else:
raise ValueError('Unbalanced parentheses or incorrect syntax.')
if ',' in val:
valspl = val.split(',')
bitmask = 0
for v in valspl:
bitmask += int(v)
elif '+' in val:
valspl = val.split('+')
bitmask = 0
for v in valspl:
bitmask += int(v)
elif val.upper() in ['', 'NONE', 'INDEF']:
return None
else:
bitmask = int(val)
if flip_bits:
bitmask = ~bitmask
return bitmask
|
Converts input bits value from string to a single integer value or None.
If a comma- or '+'-separated set of values is provided, the values are summed.
.. note::
In order to flip the bits of the final result (after summation),
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag!
Parameters
----------
val : int, str, None
An integer bit mask or flag, `None`, or a comma- or '+'-separated
string list of integer bit values. If `val` is a `str` and if
it is prepended with '~', then the output bit mask will have its
bits flipped (compared to simple sum of input val).
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value
or `None` if input `val` parameter is `None` or an empty string.
If input string value was prepended with '~', then returned
value will have its bits flipped (inverse mask).
|
entailment
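To make the parsing rules above concrete, here is a small hedged sketch (the import path `stsci.tools.bitmask` is an assumption taken from the earlier examples in this document; the expected values follow directly from the summation and '~' rules):

# Parsing sketch for interpret_bits_value; the import path is an assumption.
from stsci.tools.bitmask import interpret_bits_value

assert interpret_bits_value(12) == 12         # integers pass through unchanged
assert interpret_bits_value('4,8') == 12      # comma-separated flags are summed
assert interpret_bits_value('4+8') == 12      # '+'-separated flags are summed
assert interpret_bits_value('~(4+8)') == -13  # leading '~' flips the bits: ~12 == -13
assert interpret_bits_value('None') is None   # '', 'NONE', 'INDEF' all map to None
assert interpret_bits_value(None) is None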
|
def bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=np.bool_):
"""
bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=numpy.bool\_)
Interprets an array of bit flags and converts it to a "binary" mask array.
This function is particularly useful to convert data quality arrays to
binary masks.
Parameters
----------
bitmask : numpy.ndarray
An array of bit flags. Values different from zero are interpreted as
"bad" values and values equal to zero are considered as "good" values.
However, see `ignore_bits` parameter on how to ignore some bits
in the `bitmask` array.
ignore_bits : int, str, None
An integer bit mask, `None`, or a comma- or '+'-separated
string list of integer bit values that indicate what bits in the
input `bitmask` should be *ignored* (i.e., zeroed). If `ignore_bits`
is a `str` and if it is prepended with '~', then the meaning
of the `ignore_bits` parameter will be reversed: now it will be
interpreted as a list of bits to be *used* (or *not ignored*) when
deciding what elements of the input `bitmask` array are "bad".
The `ignore_bits` parameter is the integer sum of all of the bit
values from the input `bitmask` array that should be considered
"good" when creating the output binary mask. For example, if
values in the `bitmask` array can be combinations
of 1, 2, 4, and 8 flags and one wants to consider that
values having *only* bit flags 2 and/or 4 as being "good",
then `ignore_bits` should be set to 2+4=6. Then a `bitmask` element
having values 2, 4, or 6 will be considered "good", while an
element with a value, e.g., 1+2=3, 4+8=12, etc. will be interpreted
as "bad".
Alternatively, one can enter a comma- or '+'-separated list
of integer bit flags that should be added to obtain the
final "good" bits. For example, both ``4,8`` and ``4+8``
are equivalent to setting `ignore_bits` to 12.
See :py:func:`interpret_bits_value` for examples.
| Setting `ignore_bits` to `None` effectively will interpret
all `bitmask` elements as "good" regardless of their value.
| Setting `ignore_bits` to 0 effectively will assume that all
non-zero elements in the input `bitmask` array are to be
interpreted as "bad".
| In order to reverse the meaning of the `ignore_bits`
parameter from indicating bits in the values of `bitmask`
elements that should be ignored when deciding which elements
are "good" (these are the elements that are zero after ignoring
`ignore_bits`), to indicating the bits should be used
exclusively in deciding whether a `bitmask` element is "good",
prepend '~' to the string value. For example, in order to use
**only** (or **exclusively**) flags 4 and 8 (2nd and 3rd bits)
in the values of the input `bitmask` array when deciding whether
or not that element is "good", set `ignore_bits` to ``~4+8``,
or ``~4,8``. To obtain the same effect with an `int` input value
(except for 0), enter -(4+8+1)=-13. Following this convention,
an `ignore_bits` string value of ``'~0'`` would be equivalent to
setting ``ignore_bits=None``.
good_mask_value : int, bool (Default = 1)
This parameter is used to derive the values that will be assigned to
the elements in the output `mask` array that correspond to the "good"
flags (that are 0 after zeroing bits specified by `ignore_bits`)
in the input `bitmask` array. When `good_mask_value` is non-zero or
`True` then values in the output mask array corresponding to "good"
bit flags in `bitmask` will be 1 (or `True` if `dtype` is `bool`) and
values corresponding to "bad" flags will be 0. When
`good_mask_value` is zero or `False` then values in the output mask
array corresponding to "good" bit flags in `bitmask` will be 0
(or `False` if `dtype` is `bool`) and values corresponding
to "bad" flags will be 1.
dtype : data-type (Default = numpy.bool\_)
The desired data-type for the output binary mask array.
Returns
-------
mask : numpy.ndarray
Returns an array whose elements can have two possible values,
e.g., 1 or 0 (or `True` or `False` if `dtype` is `bool`) according to
the values of the input `bitmask` elements, the `ignore_bits` parameter,
and the `good_mask_value` parameter.
"""
if not np.issubdtype(bitmask.dtype, np.integer):
raise TypeError("Input 'bitmask' array must be of integer type.")
ignore_bits = interpret_bits_value(ignore_bits)
if ignore_bits is None:
if good_mask_value:
mask = np.ones_like(bitmask, dtype=dtype)
else:
mask = np.zeros_like(bitmask, dtype=dtype)
return mask
ignore_bits = ~bitmask.dtype.type(ignore_bits)
mask = np.empty_like(bitmask, dtype=np.bool_)
np.bitwise_and(bitmask, ignore_bits, out=mask, casting='unsafe')
if good_mask_value:
np.logical_not(mask, out=mask)
return mask.astype(dtype=dtype, subok=False, copy=False)
|
bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=numpy.bool\_)
Interprets an array of bit flags and converts it to a "binary" mask array.
This function is particularly useful to convert data quality arrays to
binary masks.
Parameters
----------
bitmask : numpy.ndarray
An array of bit flags. Values different from zero are interpreted as
"bad" values and values equal to zero are considered as "good" values.
However, see `ignore_bits` parameter on how to ignore some bits
in the `bitmask` array.
ignore_bits : int, str, None
An integer bit mask, `None`, or a comma- or '+'-separated
string list of integer bit values that indicate what bits in the
input `bitmask` should be *ignored* (i.e., zeroed). If `ignore_bits`
is a `str` and if it is prepended with '~', then the meaning
of the `ignore_bits` parameter will be reversed: now it will be
interpreted as a list of bits to be *used* (or *not ignored*) when
deciding what elements of the input `bitmask` array are "bad".
The `ignore_bits` parameter is the integer sum of all of the bit
values from the input `bitmask` array that should be considered
"good" when creating the output binary mask. For example, if
values in the `bitmask` array can be combinations
of 1, 2, 4, and 8 flags and one wants to consider that
values having *only* bit flags 2 and/or 4 as being "good",
then `ignore_bits` should be set to 2+4=6. Then a `bitmask` element
having values 2, 4, or 6 will be considered "good", while an
element with a value, e.g., 1+2=3, 4+8=12, etc. will be interpreted
as "bad".
Alternatively, one can enter a comma- or '+'-separated list
of integer bit flags that should be added to obtain the
final "good" bits. For example, both ``4,8`` and ``4+8``
are equivalent to setting `ignore_bits` to 12.
See :py:func:`interpret_bits_value` for examples.
| Setting `ignore_bits` to `None` effectively will interpret
all `bitmask` elements as "good" regardless of their value.
| Setting `ignore_bits` to 0 effectively will assume that all
non-zero elements in the input `bitmask` array are to be
interpreted as "bad".
| In order to reverse the meaning of the `ignore_bits`
parameter from indicating bits in the values of `bitmask`
elements that should be ignored when deciding which elements
are "good" (these are the elements that are zero after ignoring
`ignore_bits`), to indicating the bits should be used
exclusively in deciding whether a `bitmask` element is "good",
prepend '~' to the string value. For example, in order to use
**only** (or **exclusively**) flags 4 and 8 (2nd and 3rd bits)
in the values of the input `bitmask` array when deciding whether
or not that element is "good", set `ignore_bits` to ``~4+8``,
or ``~4,8``. To obtain the same effect with an `int` input value
(except for 0), enter -(4+8+1)=-13. Following this convention,
an `ignore_bits` string value of ``'~0'`` would be equivalent to
setting ``ignore_bits=None``.
good_mask_value : int, bool (Default = 1)
This parameter is used to derive the values that will be assigned to
the elements in the output `mask` array that correspond to the "good"
flags (that are 0 after zeroing bits specified by `ignore_bits`)
in the input `bitmask` array. When `good_mask_value` is non-zero or
`True` then values in the output mask array corresponding to "good"
bit flags in `bitmask` will be 1 (or `True` if `dtype` is `bool`) and
values corresponding to "bad" flags will be 0. When
`good_mask_value` is zero or `False` then values in the output mask
array corresponding to "good" bit flags in `bitmask` will be 0
(or `False` if `dtype` is `bool`) and values corresponding
to "bad" flags will be 1.
dtype : data-type (Default = numpy.bool\_)
The desired data-type for the output binary mask array.
Returns
-------
mask : numpy.ndarray
Returns an array whose elements can have two possible values,
e.g., 1 or 0 (or `True` or `False` if `dtype` is `bool`) according to
the values of the input `bitmask` elements, the `ignore_bits` parameter,
and the `good_mask_value` parameter.
|
entailment
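A brief usage sketch of `bitmask2mask`, consistent with the semantics described above (the `stsci.tools.bitmask` module layout is assumed from the earlier examples):

# Usage sketch for bitmask2mask; the module path is an assumption.
import numpy as np
from stsci.tools import bitmask

dq = np.array([[0, 1, 2, 4, 6, 8, 12, 3]])  # must be an integer array

# Ignore flags 2 and 4: elements whose only set bits are 2 and/or 4 stay "good".
m = bitmask.bitmask2mask(dq, ignore_bits='2,4', good_mask_value=1, dtype=np.uint8)
print(m)  # expected: [[1 0 1 1 1 0 0 0]]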
|
def output_text(account, all_data, show_hourly=False):
"""Format data to get a readable output."""
print("""
#################################
# Hydro Quebec data for account #
# {}
#################################""".format(account))
for contract, data in all_data.items():
data['contract'] = contract
if data['period_total_bill'] is None:
data['period_total_bill'] = 0.0
if data['period_projection'] is None:
data['period_projection'] = 0.0
if data['period_mean_daily_bill'] is None:
data['period_mean_daily_bill'] = 0.0
output = ("""
----------------------------------------------------------------
Contract: {d[contract]}
===================
Balance: {d[balance]:.2f} $
Period Info
===========
Period day number: {d[period_length]:d}
Period total days: {d[period_total_days]:d} days
Period current bill
===================
Total Bill: {d[period_total_bill]:.2f} $
Projection bill: {d[period_projection]:.2f} $
Mean Daily Bill: {d[period_mean_daily_bill]:.2f} $
Total period consumption
========================
Lower price: {d[period_lower_price_consumption]:.2f} kWh
Higher price: {d[period_higher_price_consumption]:.2f} kWh
Total: {d[period_total_consumption]:.2f} kWh
Mean daily: {d[period_mean_daily_consumption]:.2f} kWh""")
print(output.format(d=data))
if data.get("period_average_temperature") is not None:
output2 = ("""Temperature: {d[period_average_temperature]:d} °C""")
print(output2.format(d=data))
if data.get("yesterday_average_temperature") is not None:
output3 = ("""
Yesterday consumption
=====================
Temperature: {d[yesterday_average_temperature]:d} °C
Lower price: {d[yesterday_lower_price_consumption]:.2f} kWh
Higher price: {d[yesterday_higher_price_consumption]:.2f} kWh
Total: {d[yesterday_total_consumption]:.2f} kWh""")
print(output3.format(d=data))
if show_hourly:
msg = ("""
Yesterday consumption details
-----------------------------
Hour | Temperature | Lower price consumption | Higher price consumption | total consumption
""")
for hdata in data['yesterday_hourly_consumption']:
msg += ("{d[hour]} | {d[temp]:8d} °C | {d[lower]:19.2f} kWh | "
"{d[high]:20.2f} kWh | {d[total]:.2f} kWh\n").format(d=hdata)
print(msg)
if data['annual_total_bill']:
output3 = ("""
Annual Total
============
Start date: {d[annual_date_start]}
End date: {d[annual_date_end]}
Total bill: {d[annual_total_bill]} $
Mean daily bill: {d[annual_mean_daily_bill]} $
Total consumption: {d[annual_total_consumption]} kWh
Mean daily consumption: {d[annual_mean_daily_consumption]} kWh
kWh price: {d[annual_kwh_price_cent]} ¢
""")
print(output3.format(d=data))
|
Format data to get a readable output.
|
entailment
|
def output_influx(data):
"""Print data using influxDB format."""
for contract in data:
# Pop yesterdays data
yesterday_data = data[contract]['yesterday_hourly_consumption']
del data[contract]['yesterday_hourly_consumption']
# Print general data
out = "pyhydroquebec,contract=" + contract + " "
for index, key in enumerate(data[contract]):
if index != 0:
out = out + ","
if key in ("annual_date_start", "annual_date_end"):
out += key + "=\"" + str(data[contract][key]) + "\""
else:
out += key + "=" + str(data[contract][key])
out += " " + str(int(datetime.datetime.now(HQ_TIMEZONE).timestamp() * 1000000000))
print(out)
# Print yesterday values
yesterday = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
yesterday = yesterday.replace(minute=0, hour=0, second=0, microsecond=0)
for hour in yesterday_data:
msg = "pyhydroquebec,contract={} {} {}"
data = ",".join(["{}={}".format(key, value) for key, value in hour.items()
if key != 'hour'])
datatime = datetime.datetime.strptime(hour['hour'], '%H:%M:%S')
yesterday = yesterday.replace(hour=datatime.hour)
yesterday_str = str(int(yesterday.timestamp() * 1000000000))
print(msg.format(contract, fields, yesterday_str))
|
Print data using influxDB format.
|
entailment
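For reference, a minimal sketch of the InfluxDB line protocol this function emits; the contract number, field names, and values below are hypothetical, chosen only to show the output shape:

# Hypothetical input for output_influx; all keys and values are illustrative.
sample = {
    '1234567': {
        'balance': 42.5,
        'period_total_bill': 10.0,
        'yesterday_hourly_consumption': [
            {'hour': '01:00:00', 'temp': -5, 'lower': 0.25, 'high': 0.0, 'total': 0.25},
        ],
    }
}
# output_influx(sample) would print lines shaped like:
# pyhydroquebec,contract=1234567 balance=42.5,period_total_bill=10.0 <ns-timestamp>
# pyhydroquebec,contract=1234567 temp=-5,lower=0.25,high=0.0,total=0.25 <ns-timestamp>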
|
def which_darwin_linkage(force_otool_check=False):
""" Convenience function. Returns one of ('x11', 'aqua') in answer to the
question of whether this is an X11-linked Python/tkinter, or a natively
built (framework, Aqua) one. This is only for OSX.
This relies on the assumption that on OSX, PyObjC is installed
in the Framework builds of Python. If it doesn't find PyObjC,
this inspects the actual tkinter library binary via otool.
One driving requirement here is to try to make the determination quickly
and quietly without actually importing/loading any GUI libraries. We
even want to avoid importing tkinter if we can.
"""
# sanity check
assert sys.platform=='darwin', 'Incorrect usage, not on OSX'
# If not forced to run otool, then make some quick and dirty
# simple checks/assumptions, which do not add to startup time and do not
# attempt to initialize any graphics.
if not force_otool_check:
# There will (for now) only ever be an aqua-linked Python/tkinter
# when using Ureka on darwin, so this is an easy short-circuit check.
if 'UR_DIR' in os.environ:
return "aqua"
# There will *usually* be PyObjC modules on sys.path on the natively-
# linked Python. This is assumed to be always correct on Python 2.x, as
# of 2012. This is kludgy but quick and effective.
sp = ",".join(sys.path)
sp = sp.lower().strip(',')
if '/pyobjc' in sp or 'pyobjc,' in sp or 'pyobjc/' in sp or sp.endswith('pyobjc'):
return "aqua"
# Try one more thing - look for the physical PyObjC install dir under site-packages
# The assumption above using sys.path does not seem to be correct as of the
# combination of Python2.7.9/PyObjC3.0.4/2015.
sitepacksloc = os.path.split(os.__file__)[0]+'/site-packages/objc'
if os.path.exists(sitepacksloc):
return "aqua"
# OK, no trace of PyObjC found - need to fall through to the forced otool check.
# Use otool shell command
if PY3K:
import tkinter as TKNTR
else:
import Tkinter as TKNTR
import subprocess
try:
tk_dyn_lib = TKNTR._tkinter.__file__
except AttributeError: # happens on Ureka
if 'UR_DIR' in os.environ:
return 'aqua'
else:
return 'unknown'
libs = subprocess.check_output(('/usr/bin/otool', '-L', tk_dyn_lib)).decode('ascii')
if libs.find('/libX11.') >= 0:
return "x11"
else:
return "aqua"
|
Convenience function. Returns one of ('x11', 'aqua') in answer to the
question of whether this is an X11-linked Python/tkinter, or a natively
built (framework, Aqua) one. This is only for OSX.
This relies on the assumption that on OSX, PyObjC is installed
in the Framework builds of Python. If it doesn't find PyObjC,
this inspects the actual tkinter library binary via otool.
One driving requirement here is to try to make the determination quickly
and quietly without actually importing/loading any GUI libraries. We
even want to avoid importing tkinter if we can.
|
entailment
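A hedged usage sketch: the function asserts it is running on macOS, so callers should guard on the platform first (it is assumed here that `which_darwin_linkage` is imported from this module):

# Guarded call sketch; which_darwin_linkage is assumed imported from this module.
import sys

if sys.platform == 'darwin':
    linkage = which_darwin_linkage()  # 'x11', 'aqua', or 'unknown' in odd setups
    if linkage == 'aqua':
        print('Natively built (framework/Aqua) Python/tkinter.')
    else:
        print('X11-linked Python/tkinter (or undetermined).')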
|
def get_dc_owner(raises, mask_if_self):
""" Convenience function to return owner of /dev/console.
If raises is True, this raises an exception on any error.
If not, it returns any error string as the owner name.
If owner is self, and if mask_if_self, returns "<self>"."""
try:
from pwd import getpwuid
owner_uid = os.stat('/dev/console').st_uid
self_uid = os.getuid()
if mask_if_self and owner_uid == self_uid:
return "<self>"
owner_name = getpwuid(owner_uid).pw_name
return owner_name
except Exception as e:
if raises:
raise e
else:
return str(e)
|
Convenience function to return owner of /dev/console.
If raises is True, this raises an exception on any error.
If not, it returns any error string as the owner name.
If owner is self, and if mask_if_self, returns "<self>".
|
entailment
|
def to_jd(year, month, day):
'''Determine Julian day from Bahai date'''
gy = year - 1 + EPOCH_GREGORIAN_YEAR
if month != 20:
m = 0
else:
if isleap(gy + 1):
m = -14
else:
m = -15
return gregorian.to_jd(gy, 3, 20) + (19 * (month - 1)) + m + day
|
Determine Julian day from Bahai date
|
entailment
|
def from_jd(jd):
'''Calculate Bahai date from Julian day'''
jd = trunc(jd) + 0.5
g = gregorian.from_jd(jd)
gy = g[0]
bstarty = EPOCH_GREGORIAN_YEAR
if jd <= gregorian.to_jd(gy, 3, 20):
x = 1
else:
x = 0
# verify this next line...
bys = gy - (bstarty + (((gregorian.to_jd(gy, 1, 1) <= jd) and x)))
year = bys + 1
days = jd - to_jd(year, 1, 1)
bld = to_jd(year, 20, 1)
if jd >= bld:
month = 20
else:
month = trunc(days / 19) + 1
day = int((jd + 1) - to_jd(year, month, 1))
return year, month, day
|
Calculate Bahai date from Julian day
|
entailment
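A quick round-trip sketch for the Bahai converters (assuming both functions and the `gregorian` helper come from the same package, e.g. convertdate; printed rather than asserted, since the year-boundary line above is flagged for verification):

# Round-trip sketch for the Bahai calendar converters (module layout assumed).
jd = to_jd(172, 1, 1)   # 1 Baha 172 B.E. -> Julian day number
print(jd)
print(from_jd(jd))      # ideally round-trips to (172, 1, 1)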
|
def to_jd(baktun, katun, tun, uinal, kin):
'''Determine Julian day from Mayan long count'''
return EPOCH + (baktun * 144000) + (katun * 7200) + (tun * 360) + (uinal * 20) + kin
|
Determine Julian day from Mayan long count
|
entailment
|
def from_jd(jd):
'''Calculate Mayan long count from Julian day'''
d = jd - EPOCH
baktun = trunc(d / 144000)
d = (d % 144000)
katun = trunc(d / 7200)
d = (d % 7200)
tun = trunc(d / 360)
d = (d % 360)
uinal = trunc(d / 20)
kin = int((d % 20))
return (baktun, katun, tun, uinal, kin)
|
Calculate Mayan long count from Julian day
|
entailment
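Because the long-count conversion is pure positional arithmetic around `EPOCH` (mixed radix: 144000, 7200, 360, 20, 1 days), the round trip is exact for in-range counts. A short check, assuming the Mayan `to_jd`/`from_jd` above are importable:

# Long-count round trip; exact by construction since both directions are
# inverse mixed-radix conversions relative to EPOCH.
lc = (9, 12, 11, 5, 18)
jd = to_jd(*lc)
assert from_jd(jd) == lc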
|
def to_haab(jd):
'''Determine Mayan Haab "month" and day from Julian day'''
# Number of days since the start of the long count
lcount = trunc(jd) + 0.5 - EPOCH
# Long Count begins 348 days after the start of the cycle
day = (lcount + 348) % 365
count = day % 20
month = trunc(day / 20)
return int(count), HAAB_MONTHS[month]
|
Determine Mayan Haab "month" and day from Julian day
|
entailment
|
def to_tzolkin(jd):
'''Determine Mayan Tzolkin "month" and day from Julian day'''
lcount = trunc(jd) + 0.5 - EPOCH
day = amod(lcount + 4, 13)
name = amod(lcount + 20, 20)
return int(day), TZOLKIN_NAMES[int(name) - 1]
|
Determine Mayan Tzolkin "month" and day from Julian day
|
entailment
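The two calendar-round coordinates of a single Julian day can be read off together; with the common GMT correlation constant for `EPOCH`, the era date itself should come out as the well-known 4 Ajaw 8 Kumk'u (a sketch assuming `to_haab`, `to_tzolkin`, and `EPOCH` from this module):

# Calendar-round sketch: Haab and Tzolk'in positions of the era date.
day_h, month_h = to_haab(EPOCH)     # expected 8 Kumk'u under the GMT correlation
day_t, name_t = to_tzolkin(EPOCH)   # expected 4 Ajaw
print(day_t, name_t, day_h, month_h)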
|
def _haab_count(day, month):
'''Return the count of the given haab in the cycle. e.g. 0 Pop == 1, 5 Wayeb' == 365'''
if day < 0 or day > 19:
raise IndexError("Invalid day number")
try:
i = HAAB_MONTHS.index(month)
except ValueError:
raise ValueError("'{0}' is not a valid Haab' month".format(month))
return min(i * 20, 360) + day
|
Return the count of the given haab in the cycle. e.g. 0 Pop == 1, 5 Wayeb' == 365
|
entailment
|
def tzolkin_generator(number=None, name=None):
'''For a given tzolkin name/number combination, return a generator
that yields the cycle, starting with the input'''
# By default, it will start at the beginning
number = number or 13
name = name or "Ajaw"
if number > 13:
raise ValueError("Invalid day number")
if name not in TZOLKIN_NAMES:
raise ValueError("Invalid day name")
count = _tzolkin_count(number, name)
ranged = itertools.chain(list(range(count, 260)), list(range(1, count)))
for i in ranged:
yield _tzolkin_from_count(i)
|
For a given tzolkin name/number combination, return a generator
that yields the cycle, starting with the input
|
entailment
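A peek at the generator's behavior (a sketch; the first yield should reproduce the input combination, after which the 260-day cycle advances one day at a time):

# Usage sketch for tzolkin_generator; the first item should echo the input.
import itertools

gen = tzolkin_generator(4, 'Ajaw')
print(list(itertools.islice(gen, 3)))  # e.g. [(4, 'Ajaw'), (5, 'Imix'), (6, "Ik'")]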
|
def longcount_generator(baktun, katun, tun, uinal, kin):
'''Generate long counts, starting with input'''
j = to_jd(baktun, katun, tun, uinal, kin)
while True:
yield from_jd(j)
j = j + 1
|
Generate long counts, starting with input
|
entailment
|
def next_haab(month, jd):
'''For a given haab month and a julian day count, find the next start of that month on or after the JDC'''
if jd < EPOCH:
raise IndexError("Input day is before Mayan epoch.")
hday, hmonth = to_haab(jd)
if hmonth == month:
days = 1 - hday
else:
count1 = _haab_count(hday, hmonth)
count2 = _haab_count(1, month)
# Find number of days between haab of given jd and desired haab
days = (count2 - count1) % 365
# add in the number of days and return new jd
return jd + days
|
For a given haab month and a julian day count, find the next start of that month on or after the JDC
|
entailment
|
def next_tzolkin(tzolkin, jd):
'''For a given tzolk'in day, and a julian day count, find the next occurrence of that tzolk'in after the date'''
if jd < EPOCH:
raise IndexError("Input day is before Mayan epoch.")
count1 = _tzolkin_count(*to_tzolkin(jd))
count2 = _tzolkin_count(*tzolkin)
add_days = (count2 - count1) % 260
return jd + add_days
|
For a given tzolk'in day, and a julian day count, find the next occurrence of that tzolk'in after the date
|
entailment
|
def next_tzolkin_haab(tzolkin, haab, jd):
'''For a given haab-tzolk'in combination, and a Julian day count, find the next occurrence of the combination after the date'''
# get H & T of input jd, and their place in the 18,980 day cycle
haabcount = _haab_count(*to_haab(jd))
haab_desired_count = _haab_count(*haab)
# How many days between the input day and the desired day?
haab_days = (haab_desired_count - haabcount) % 365
possible_haab = set(h + haab_days for h in range(0, 18980, 365))
tzcount = _tzolkin_count(*to_tzolkin(jd))
tz_desired_count = _tzolkin_count(*tzolkin)
# How many days between the input day and the desired day?
tzolkin_days = (tz_desired_count - tzcount) % 260
possible_tz = set(t + tzolkin_days for t in range(0, 18980, 260))
try:
return possible_tz.intersection(possible_haab).pop() + jd
except KeyError:
raise IndexError("That Haab'-Tzolk'in combination isn't possible")
|
For a given haab-tzolk'in combination, and a Julian day count, find the next occurrence of the combination after the date
|
entailment
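The set intersection above is really a cycle-alignment computation: the Haab repeats every 365 days and the Tzolk'in every 260, so a joint combination recurs every lcm(365, 260) = 18,980 days (one Calendar Round), and it exists at all only when the two day offsets agree modulo gcd(365, 260) = 5 — otherwise the intersection is empty and the function raises IndexError. A standalone sketch of that arithmetic:

# Cycle-alignment arithmetic behind next_tzolkin_haab (illustrative only).
from math import gcd

assert gcd(365, 260) == 5
assert 365 * 260 // gcd(365, 260) == 18980   # one Calendar Round

def combination_possible(haab_days, tzolkin_days):
    # A joint date exists iff the two offsets are congruent mod gcd(365, 260).
    return (haab_days - tzolkin_days) % gcd(365, 260) == 0

print(combination_possible(10, 5))   # True: offsets differ by a multiple of 5
print(combination_possible(10, 7))   # False: no day satisfies both cycles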
|
def haab_monthcalendar(baktun=None, katun=None, tun=None, uinal=None, kin=None, jdc=None):
'''For a given long count, return a calendar of the current haab month, divided into tzolkin "weeks"'''
if not jdc:
jdc = to_jd(baktun, katun, tun, uinal, kin)
haab_number, haab_month = to_haab(jdc)
first_j = jdc - haab_number + 1
tzolkin_start_number, tzolkin_start_name = to_tzolkin(first_j)
gen_longcount = longcount_generator(*from_jd(first_j))
gen_tzolkin = tzolkin_generator(tzolkin_start_number, tzolkin_start_name)
# 13 day long tzolkin 'weeks'
lpad = tzolkin_start_number - 1
rpad = 13 - (tzolkin_start_number + 19 % 13)
monlen = month_length(haab_month)
days = [None] * lpad + list(range(1, monlen + 1)) + rpad * [None]
def g(x, generate):
if x is None:
return None
return next(generate)
return [[(k, g(k, gen_tzolkin), g(k, gen_longcount)) for k in days[i:i + 13]] for i in range(0, len(days), 13)]
|
For a given long count, return a calendar of the current haab month, divided into tzolkin "weeks"
|
entailment
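Usage sketch (assumes the function and its helpers above are importable; each cell is a triple of Haab day number, tzolk'in tuple, and long count, with None padding the partial 13-day "weeks"):

# Build the Haab month calendar around long count 9.12.11.5.18 (illustrative).
weeks = haab_monthcalendar(9, 12, 11, 5, 18)
for week in weeks:
    print(week)  # rows of 13 (day, tzolkin, longcount) triples, None-padded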
|
def _update_data(self, data={}):
'''Update the data in this object.'''
# Store the changes to prevent this update from affecting it
pending_changes = self._changes or {}
try:
del self._changes
except AttributeError:
pass
# Map custom fields into our custom fields object
try:
custom_field_data = data.pop('custom_fields')
except KeyError:
pass
else:
self.custom_fields = Custom_Fields(custom_field_data)
# Map all other dictionary data to object attributes
for key, value in data.items():
lookup_key = self._field_type.get(key, key)
# if it's a datetime object, turn into proper DT object
if lookup_key == 'datetime' or lookup_key == 'date':
self.__dict__[key] = datetime_parse(value)
else:
# Check to see if there's cache data for this item.
# Will return an object if it's recognized as one.
self.__dict__[key] = self._redmine.check_cache(lookup_key, value)
# Set the changes dict to track all changes from here on out
self._changes = pending_changes
|
Update the data in this object.
|
entailment
|
def _remap_tag_to_tag_id(cls, tag, new_data):
'''Remaps a given changed field from tag to tag_id.'''
try:
value = new_data[tag]
except KeyError:
# If tag wasn't changed, just return
return
tag_id = tag + '_id'
try:
# Remap the ID change to the required field
new_data[tag_id] = value['id']
except (TypeError, KeyError):
try:
# Try and grab the id of the object
new_data[tag_id] = value.id
except AttributeError:
# If the changes field is not a dict or object, just use whatever value was given
new_data[tag_id] = value
# Remove the tag from the changed data
del new_data[tag]
|
Remaps a given changed field from tag to tag_id.
|
entailment
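A standalone illustration of the remapping contract (the dict contents are invented; in real use this runs on an item's pending-changes dict):

# Illustrative input/output for _remap_tag_to_tag_id; values are invented.
changes = {'status': {'id': 3, 'name': 'Resolved'}, 'subject': 'New title'}
# After _remap_tag_to_tag_id(cls, 'status', changes) the dict would become:
#   {'subject': 'New title', 'status_id': 3}
# i.e. the nested object's id moves to 'status_id' and 'status' is dropped.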
|
def _add_item_manager(self, key, item_class, **paths):
'''
Add an item manager to this object.
'''
updated_paths = {}
for path_type, path_value in paths.items():
updated_paths[path_type] = path_value.format(**self.__dict__)
manager = Redmine_Items_Manager(self._redmine, item_class,
**updated_paths)
self.__dict__[key] = manager
|
Add an item manager to this object.
|
entailment
|