desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def suppress(self):
    """Suppress the output of this ParserElement.

    Useful to keep punctuation from cluttering up returned output.
    Returns a Suppress wrapper around this element.
    """
    return Suppress(self)
def leaveWhitespace(self):
    """Disable skipping of whitespace before matching this element's pattern.

    Normally only used internally by the pyparsing module, but may be needed
    in some whitespace-sensitive grammars.  Returns self (fluent style).
    """
    self.skipWhitespace = False
    return self
def setWhitespaceChars(self, chars):
    """Override the default whitespace characters for this element.

    Re-enables whitespace skipping and marks the whitespace set as
    explicitly set (so it is not copied from the class default).
    Returns self (fluent style).
    """
    self.skipWhitespace = True
    self.whiteChars = chars
    self.copyDefaultWhiteChars = False
    return self
def parseWithTabs(self):
    """Keep <TAB> characters instead of expanding them to spaces.

    Must be called before parseString when the grammar contains elements
    that match <TAB> characters.  Returns self (fluent style).
    """
    self.keepTabs = True
    return self
def ignore(self, other):
    """Define an expression (e.g. comments) to be ignored while matching.

    May be called repeatedly to register multiple ignorable patterns.
    A Suppress instance is added as-is (avoiding duplicates); anything
    else is wrapped in Suppress first.  Returns self (fluent style).
    """
    if isinstance(other, Suppress):
        if other not in self.ignoreExprs:
            self.ignoreExprs.append(other)
    else:
        self.ignoreExprs.append(Suppress(other))
    return self
def setDebugActions(self, startAction, successAction, exceptionAction):
    """Enable display of debugging messages while matching.

    Each action falls back to the module default when a falsy value is
    passed.  Returns self (fluent style).
    """
    self.debugActions = (
        startAction or _defaultStartDebugAction,
        successAction or _defaultSuccessDebugAction,
        exceptionAction or _defaultExceptionDebugAction,
    )
    self.debug = True
    return self
def setDebug(self, flag=True):
    """Enable (flag=True) or disable (flag=False) debug messages.

    Enabling installs the module-default debug actions; disabling simply
    clears the debug flag.  Returns self (fluent style).
    """
    if flag:
        self.setDebugActions(_defaultStartDebugAction,
                             _defaultSuccessDebugAction,
                             _defaultExceptionDebugAction)
    else:
        self.debug = False
    return self
def validate(self, validateTrace=None):
    """Check defined expressions for valid structure; check for infinite
    recursive definitions.

    validateTrace is accepted for interface compatibility (the body does
    not read it).  Fix: the parameter previously defaulted to a mutable
    list ([]), which is shared across calls — default to None instead.
    """
    self.checkRecursion([])
def parseFile(self, file_or_filename):
    """Execute the parse expression on the given file or filename.

    If a filename is specified (instead of a file object), the entire
    file is opened, read, and closed before parsing.

    Fix: the original leaked the file handle if read() raised after
    open(); use a context manager so the file is always closed.
    """
    try:
        # EAFP: anything with a read() method is treated as a file object.
        file_contents = file_or_filename.read()
    except AttributeError:
        with open(file_or_filename, 'rb') as f:
            file_contents = f.read()
    return self.parseString(file_contents)
def setDefaultKeywordChars(chars):
    """Override the default Keyword characters.

    NOTE: staticmethod-style (no self) — mutates the class-level
    Keyword.DEFAULT_KEYWORD_CHARS used by subsequently created Keywords.
    """
    Keyword.DEFAULT_KEYWORD_CHARS = chars
'The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags.'
def __init__(self, pattern, flags=0):
super(Regex, self).__init__() if (len(pattern) == 0): warnings.warn('null string passed to Regex; use Empty() instead', SyntaxWarning, stacklevel=2) self.pattern = pattern self.flags = flags try: self.re = re.compile(self.pattern, self.flags) self.reStrin...
'Defined with the following parameters: - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash (default=None) - escQuote - special quote sequence to escape an embedded quote string (such as SQL\'s "" to escape an embedded ") (defau...
def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
super(QuotedString, self).__init__() quoteChar = quoteChar.strip() if (len(quoteChar) == 0): warnings.warn('quoteChar cannot be the empty string', SyntaxWarning, stacklevel=2) raise SyntaxError() if (endQuoteChar is None): endQuoteChar = quoteChar else: ...
def leaveWhitespace(self):
    """Extend leaveWhitespace from the base class: also invoke
    leaveWhitespace on all contained expressions.

    Contained expressions are copied first so shared sub-expressions are
    not mutated.  Returns self (fluent style).
    """
    self.skipWhitespace = False
    self.exprs = [expr.copy() for expr in self.exprs]
    for expr in self.exprs:
        expr.leaveWhitespace()
    return self
def refreshGUI(self):
    """Give the plot a brief pause so data is drawn and the GUI's event
    loop can run."""
    plt.pause(0.0001)
'Parameters: _inputDimensions: The size of the input. (m,n) will give a size m x n _columnDimensions: The size of the 2 dimensional array of columns'
def __init__(self, inputDimensions, columnDimensions):
self.inputDimensions = inputDimensions self.columnDimensions = columnDimensions self.inputSize = np.array(inputDimensions).prod() self.columnNumber = np.array(columnDimensions).prod() self.inputArray = np.zeros(self.inputSize, dtype=uintType) self.activeArray = np.zeros(self.columnNumber, dtype=...
def createInput(self):
    """Populate self.inputArray in place with a fresh random binary vector."""
    print(('-' * 70) + 'Creating a random input vector' + ('-' * 70))
    # Clear the whole array, then set each bit to 0 or 1 at random.
    self.inputArray[0:] = 0
    for i in range(self.inputSize):
        self.inputArray[i] = random.randrange(2)
def run(self):
    """Run the spatial pooler with self.inputArray, writing active columns
    into self.activeArray, and print the indices of active columns."""
    print(('-' * 80) + 'Computing the SDR' + ('-' * 80))
    # True enables learning during this compute step.
    self.sp.compute(self.inputArray, True, self.activeArray)
    print(self.activeArray.nonzero())
def addNoise(self, noiseLevel):
    """Flip the value of noiseLevel * inputSize randomly chosen input bits.

    :param noiseLevel: fraction of total input bits to flip.  Positions are
        drawn independently, so the same bit may be flipped more than once.
    """
    flips = int(noiseLevel * self.inputSize)
    for _ in range(flips):
        pos = int(random.random() * self.inputSize)
        self.inputArray[pos] = 0 if self.inputArray[pos] == 1 else 1
def compute(self, inputs, outputs):
    """Run one iteration of IdentityRegion's compute: copy the 'in' input
    into the 'out' output buffer in place."""
    source = inputs['in']
    outputs['out'][:] = source
'Return the Spec for IdentityRegion.'
@classmethod def getSpec(cls):
spec = {'description': IdentityRegion.__doc__, 'singleNodeOnly': True, 'inputs': {'in': {'description': 'The input vector.', 'dataType': 'Real32', 'count': 0, 'required': True, 'regionLevel': False, 'isDefaultInput': True, 'requireSplitterMap': False}}, 'outputs': {'out': {'description': 'A copy of t...
'Test with fast learning, make sure PAM allows us to train with fewer repeats of the training data.'
def testFastLearning(self):
numOnBitsPerPattern = 3 baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10, sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern, includeCPP=INCLUDE_CPP_TM, numCols=None, activationThreshold=numOnBitsPerPattern, minThreshold=numOnBitsPerPattern, newSynapseCount=numOnBits...
'Test with slow learning, make sure PAM allows us to train with fewer repeats of the training data.'
def testSlowLearning(self):
numOnBitsPerPattern = 3 baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10, sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern, includeCPP=INCLUDE_CPP_TM, numCols=None, activationThreshold=numOnBitsPerPattern, minThreshold=numOnBitsPerPattern, newSynapseCount=numOnBits...
'Test with slow learning, some overlap in the patterns, and TM thresholds of 80% of newSynapseCount Make sure PAM allows us to train with fewer repeats of the training data.'
def testSlowLearningWithOverlap(self):
if SHORT: self.skipTest('Test skipped by default. Enable with --long.') numOnBitsPerPattern = 5 baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10, sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern, patternOverlap=2, includeCPP=INCLUDE_CP...
'Test with "Forbes-like" data. A bunch of sequences of lengths between 2 and 10 elements long. We will test with both fast and slow learning. Make sure PAM allows us to train with fewer repeats of the training data.'
def testForbesLikeData(self):
if SHORT: self.skipTest('Test skipped by default. Enable with --long.') numOnBitsPerPattern = 3 baseParams = dict(seqFunction=buildSequencePool, numSequences=20, seqLen=[3, 10], numPatterns=10, numOnBitsPerPattern=numOnBitsPerPattern, patternOverlap=1, includeCPP=INCLUDE_CPP_TM, nu...
'Test creation, pickling, and basic run of learning and inference.'
def _basicTest(self, tm=None):
trainingSet = _getSimplePatterns(10, 10) for _ in range(2): for seq in trainingSet[0:5]: for _ in range(10): tm.learn(seq) tm.reset() print 'Learning completed' print 'Running inference' tm.collectStats = True for seq in trainingSet[0:5]: ...
def _printOneTrainingVector(self, x):
    """Print a single vector succinctly: '1' for each nonzero element,
    '.' for each zero."""
    print(''.join('1' if k != 0 else '.' for k in x))
def _printAllTrainingSequences(self, trainingSequences):
    """Print every training sequence, one banner per sequence, delegating
    each pattern to _printOneTrainingVector."""
    for i, seq in enumerate(trainingSequences):
        print('============= Sequence ' + str(i) + ' =================')
        for pattern in seq:
            self._printOneTrainingVector(pattern)
def _setVerbosity(self, verbosity, tm, tmPy):
    """Set the verbosity level on both TM instances (and the C++ cells4
    object inside the first)."""
    tm.cells4.setVerbosity(verbosity)
    tm.verbosity = verbosity
    tmPy.verbosity = verbosity
'Create an instance of the appropriate temporal memory. We isolate all parameters as constants specified here.'
def _createTMs(self, numCols, fixedResources=False, checkSynapseConsistency=True):
minThreshold = 4 activationThreshold = 8 newSynapseCount = 15 initialPerm = 0.3 connectedPerm = 0.5 permanenceInc = 0.1 permanenceDec = 0.05 if fixedResources: permanenceDec = 0.1 maxSegmentsPerCell = 5 maxSynapsesPerSegment = 15 globalDecay = 0 ma...
def _getSimplePatterns(self, numOnes, numPatterns):
    """Build very simple patterns: pattern i has bits
    [i*numOnes, (i+1)*numOnes) set in a vector of numOnes*numPatterns bits.

    Used as sequence elements when building training sets.  Returns a list
    of float32 numpy vectors.
    """
    numCols = numOnes * numPatterns
    patterns = []
    for i in range(numPatterns):
        vec = numpy.zeros(numCols, dtype='float32')
        vec[i * numOnes:(i + 1) * numOnes] = 1
        patterns.append(vec)
    return patterns
'A simple sequence of 5 patterns. The left half of the vector contains the pattern elements, each with numOnes consecutive bits. The right half contains numOnes random bits. The function returns a pair: trainingSequences: A list containing numRepetitions instances of the above sequence testSequence: A single...
def _buildSegmentLearningTrainingSet(self, numOnes=10, numRepetitions=10):
numPatterns = 5 numCols = ((2 * numPatterns) * numOnes) halfCols = (numPatterns * numOnes) numNoiseBits = numOnes p = self._getSimplePatterns(numOnes, numPatterns) trainingSequences = [] for _ in xrange(numRepetitions): sequence = [] for j in xrange(numPatterns): ...
'Three simple sequences, composed of the same 5 static patterns. The left half of the vector contains the pattern elements, each with numOnes consecutive bits. The right half contains numOnes random bits. Sequence 1 is: p0, p1, p2, p3, p4 Sequence 2 is: p4, p3, p2, p1, p0 Sequence 3 is: p2, p0, p4, p1, p3 The function ...
def _buildSL2TrainingSet(self, numOnes=10, numRepetitions=10):
numPatterns = 5 numCols = ((2 * numPatterns) * numOnes) halfCols = (numPatterns * numOnes) numNoiseBits = numOnes p = self._getSimplePatterns(numOnes, numPatterns) numSequences = 3 indices = [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0], [2, 0, 4, 1, 3]] trainingSequences = [] for i in xrange((n...
'Train the given TM once on the entire training set. on the Test a single set of sequences once and check that individual predictions reflect the true relative frequencies. Return a success code. Success code is 1 for pass, 0 for fail.'
def _testSegmentLearningSequence(self, tms, trainingSequences, testSequences, doResets=True):
if (testSequences == None): testSequences = trainingSequences (cppTM, pyTM) = (tms[0], tms[1]) if (cppTM is not None): assert (fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True) if (g_options.verbosity > 0): print '============= Training =================' ...
'Test segment learning'
def _testSL1(self, numOnes=10, numRepetitions=6, fixedResources=False, checkSynapseConsistency=True):
if fixedResources: testName = 'TestSL1_FS' else: testName = 'TestSL1' print ('\nRunning %s...' % testName) (trainingSet, testSet) = self._buildSegmentLearningTrainingSet(numOnes, numRepetitions) numCols = len(trainingSet[0][0]) tms = self._createTMs(numCols=numCols, fixedResou...
'Test segment learning'
def _testSL2(self, numOnes=10, numRepetitions=10, fixedResources=False, checkSynapseConsistency=True):
if fixedResources: testName = 'TestSL2_FS' else: testName = 'TestSL2' print ('\nRunning %s...' % testName) (trainingSet, testSet) = self._buildSL2TrainingSet(numOnes, numRepetitions) numCols = len(trainingSet[0][0]) tms = self._createTMs(numCols=numCols, fixedResources=fixedRe...
def test_SL1NoFixedResources(self):
    """Test segment learning without fixed resources."""
    self._testSL1(fixedResources=False,
                  checkSynapseConsistency=g_options.long)
def test_SL1WithFixedResources(self):
    """Test segment learning with fixed resources (only with --long)."""
    if not g_options.long:
        print('Test %s only enabled with the --long option' % self._testMethodName)
        return
    self._testSL1(fixedResources=True,
                  checkSynapseConsistency=g_options.long)
def test_SL2NoFixedResources(self):
    """Test segment learning without fixed resources (only with --long)."""
    if not g_options.long:
        print('Test %s only enabled with the --long option' % self._testMethodName)
        return
    self._testSL2(fixedResources=False,
                  checkSynapseConsistency=g_options.long)
def test_SL2WithFixedResources(self):
    """Test segment learning with fixed resources (only with --long)."""
    if not g_options.long:
        print('Test %s only enabled with the --long option' % self._testMethodName)
        return
    self._testSL2(fixedResources=True,
                  checkSynapseConsistency=g_options.long)
def __init__(self, requestedActivities):
    """requestedActivities: a sequence of PeriodicActivityRequest elements.

    Each request is turned into an internal Activity whose iteratorHolder
    counts down `period` ticks.
    """
    self.__activities = []
    for req in requestedActivities:
        activity = self.Activity(repeating=req.repeating,
                                 period=req.period,
                                 cb=req.cb,
                                 iteratorHolder=[iter(range(req.period))])
        self.__activities.append(activity)
'Activity tick handler; services all activities Returns: True if controlling iterator says it\'s okay to keep going; False to stop'
def tick(self):
for act in self.__activities: if (not act.iteratorHolder[0]): continue try: next(act.iteratorHolder[0]) except StopIteration: act.cb() if act.repeating: act.iteratorHolder[0] = iter(xrange(act.period)) else: ...
def __init__(self, dirPath, reason):
    """dirPath: the path that we attempted to create for experiment files.
    reason: any object convertible to a string that explains the failure
    (may be an exception).
    """
    message = 'Error creating directory <%s>: %s.' % (str(dirPath), str(reason))
    super(_CreateDirectoryException, self).__init__(message)
    # Keep the original cause around for callers that want to inspect it.
    self.reason = reason
def __init__(self, problem, precursor):
    """problem: describes the failure inside the error-reporting function.
    precursor: the original error that was being reported when the
    error-reporting function itself failed.
    """
    message = "Encountered error: '%s' while reporting error: '%s'." % (
        problem, precursor)
    super(_ErrorReportingException, self).__init__(message)
@staticmethod
def getInputElement(inferenceElement):
    """Return the sensor input element corresponding to the given inference
    element (None when there is no mapping).

    Mainly used for metrics and prediction logging.
    """
    return InferenceElement.__inferenceInputMap.get(inferenceElement)
@staticmethod
def isTemporal(inferenceElement):
    """Return True if this element's inference predicts the input of the
    NEXT timestep.

    NOTE: only meaningful when the model's inference type is also
    temporal — a temporal model CAN have non-temporal inference elements,
    but a non-temporal model CANNOT have temporal ones.
    """
    # Lazily build the set of temporal elements on first use.
    if InferenceElement.__temporalInferenceElements is None:
        InferenceElement.__temporalInferenceElements = set(
            [InferenceElement.prediction])
    return inferenceElement in InferenceElement.__temporalInferenceElements
'Returns the number of records that elapse between when an inference is made and when the corresponding input record will appear. For example, a multistep prediction for 3 timesteps out will have a delay of 3 Parameters: inferenceElement: The InferenceElement value being delayed key: If the inference i...
@staticmethod def getTemporalDelay(inferenceElement, key=None):
if (inferenceElement in (InferenceElement.prediction, InferenceElement.encodings)): return 1 if (inferenceElement in (InferenceElement.anomalyScore, InferenceElement.anomalyLabel, InferenceElement.classification, InferenceElement.classConfidences)): return 0 if (inferenceElement in (Inferenc...
'Returns the maximum delay for the InferenceElements in the inference dictionary Parameters: inferences: A dictionary where the keys are InferenceElements'
@staticmethod def getMaxDelay(inferences):
maxDelay = 0 for (inferenceElement, inference) in inferences.iteritems(): if isinstance(inference, dict): for key in inference.iterkeys(): maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement, key), maxDelay) else: maxDelay = max(InferenceElem...
'Returns True if the inference type is \'temporal\', i.e. requires a temporal memory in the network.'
@staticmethod def isTemporal(inferenceType):
if (InferenceType.__temporalInferenceTypes is None): InferenceType.__temporalInferenceTypes = set([InferenceType.TemporalNextStep, InferenceType.TemporalClassification, InferenceType.TemporalAnomaly, InferenceType.TemporalMultiStep, InferenceType.NontemporalMultiStep]) return (inferenceType in Inference...
'Instantiate the Hypersearch worker Parameters: options: The command line options. See the main() method for a description of these options cmdLineArgs: Copy of the command line arguments, so we can place them in the log'
def __init__(self, options, cmdLineArgs):
self._options = options self.logger = logging.getLogger('.'.join(['com.numenta.nupic.swarming', self.__class__.__name__])) if (options.logLevel is not None): self.logger.setLevel(options.logLevel) self.logger.info(('Launched with command line arguments: %s' % str(cmdLineArgs))) ...
'For all models that modified their results since last time this method was called, send their latest results to the Hypersearch implementation.'
def _processUpdatedModels(self, cjDAO):
curModelIDCtrList = cjDAO.modelsGetUpdateCounters(self._options.jobID) if (len(curModelIDCtrList) == 0): return self.logger.debug(('current modelID/updateCounters: %s' % str(curModelIDCtrList))) self.logger.debug(('last modelID/updateCounters: %s' % str(self._modelIDCtrList))) cu...
'Run this worker. Parameters: retval: jobID of the job we ran. This is used by unit test code when calling this working using the --params command line option (which tells this worker to insert the job itself).'
def run(self):
options = self._options self.logger.info('Connecting to the jobs database') cjDAO = ClientJobsDAO.get() self._workerID = cjDAO.getConnectionID() if options.clearModels: cjDAO.modelsClearAll() if (options.params is not None): options.jobID = cjDAO.jobInsert(client='hwT...
'Parameters: modelID: ID of this model in the models table jobID: params: a dictionary of parameters for this dummy model. The possible keys are: delay: OPTIONAL-This specifies the amount of time (in seconds) that the experiment should wait before STARTING to process records. This is useful for simulati...
def __init__(self, modelID, jobID, params, predictedField, reportKeyPatterns, optimizeKeyPattern, jobsDAO, modelCheckpointGUID, logLevel=None, predictionCacheMaxRecords=None):
super(OPFDummyModelRunner, self).__init__(modelID=modelID, jobID=jobID, predictedField=predictedField, experimentDir=None, reportKeyPatterns=reportKeyPatterns, optimizeKeyPattern=optimizeKeyPattern, jobsDAO=jobsDAO, modelCheckpointGUID=modelCheckpointGUID, logLevel=logLevel, predictionCacheMaxRecords=None) self...
'Loads all the parameters for this dummy model. For any paramters specified as lists, read the appropriate value for this model using the model index'
def _loadDummyModelParameters(self, params):
for (key, value) in params.iteritems(): if (type(value) == list): index = (self.modelIndex % len(params[key])) self._params[key] = params[key][index] else: self._params[key] = params[key]
'Computes the amount of time (if any) to delay the run of this model. This can be determined by two mutually exclusive parameters: delay and sleepModelRange. \'delay\' specifies the number of seconds a model should be delayed. If a list is specified, the appropriate amount of delay is determined by using the model\'s m...
def _computModelDelay(self):
if ((self._params['delay'] is not None) and (self._params['sleepModelRange'] is not None)): raise RuntimeError("Only one of 'delay' or 'sleepModelRange' may be specified") if (self._sleepModelRange is not None): (range, delay) = self._sleepModelRange.split(':') de...
'Protected function that can be overridden by subclasses. Its main purpose is to allow the the OPFDummyModelRunner to override this with deterministic values Returns: All the metrics being computed for this model'
def _getMetrics(self):
metric = None if (self.metrics is not None): metric = self.metrics((self._currentRecordIndex + 1)) elif (self.metricValue is not None): metric = self.metricValue else: raise RuntimeError('No metrics or metric value specified for dummy model') return {s...
'Runs the given OPF task against the given Model instance'
def run(self):
self._logger.debug(('Starting Dummy Model: modelID=%s;' % self._modelID)) periodic = self._initPeriodicActivities() self._optimizedMetricLabel = self._optimizeKeyPattern self._reportMetricLabels = [self._optimizeKeyPattern] if (self._iterations >= 0): iterTracker = iter(xrange(self....
def _createPredictionLogger(self):
    """Create the model's PredictionLogger object — here a no-op stub that
    silently discards all prediction output (dummy model)."""
    class DummyLogger:
        # Every logger entry point is a no-op.
        def writeRecord(self, record):
            pass
        def writeRecords(self, records, progressCB):
            pass
        def close(self):
            pass
    self._predictionLogger = DummyLogger()
'Checks to see if the model should exit based on the exitAfter dummy parameter'
def __shouldSysExit(self, iteration):
if ((self._exitAfter is None) or (iteration < self._exitAfter)): return False results = self._jobsDAO.modelsGetFieldsForJob(self._jobID, ['params']) modelIDs = [e[0] for e in results] modelNums = [json.loads(e[1][0])['structuredParams']['__model_num'] for e in results] sameModelNumbers = fil...
def __init__(self, hsObj):
    """Create our state object and load the current state from the DB.

    hsObj: reference to the HypersearchV2 instance (supplies the logger
    and DB access used by readStateFromDB).
    """
    self._hsObj = hsObj
    self.logger = hsObj.logger
    # Local cache of the job-record state; filled by readStateFromDB().
    self._state = None
    self._priorStateJSON = None
    self._dirty = False
    self.readStateFromDB()
def isDirty(self):
    """Return True if our local copy of the state has changed since the
    last time we read from the DB."""
    return self._dirty
def isSearchOver(self):
    """Return True if the search should be considered over."""
    return self._state['searchOver']
'Set our state to that obtained from the engWorkerState field of the job record. Parameters: stateJSON: JSON encoded state from job record'
def readStateFromDB(self):
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID, ['engWorkerState'])[0] if (self._priorStateJSON is None): swarms = dict() if (self._hsObj._fixedFields is not None): print self._hsObj._fixedFields encoderSet = [] for field in self._hs...
'Update the state in the job record with our local changes (if any). If we don\'t have the latest state in our priorStateJSON, then re-load in the latest state and return False. If we were successful writing out our changes, return True Parameters: retval: True if we were successful writing out our changes False if ...
def writeStateToDB(self):
if (not self._dirty): return True self._state['lastUpdateTime'] = time.time() newStateJSON = json.dumps(self._state) success = self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID, 'engWorkerState', str(newStateJSON), str(self._priorStateJSON)) if success: self.logger.debug(('Suc...
def getEncoderNameFromKey(self, key):
    """Given an encoder dictionary key, return the encoder name.

    Keys look like 'modelParams|sensorParams|encoders|home_winloss';
    the name is the last '|'-separated component.
    """
    return key.rsplit('|', 1)[-1]
def getEncoderKeyFromName(self, name):
    """Given an encoder name, return its full model-params key, e.g.
    'modelParams|sensorParams|encoders|home_winloss'."""
    return 'modelParams|sensorParams|encoders|%s' % name
'Return the field contributions statistics. Parameters: retval: Dictionary where the keys are the field names and the values are how much each field contributed to the best score.'
def getFieldContributions(self):
if (self._hsObj._fixedFields is not None): return (dict(), dict()) predictedEncoderName = self._hsObj._predictedFieldEncoder fieldScores = [] for (swarmId, info) in self._state['swarms'].iteritems(): encodersUsed = swarmId.split('.') if (len(encodersUsed) != 1): conti...
def getAllSwarms(self, sprintIdx):
    """Return the ids of all swarms in the given sprint."""
    return [swarmId
            for swarmId, info in self._state['swarms'].items()
            if info['sprintIdx'] == sprintIdx]
def getActiveSwarms(self, sprintIdx=None):
    """Return the ids of active swarms (those still needing new particles).

    sprintIdx: sprint to query; when None, active swarms from all sprints
    are returned.
    """
    matches = []
    for swarmId, info in self._state['swarms'].items():
        if sprintIdx is not None and info['sprintIdx'] != sprintIdx:
            continue
        if info['status'] == 'active':
            matches.append(swarmId)
    return matches
def getNonKilledSwarms(self, sprintIdx):
    """Return swarms in the given sprint that were not killed.

    Used to decide which encoders to carry forward to the next sprint;
    killed swarms represent obviously bad encoder combinations.
    """
    return [swarmId
            for swarmId, info in self._state['swarms'].items()
            if info['sprintIdx'] == sprintIdx and info['status'] != 'killed']
def getCompletedSwarms(self):
    """Return the ids of all swarms whose status is 'completed'."""
    return [swarmId
            for swarmId, info in self._state['swarms'].items()
            if info['status'] == 'completed']
def getCompletingSwarms(self):
    """Return the ids of all swarms whose status is 'completing'."""
    return [swarmId
            for swarmId, info in self._state['swarms'].items()
            if info['status'] == 'completing']
def bestModelInCompletedSwarm(self, swarmId):
    """Return (bestModelId, bestErrScore) for the given swarm.

    bestModelId is None when the swarm has not completed yet (that is
    simply whatever the state record holds).
    """
    info = self._state['swarms'][swarmId]
    return (info['bestModelId'], info['bestErrScore'])
def bestModelInCompletedSprint(self, sprintIdx):
    """Return (bestModelId, bestErrScore) for the given sprint.

    bestModelId is None when the sprint has not completed yet (that is
    simply whatever the state record holds).
    """
    info = self._state['sprints'][sprintIdx]
    return (info['bestModelId'], info['bestErrScore'])
'Return the best model ID and it\'s errScore from the given sprint, which may still be in progress. This returns the best score from all models in the sprint which have matured so far. Parameters: retval: (modelId, errScore)'
def bestModelInSprint(self, sprintIdx):
swarms = self.getAllSwarms(sprintIdx) bestModelId = None bestErrScore = numpy.inf for swarmId in swarms: (modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId) if (errScore < bestErrScore): bestModelId = modelId bestErrScore = errScore re...
'Change the given swarm\'s state to \'newState\'. If \'newState\' is \'completed\', then bestModelId and bestErrScore must be provided. Parameters: swarmId: swarm Id newStatus: new status, either \'active\', \'completing\', \'completed\', or \'killed\''
def setSwarmState(self, swarmId, newStatus):
assert (newStatus in ['active', 'completing', 'completed', 'killed']) swarmInfo = self._state['swarms'][swarmId] if (swarmInfo['status'] == newStatus): return if ((swarmInfo['status'] == 'completed') and (newStatus == 'completing')): return self._dirty = True swarmInfo['status'] ...
'Return True if there are any more good sprints still being explored. A \'good\' sprint is one that is earlier than where we detected an increase in error from sprint to subsequent sprint.'
def anyGoodSprintsActive(self):
if (self._state['lastGoodSprint'] is not None): goodSprints = self._state['sprints'][0:(self._state['lastGoodSprint'] + 1)] else: goodSprints = self._state['sprints'] for sprint in goodSprints: if (sprint['status'] == 'active'): anyActiveSprints = True break ...
def isSprintCompleted(self, sprintIdx):
    """Return True if the given sprint exists and has completed."""
    if sprintIdx >= len(self._state['sprints']):
        return False
    return self._state['sprints'][sprintIdx]['status'] == 'completed'
'See if we can kill off some speculative swarms. If an earlier sprint has finally completed, we can now tell which fields should *really* be present in the sprints we\'ve already started due to speculation, and kill off the swarms that should not have been included.'
def killUselessSwarms(self):
numExistingSprints = len(self._state['sprints']) if (self._hsObj._searchType == HsSearchType.legacyTemporal): if (numExistingSprints <= 2): return elif (numExistingSprints <= 1): return completedSwarms = self.getCompletedSwarms() completedSwarms = [(swarm, self._state['sw...
'If the given sprint exists and is active, return active=True. If the sprint does not exist yet, this call will create it (and return active=True). If it already exists, but is completing or complete, return active=False. If sprintIdx is past the end of the possible sprints, return active=False, noMoreSprints=True IMPO...
def isSprintActive(self, sprintIdx):
while True: numExistingSprints = len(self._state['sprints']) if (sprintIdx <= (numExistingSprints - 1)): if (not self._hsObj._speculativeParticles): active = (self._state['sprints'][sprintIdx]['status'] == 'active') return (active, False) else:...
def debug(self, msg, *args, **kwargs):
    """Log 'msg % args' with severity 'DEBUG', prefixing msg via
    getExtendedMsg.  Pass exc_info with a true value to include exception
    information, e.g. logger.debug("oops: %s", e, exc_info=1)."""
    extended = self.getExtendedMsg(msg)
    # Unbound-style call: self is passed explicitly to the base logger.
    self._baseLogger.debug(self, extended, *args, **kwargs)
def info(self, msg, *args, **kwargs):
    """Log 'msg % args' with severity 'INFO', prefixing msg via
    getExtendedMsg.  Pass exc_info with a true value to include exception
    information."""
    extended = self.getExtendedMsg(msg)
    self._baseLogger.info(self, extended, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
    """Log 'msg % args' with severity 'WARNING', prefixing msg via
    getExtendedMsg.  Pass exc_info with a true value to include exception
    information."""
    extended = self.getExtendedMsg(msg)
    self._baseLogger.warning(self, extended, *args, **kwargs)
def error(self, msg, *args, **kwargs):
    """Log 'msg % args' with severity 'ERROR', prefixing msg via
    getExtendedMsg.  Pass exc_info with a true value to include exception
    information."""
    extended = self.getExtendedMsg(msg)
    self._baseLogger.error(self, extended, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
    """Log 'msg % args' with severity 'CRITICAL', prefixing msg via
    getExtendedMsg.  Pass exc_info with a true value to include exception
    information."""
    extended = self.getExtendedMsg(msg)
    self._baseLogger.critical(self, extended, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
    """Log 'msg % args' with the integer severity 'level'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.
    logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
    """
    # Delegate to the base logger's unbound method, preserving the level.
    extendedMsg = self.getExtendedMsg(msg)
    self._baseLogger.log(self, level, extendedMsg, *args, **kwargs)
'Record the best score for a swarm\'s generation index (x) Returns list of swarmIds to terminate.'
def recordDataPoint(self, swarmId, generation, errScore):
terminatedSwarms = [] if (swarmId in self.swarmScores): entry = self.swarmScores[swarmId] assert (len(entry) == generation) entry.append(errScore) entry = self.swarmBests[swarmId] entry.append(min(errScore, entry[(-1)])) assert (len(self.swarmBests[swarmId]) == le...
def getTerminationCallbacks(self, terminationFunc):
    """Returns the periodic checks to see if the model should continue
    running.

    Parameters:
    -------------------------------------------------------------------
    terminationFunc: The function that will be called in the model main
                     loop as a wrapper around this function. Must have a
                     parameter called 'index'.

    Returns: A list of PeriodicActivityRequest objects.
    """
    activities = [None] * len(ModelTerminator._MILESTONES)
    for (index, (iteration, _)) in enumerate(ModelTerminator._MILESTONES):
        # Bind the milestone index into the callback so the wrapper knows
        # which milestone fired.
        cb = functools.partial(terminationFunc, index=index)
        activities[index] = PeriodicActivityRequest(repeating=False,
                                                    period=iteration,
                                                    cb=cb)
    # BUG FIX: the original built the list but never returned it, so callers
    # received None despite the documented contract.
    return activities
@classmethod
def getString(cls, prop):
    """Retrieve the requested property as a string.

    If the property does not exist, KeyError is raised.

    Parameters:
    ----------------------------------------------------------------
    prop:   name of the property
    retval: property value as a string
    """
    if cls._properties is None:
        cls._readStdConfigFiles()
    # An environment variable of the form <prefix><prop-with-dots-as-underscores>
    # overrides the value from the config files.
    envKey = '%s%s' % (cls.envPropPrefix, prop.replace('.', '_'))
    envValue = os.environ.get(envKey, None)
    if envValue is not None:
        return envValue
    return cls._properties[prop]
@classmethod
def getBool(cls, prop):
    """Retrieve the requested property and return it as a bool.

    If the property does not exist, KeyError is raised. If the property
    value is neither 0 nor 1, ValueError is raised.

    Parameters:
    ----------------------------------------------------------------
    prop:   name of the property
    retval: property value as bool
    """
    value = cls.getInt(prop)
    # Only the exact integers 0 and 1 are accepted as booleans.
    if value not in (0, 1):
        raise ValueError('Expected 0 or 1, but got %r in config property %s' % (value, prop))
    return bool(value)
@classmethod
def getInt(cls, prop):
    """Retrieve the requested property and return it as an int.

    If the property does not exist, KeyError is raised.

    Parameters:
    ----------------------------------------------------------------
    prop:   name of the property
    retval: property value as int
    """
    rawValue = cls.getString(prop)
    return int(rawValue)
@classmethod
def getFloat(cls, prop):
    """Retrieve the requested property and return it as a float.

    If the property does not exist, KeyError is raised.

    Parameters:
    ----------------------------------------------------------------
    prop:   name of the property
    retval: property value as float
    """
    rawValue = cls.getString(prop)
    return float(rawValue)
@classmethod
def get(cls, prop, default=None):
    """Get the value of the given configuration property as a string.

    Returns the property value, or the value of the 'default' arg if the
    property is not found. Prefer Configuration.getString() when a missing
    property should be treated as an error.

    Parameters:
    ----------------------------------------------------------------
    prop:    name of the property
    default: value returned when the property is absent
    retval:  property value (string) or default
    """
    try:
        return cls.getString(prop)
    except KeyError:
        # Missing property: fall back to the caller-supplied default.
        return default
@classmethod
def set(cls, prop, value):
    """Set the value of the given configuration property.

    Parameters:
    ----------------------------------------------------------------
    prop:  name of the property
    value: value to set (stored as its string representation)
    """
    if cls._properties is None:
        cls._readStdConfigFiles()
    # All properties are stored as strings.
    cls._properties[prop] = str(value)
'Return a dict containing all of the configuration properties Parameters: retval: dict containing all configuration properties.'
@classmethod def dict(cls):
if (cls._properties is None): cls._readStdConfigFiles() result = dict(cls._properties) keys = os.environ.keys() replaceKeys = filter((lambda x: x.startswith(cls.envPropPrefix)), keys) for envKey in replaceKeys: key = envKey[len(cls.envPropPrefix):] key = key.replace('_', '.')...
@classmethod
def readConfigFile(cls, filename, path=None):
    """Parse the given XML file and store all properties it describes.

    Parameters:
    ----------------------------------------------------------------
    filename: name of XML file to parse (no path)
    path:     path of the XML file. If None, use the standard
              configuration search path.
    """
    properties = cls._readConfigFile(filename, path)
    if cls._properties is None:
        cls._properties = dict()
    # Only entries that define a 'value' attribute become properties.
    for name, attributes in properties.items():
        if 'value' in attributes:
            cls._properties[name] = attributes['value']
'Parse the given XML file and return a dict describing the file. Parameters: filename: name of XML file to parse (no path) path: path of the XML file. If None, then use the standard configuration search path. retval: returns a dict with each property as a key and a dict of all the property\'s attributes as val...
@classmethod def _readConfigFile(cls, filename, path=None):
outputProperties = dict() if (path is None): filePath = cls.findConfigFile(filename) else: filePath = os.path.join(path, filename) try: if (filePath is not None): try: _getLoggerBase().debug('Loading config file: %s', filePath) ...
@classmethod
def clear(cls):
    """Clear out the entire configuration.

    Resets both the cached properties and the cached search paths, so the
    next access re-reads the standard configuration files.
    """
    cls._properties = None
    cls._configPaths = None
@classmethod
def findConfigFile(cls, filename):
    """Search the configuration path (specified via the NTA_CONF_PATH
    environment variable) for the given filename.

    If found, return the complete path to the file; otherwise return None
    (implicitly).

    Parameters:
    ----------------------------------------------------------------
    filename: name of file to locate
    """
    for directory in cls.getConfigPaths():
        candidate = os.path.join(directory, filename)
        if os.path.isfile(candidate):
            return candidate
@classmethod
def getConfigPaths(cls):
    """Return the list of paths to search for configuration files.

    Parameters:
    ----------------------------------------------------------------
    retval: list of paths
    """
    # Paths set explicitly via setConfigPaths() take precedence.
    if cls._configPaths is not None:
        return cls._configPaths
    # Otherwise derive the paths from NTA_CONF_PATH (empty list if unset).
    # NOTE(review): the derived list is not cached in cls._configPaths;
    # this matches the original behavior.
    configPaths = []
    if 'NTA_CONF_PATH' in os.environ:
        configPaths = os.environ['NTA_CONF_PATH'].split(os.pathsep)
    return configPaths
@classmethod
def setConfigPaths(cls, paths):
    """Modify the paths we use to search for configuration files.

    Parameters:
    ----------------------------------------------------------------
    paths: list of paths to search for config files
    """
    # Copy into a fresh list so later mutation of the caller's sequence
    # cannot affect the stored paths.
    cls._configPaths = list(paths)