desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def __init__(self, rawInfo):
    """Wrap a single model information element.

    Parameters:
    ----------------------------------------------------------------------
    rawInfo: A single model information element as returned by
             ClientJobsDAO.modelsInfo()
    retval:  nothing
    """
    self.__rawInfo = rawInfo
    # Cached copy of the decoded results; lazily populated on first use.
    self.__cachedResults = None
    assert self.__rawInfo.params is not None
    # Cached copy of the decoded params JSON; lazily populated on first use.
    self.__cachedParams = None
def __repr__(self):
    """Return a human-readable representation of this _NupicModelInfo.

    Parameters:
    ----------------------------------------------------------------------
    retval: Representation of this _NupicModelInfo instance.
    """
    return ('%s(jobID=%s, modelID=%s, status=%s, completionReason=%s, '
            'updateCounter=%s, numRecords=%s)' % (
                '_NupicModelInfo',
                self.__rawInfo.jobId,
                self.__rawInfo.modelId,
                self.__rawInfo.status,
                self.__rawInfo.completionReason,
                self.__rawInfo.updateCounter,
                self.__rawInfo.numRecords))
def getModelID(self):
    """Return the Nupic modelID associated with this model info.

    Parameters:
    ----------------------------------------------------------------------
    retval: Nupic modelID
    """
    return self.__rawInfo.modelId
def statusAsString(self):
    """Return a human-readable string representation of the model's status.

    Parameters:
    ----------------------------------------------------------------------
    retval: printable status string
    """
    return '%s' % self.__rawInfo.status
def getModelDescription(self):
    """Return a printable description of the model.

    Uses the 'experimentName' entry of the model params when present;
    otherwise builds a dotted "key_value" string from the param labels.

    Parameters:
    ----------------------------------------------------------------------
    retval: printable description string
    """
    params = self.__unwrapParams()
    if 'experimentName' in params:
        return params['experimentName']
    else:
        paramSettings = self.getParamLabels()
        # Form a csv friendly string representation of this model
        items = []
        for key, value in paramSettings.items():
            items.append('%s_%s' % (key, value))
        return '.'.join(items)
def getGeneratedDescriptionFile(self):
    """Return the contents of the sub-experiment description file for
    this model.

    Parameters:
    ----------------------------------------------------------------------
    retval: description file contents
    """
    return self.__rawInfo.genDescription
def getNumRecords(self):
    """Return the number of records processed by the model.

    Parameters:
    ----------------------------------------------------------------------
    retval: number of records
    """
    return self.__rawInfo.numRecords
'Parameters: retval: a dictionary of model parameter labels. For each entry the key is the name of the parameter and the value is the value chosen for it.'
def getParamLabels(self):
params = self.__unwrapParams() if ('particleState' in params): retval = dict() queue = [(pair, retval) for pair in params['particleState']['varStates'].iteritems()] while (len(queue) > 0): (pair, output) = queue.pop() (k, v) = pair if (('position' in v...
def __unwrapParams(self):
    """Unwrap self.__rawInfo.params into the equivalent Python dictionary
    and cache it in self.__cachedParams. Returns the unwrapped params.

    Parameters:
    ----------------------------------------------------------------------
    retval: model params dictionary corresponding to the JSON returned in
            ClientJobsDAO.modelsInfo()[x].params
    """
    if self.__cachedParams is None:
        self.__cachedParams = json.loads(self.__rawInfo.params)
        assert self.__cachedParams is not None, (
            '%s resulted in None' % self.__rawInfo.params)
    return self.__cachedParams
def getReportMetrics(self):
    """Retrieve a dictionary of metrics designated for report.

    Parameters:
    ----------------------------------------------------------------------
    retval: a dictionary of metrics that were collected for the model, or
            an empty dictionary if there aren't any.
    """
    return self.__unwrapResults().reportMetrics
def getOptimizationMetrics(self):
    """Retrieve a dictionary of metrics designated for optimization.

    Parameters:
    ----------------------------------------------------------------------
    retval: a dictionary of optimization metrics that were collected for
            the model, or an empty dictionary if there aren't any.
    """
    return self.__unwrapResults().optimizationMetrics
def getAllMetrics(self):
    """Retrieve a dictionary combining all report and optimization metrics.

    Parameters:
    ----------------------------------------------------------------------
    retval: a dictionary of all metrics collected for the model; an empty
            dictionary if there aren't any.
    """
    result = self.getReportMetrics()
    # Optimization metrics win on key collision (update overwrites).
    result.update(self.getOptimizationMetrics())
    return result
'Unwraps self.__rawInfo.results and caches it in self.__cachedResults; Returns the unwrapped params Parameters: retval: ModelResults namedtuple instance'
def __unwrapResults(self):
if (self.__cachedResults is None): if (self.__rawInfo.results is not None): resultList = json.loads(self.__rawInfo.results) assert (len(resultList) == 2), ('Expected 2 elements, but got %s (%s).' % (len(resultList), resultList)) self.__cachedResults = se...
def isWaitingToStart(self):
    """Return True if the model has not been started yet.

    Parameters:
    ----------------------------------------------------------------------
    retval: True if not started yet
    """
    waiting = (self.__rawInfo.status == self.__nupicModelStatus_notStarted)
    return waiting
def isRunning(self):
    """Return True if the model is currently running.

    NOTE: the original docstring said "has not been started yet", which was
    a copy-paste of isWaitingToStart(); the code compares against the
    'running' status.

    Parameters:
    ----------------------------------------------------------------------
    retval: True if running
    """
    running = (self.__rawInfo.status == self.__nupicModelStatus_running)
    return running
def isFinished(self):
    """Return True if the model's processing has completed (either with
    success or failure).

    Parameters:
    ----------------------------------------------------------------------
    retval: True if finished
    """
    finished = (self.__rawInfo.status == self.__nupicModelStatus_completed)
    return finished
def getCompletionReason(self):
    """Return the completion reason wrapped in a _ModelCompletionReason.

    NOTE: it's an error to call this method if isFinished() would
    return False.

    Parameters:
    ----------------------------------------------------------------------
    retval: _ModelCompletionReason instance
    """
    assert self.isFinished(), 'Too early to tell: %s' % self
    return _ModelCompletionReason(self.__rawInfo.completionReason)
def getCompletionMsg(self):
    """Return the model completion message.

    NOTE: it's an error to call this method if isFinished() would
    return False.

    Parameters:
    ----------------------------------------------------------------------
    retval: completion message
    """
    assert self.isFinished(), 'Too early to tell: %s' % self
    return self.__rawInfo.completionMsg
def getStartTime(self):
    """Return the model evaluation start time as a string.

    NOTE: it's an error to call this method if isWaitingToStart() would
    return True.

    Parameters:
    ----------------------------------------------------------------------
    retval: model evaluation start time
    """
    assert not self.isWaitingToStart(), 'Too early to tell: %s' % self
    return '%s' % self.__rawInfo.startTime
def getEndTime(self):
    """Return the model evaluation end time as a string.

    NOTE: it's an error to call this method if isFinished() would
    return False.

    Parameters:
    ----------------------------------------------------------------------
    retval: model evaluation end time
    """
    assert self.isFinished(), 'Too early to tell: %s' % self
    return '%s' % self.__rawInfo.endTime
'Instantiate our results database Parameters: hsObj: Reference to the HypersearchV2 instance'
def __init__(self, hsObj):
self._hsObj = hsObj self._allResults = [] self._errModels = set() self._numErrModels = 0 self._completedModels = set() self._numCompletedModels = 0 self._modelIDToIdx = dict() self._bestResult = numpy.inf self._bestModelID = None self._swarmBestOverall = dict() self._swarmNum...
'Insert a new entry or update an existing one. If this is an update of an existing entry, then modelParams will be None Parameters: modelID: globally unique modelID of this model modelParams: params dict for this model, or None if this is just an update of a model that it already previously reported on. See th...
def update(self, modelID, modelParams, modelParamsHash, metricResult, completed, completionReason, matured, numRecords):
assert (modelParamsHash is not None) if completed: matured = True if ((metricResult is not None) and matured and (completionReason in [ClientJobsDAO.CMPL_REASON_EOF, ClientJobsDAO.CMPL_REASON_STOPPED])): if self._hsObj._maximize: errScore = ((-1) * metricResult) else: ...
def getNumErrModels(self):
    """Return the number of models that completed with errors.

    Parameters:
    ----------------------------------------------------------------------
    retval: # of models
    """
    return self._numErrModels
def getErrModelIds(self):
    """Return the list of model IDs that completed with errors.

    Parameters:
    ----------------------------------------------------------------------
    retval: list of model IDs
    """
    return list(self._errModels)
def getNumCompletedModels(self):
    """Return the total number of models that completed.

    Parameters:
    ----------------------------------------------------------------------
    retval: # of models that completed
    """
    return self._numCompletedModels
def getModelIDFromParamsHash(self, paramsHash):
    """Return the modelID of the model with the given paramsHash, or
    None if not found.

    Parameters:
    ----------------------------------------------------------------------
    paramsHash: paramsHash to look for
    retval:     modelId, or None if not found
    """
    entryIdx = self._paramsHashToIndexes.get(paramsHash, None)
    if entryIdx is not None:
        return self._allResults[entryIdx]['modelID']
    else:
        return None
'Return the total # of models we have in our database (if swarmId is None) or in a specific swarm. Parameters: swarmId: A string representation of the sorted list of encoders in this swarm. For example \'__address_encoder.__gym_encoder\' includeHidden: If False, this will only return the number of models that a...
def numModels(self, swarmId=None, includeHidden=False):
if includeHidden: if (swarmId is None): return len(self._allResults) else: return len(self._swarmIdToIndexes.get(swarmId, [])) else: if (swarmId is None): entries = self._allResults else: entries = [self._allResults[entryIdx] for en...
'Return the model ID of the model with the best result so far and it\'s score on the optimize metric. If swarm is None, then it returns the global best, otherwise it returns the best for the given swarm for all generatons up to and including genIdx. Parameters: swarmId: A string representation of the sorted list of en...
def bestModelIdAndErrScore(self, swarmId=None, genIdx=None):
if (swarmId is None): return (self._bestModelID, self._bestResult) else: if (swarmId not in self._swarmBestOverall): return (None, numpy.inf) genScores = self._swarmBestOverall[swarmId] bestModelId = None bestScore = numpy.inf for (i, (modelId, errScor...
def getParticleInfo(self, modelId):
    """Return particle info for a specific modelId.

    Parameters:
    ----------------------------------------------------------------------
    modelId: which model Id
    retval:  (particleState, modelId, errScore, completed, matured)
    """
    entry = self._allResults[self._modelIDToIdx[modelId]]
    return (entry['modelParams']['particleState'], modelId,
            entry['errScore'], entry['completed'], entry['matured'])
'Return a list of particleStates for all particles we know about in the given swarm, their model Ids, and metric results. Parameters: swarmId: A string representation of the sorted list of encoders in this swarm. For example \'__address_encoder.__gym_encoder\' genIdx: If not None, only return particles at this specif...
def getParticleInfos(self, swarmId=None, genIdx=None, completed=None, matured=None, lastDescendent=False):
if (swarmId is not None): entryIdxs = self._swarmIdToIndexes.get(swarmId, []) else: entryIdxs = range(len(self._allResults)) if (len(entryIdxs) == 0): return ([], [], [], [], []) particleStates = [] modelIds = [] errScores = [] completedFlags = [] maturedFlags = [...
'Return a list of particleStates for all particles in the given swarm generation that have been orphaned. Parameters: swarmId: A string representation of the sorted list of encoders in this swarm. For example \'__address_encoder.__gym_encoder\' genIdx: If not None, only return particles at this specific generation in...
def getOrphanParticleInfos(self, swarmId, genIdx):
entryIdxs = range(len(self._allResults)) if (len(entryIdxs) == 0): return ([], [], [], [], []) particleStates = [] modelIds = [] errScores = [] completedFlags = [] maturedFlags = [] for idx in entryIdxs: entry = self._allResults[idx] if (not entry['hidden']): ...
'Return a list of swarm generations that have completed and the best (minimal) errScore seen for each of them. Parameters: retval: list of tuples. Each tuple is of the form: (swarmId, genIdx, bestErrScore)'
def getMaturedSwarmGenerations(self):
result = [] modifiedSwarmGens = sorted(self._modifiedSwarmGens) for key in modifiedSwarmGens: (swarmId, genIdx) = key if (key in self._maturedSwarmGens): self._modifiedSwarmGens.remove(key) continue if ((genIdx >= 1) and (not ((swarmId, (genIdx - 1)) in self._...
'Return the generation index of the first generation in the given swarm that does not have numParticles particles in it, either still in the running state or completed. This does not include orphaned particles. Parameters: swarmId: A string representation of the sorted list of encoders in this swarm. For example \'__a...
def firstNonFullGeneration(self, swarmId, minNumParticles):
if (not (swarmId in self._swarmNumParticlesPerGeneration)): return None numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId] numPsPerGen = numpy.array(numPsPerGen) firstNonFull = numpy.where((numPsPerGen < minNumParticles))[0] if (len(firstNonFull) == 0): return len(numPsPerGe...
def highestGeneration(self, swarmId):
    """Return the generation index of the highest generation in the
    given swarm.

    Parameters:
    ----------------------------------------------------------------------
    swarmId: A string representation of the sorted list of encoders in
             this swarm, e.g. '__address_encoder.__gym_encoder'
    retval:  generation index
    """
    numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
    # One entry per generation, so the highest index is len - 1.
    return len(numPsPerGen) - 1
def getParticleBest(self, particleId):
    """Return the best score and position for a given particle. The
    position is a dict of varName:varPosition items.

    Parameters:
    ----------------------------------------------------------------------
    particleId: which particle
    retval:     (bestResult, bestPosition); (None, None) if unknown
    """
    return self._particleBest.get(particleId, (None, None))
'Return a dict of the errors obtained on models that were run with each value from a PermuteChoice variable. For example, if a PermuteChoice variable has the following choices: [\'a\', \'b\', \'c\'] The dict will have 3 elements. The keys are the stringified choiceVars, and each value is tuple containing (choiceVar, er...
def getResultsPerChoice(self, swarmId, maxGenIdx, varName):
results = dict() (allParticles, _, resultErrs, _, _) = self.getParticleInfos(swarmId, genIdx=None, matured=True) for (particleState, resultErr) in itertools.izip(allParticles, resultErrs): if (maxGenIdx is not None): if (particleState['genIdx'] > maxGenIdx): continue ...
'Instantiate the HyperseachV2 instance. Parameters: searchParams: a dict of the job\'s search parameters. The format is: persistentJobGUID: REQUIRED. Persistent, globally-unique identifier for this job for use in constructing persistent model checkpoint keys. MUST be compatible with S3 key-naming rules, but MUST NOT...
def __init__(self, searchParams, workerID=None, cjDAO=None, jobID=None, logLevel=None):
self.logger = logging.getLogger('.'.join(['com.numenta', self.__class__.__module__, self.__class__.__name__])) if (logLevel is not None): self.logger.setLevel(logLevel) random.seed(42) self._searchParams = searchParams self._workerID = workerID self._cjDAO = cjDAO self._jobID = jobID...
'Generate stream definition based on'
def _getStreamDef(self, modelDescription):
aggregationPeriod = {'days': 0, 'hours': 0, 'microseconds': 0, 'milliseconds': 0, 'minutes': 0, 'months': 0, 'seconds': 0, 'weeks': 0, 'years': 0} aggFunctionsDict = {} if ('aggregation' in modelDescription['streamDef']): for key in aggregationPeriod.keys(): if (key in modelDescription['...
def __del__(self):
    """Destructor.

    NOTE: this is not guaranteed to be called (bugs like circular
    references could prevent it from being called).
    """
    self.close()
    return
def close(self):
    """Delete temporary system objects/files."""
    if self._tempDir is not None and os.path.isdir(self._tempDir):
        self.logger.debug('Removing temporary directory %r', self._tempDir)
        shutil.rmtree(self._tempDir)
        self._tempDir = None
    return
'Read the permutations file and initialize the following member variables: _predictedField: field name of the field we are trying to predict _permutations: Dict containing the full permutations dictionary. _flattenedPermutations: Dict containing the flattened version of _permutations. The keys leading to the value in t...
def _readPermutationsFile(self, filename, modelDescription):
vars = {} permFile = execfile(filename, globals(), vars) self._reportKeys = vars.get('report', []) self._filterFunc = vars.get('permutationFilter', None) self._dummyModelParamsFunc = vars.get('dummyModelParams', None) self._predictedField = None self._predictedFieldEncoder = None self._f...
def getExpectedNumModels(self):
    """Compute the number of models expected to complete as part of this
    instance's HyperSearch.

    NOTE: for this search type the total is not known in advance, so -1
    is returned as a sentinel.

    Parameters:
    ----------------------------------------------------------------------
    retval: number of expected models, or -1 if unknown
    """
    return -1
def getModelNames(self):
    """Generate a list of model names expected to complete as part of
    this instance's HyperSearch.

    NOTE: for this search type the names are not known in advance, so
    None is returned as a sentinel.

    Parameters:
    ----------------------------------------------------------------------
    retval: list of model names, or None if unknown
    """
    return None
def getPermutationVariables(self):
    """Return a dictionary of permutation variables.

    Parameters:
    ----------------------------------------------------------------------
    retval: dictionary keyed by flat permutation variable name; each value
            is a sub-class of PermuteVariable.
    """
    return self._flattenedPermutations
def getComplexVariableLabelLookupDict(self):
    """Generate a lookup dictionary of permutation variables whose values
    are too complex for labels, mapping them to generated artificial
    labels.

    Raises:
    ----------------------------------------------------------------------
    NotImplementedError: always; not supported by this implementation.
    """
    raise NotImplementedError
def getOptimizationMetricInfo(self):
    """Retrieve the optimization key name and optimization direction.

    Parameters:
    ----------------------------------------------------------------------
    retval: (optimizationMetricKey, maximize)
            optimizationMetricKey: which report key to optimize for
            maximize: True to maximize the metric, False to minimize it
    """
    return (self._optimizeKey, self._maximize)
'If there are any models that haven\'t been updated in a while, consider them dead, and mark them as hidden in our resultsDB. We also change the paramsHash and particleHash of orphaned models so that we can re-generate that particle and/or model again if we desire. Parameters: retval:'
def _checkForOrphanedModels(self):
self.logger.debug(('Checking for orphaned models older than %s' % self._modelOrphanIntervalSecs)) while True: orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID, self._modelOrphanIntervalSecs) if (orphanedModelId is None): return self.logger.info...
'Periodically, check to see if we should remove a certain field combination from evaluation (because it is doing so poorly) or move on to the next sprint (add in more fields). This method is called from _getCandidateParticleAndSwarm(), which is called right before we try and create a new model to run. Parameters: remov...
def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):
if (self._hsState is None): self._hsState = HsState(self) self._hsState.readStateFromDB() completedSwarms = set() if (exhaustedSwarmId is not None): self.logger.info(("Removing swarm %s from the active set because we can't find any new unique par...
'Find or create a candidate particle to produce a new model. At any one time, there is an active set of swarms in the current sprint, where each swarm in the sprint represents a particular combination of fields. Ideally, we should try to balance the number of models we have evaluated for each swarm at any time. This me...
def _getCandidateParticleAndSwarm(self, exhaustedSwarmId=None):
jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0] if jobCancel: self._jobCancelled = True (workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID, ['workerCompletionReason', 'workerCompletionMsg']) if (workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS): ...
'Test if it\'s OK to exit this worker. This is only called when we run out of prospective new models to evaluate. This method sees if all models have matured yet. If not, it will sleep for a bit and return False. This will indicate to the hypersearch worker that we should keep running, and check again later. This gives...
def _okToExit(self):
print >>sys.stderr, 'reporter:status:In hypersearchV2: _okToExit' if (not self._jobCancelled): (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(matured=False) if (len(modelIds) > 0): self.logger.info('Ready to end hyperseach, but not all models ...
'Create one or more new models for evaluation. These should NOT be models that we already know are in progress (i.e. those that have been sent to us via recordModelProgress). We return a list of models to the caller (HypersearchWorker) and if one can be successfully inserted into the models table (i.e. it is not a dupl...
def createModels(self, numModels=1):
self._checkForOrphanedModels() modelResults = [] for _ in xrange(numModels): candidateParticle = None if ((self._maxModels is not None) and ((self._resultsDB.numModels() - self._resultsDB.getNumErrModels()) >= self._maxModels)): return (self._okToExit(), []) if (candidate...
'Record or update the results for a model. This is called by the HSW whenever it gets results info for another model, or updated results on a model that is still running. The first time this is called for a given modelID, the modelParams will contain the params dict for that model and the modelParamsHash will contain t...
def recordModelProgress(self, modelID, modelParams, modelParamsHash, results, completed, completionReason, matured, numRecords):
if (results is None): metricResult = None else: metricResult = results[1].values()[0] errScore = self._resultsDB.update(modelID=modelID, modelParams=modelParams, modelParamsHash=modelParamsHash, metricResult=metricResult, completed=completed, completionReason=completionReason, matured=mature...
'Run the given model. This runs the model described by \'modelParams\'. Periodically, it updates the results seen on the model to the model database using the databaseAO (database Access Object) methods. Parameters: modelID: ID of this model in models table jobID: ID for this hypersearch job i...
def runModel(self, modelID, jobID, modelParams, modelParamsHash, jobsDAO, modelCheckpointGUID):
if (not self._createCheckpoints): modelCheckpointGUID = None self._resultsDB.update(modelID=modelID, modelParams=modelParams, modelParamsHash=modelParamsHash, metricResult=None, completed=False, completionReason=None, matured=False, numRecords=0) structuredParams = modelParams['structuredParams'] ...
'Parameters: modelID: ID for this model in the models table jobID: ID for this hypersearch job in the jobs table predictedField: Name of the input field for which this model is being optimized experimentDir: Directory path containing the experiment\'s description.py script reportKeyPatt...
def __init__(self, modelID, jobID, predictedField, experimentDir, reportKeyPatterns, optimizeKeyPattern, jobsDAO, modelCheckpointGUID, logLevel=None, predictionCacheMaxRecords=None):
self._MIN_RECORDS_TO_BE_BEST = int(Configuration.get('nupic.hypersearch.bestModelMinRecords')) self._MATURITY_MAX_CHANGE = float(Configuration.get('nupic.hypersearch.maturityPctChange')) self._MATURITY_NUM_POINTS = int(Configuration.get('nupic.hypersearch.maturityNumPoints')) self._modelID = modelID ...
'Runs the OPF Model Parameters: retval: (completionReason, completionMsg) where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX equates.'
def run(self):
descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(self._experimentDir) expIface = helpers.getExperimentDescriptionInterfaceFromModule(descriptionPyModule) expIface.normalizeStreamSources() modelDescription = expIface.getModelDescription() self._modelControl = expIface.getModelCont...
'Main loop of the OPF Model Runner. Parameters: recordIterator: Iterator for counting number of records (see _runTask) learningOffAt: If not None, learning is turned off when we reach this iteration number'
def __runTaskMainLoop(self, numIters, learningOffAt=None):
self._model.resetSequenceStates() self._currentRecordIndex = (-1) while True: if self._isKilled: break if self._isCanceled: break if self._isInterrupted.isSet(): self.__setAsOrphaned() break if self._isMature: if (no...
'Run final activities after a model has run. These include recording and logging the final score'
def _finalize(self):
self._logger.info('Finished: modelID=%r; %r records processed. Performing final activities', self._modelID, (self._currentRecordIndex + 1)) self._updateModelDBResults() if (not self._isKilled): self.__updateJobResults() else: self.__deleteOutputCache(self._modelID) ...
'Create a checkpoint from the current model, and store it in a dir named after checkpoint GUID, and finally store the GUID in the Models DB'
def __createModelCheckpoint(self):
if ((self._model is None) or (self._modelCheckpointGUID is None)): return if (self._predictionLogger is None): self._createPredictionLogger() predictions = StringIO.StringIO() self._predictionLogger.checkpoint(checkpointSink=predictions, maxRows=int(Configuration.get('nupic.model.checkpo...
'Delete the stored checkpoint for the specified modelID. This function is called if the current model is now the best model, making the old model\'s checkpoint obsolete Parameters: modelID: The modelID for the checkpoint to delete. This is NOT the unique checkpointID'
def __deleteModelCheckpoint(self, modelID):
checkpointID = self._jobsDAO.modelsGetFields(modelID, ['modelCheckpointId'])[0] if (checkpointID is None): return try: shutil.rmtree(os.path.join(self._experimentDir, str(self._modelCheckpointGUID))) except: self._logger.warn('Failed to delete model checkpoint %s. ...
'Creates the model\'s PredictionLogger object, which is an interface to write model results to a permanent storage location'
def _createPredictionLogger(self):
self._predictionLogger = BasicPredictionLogger(fields=self._model.getFieldInfo(), experimentDir=self._experimentDir, label='hypersearch-worker', inferenceType=self._model.getInferenceType()) if self.__loggedMetricPatterns: metricLabels = self.__metricMgr.getMetricLabels() loggedMetrics = matchPa...
'Get the label for the metric being optimized. This function also caches the label in the instance variable self._optimizedMetricLabel Parameters: metricLabels: A sequence of all the labels being computed for this model Returns: The label for the metric being optmized over'
def __getOptimizedMetricLabel(self):
matchingKeys = matchPatterns([self._optimizeKeyPattern], self._getMetricLabels()) if (len(matchingKeys) == 0): raise Exception(('None of the generated metrics match the specified optimization pattern: %s. Available metrics are %s' % (self._optimizeKeyPattern, se...
def _getMetricLabels(self):
    """Return the list of labels corresponding to the metrics being
    computed, as reported by the metrics manager.

    Returns: list of metric labels
    """
    return self.__metricMgr.getMetricLabels()
def _getFieldStats(self):
    """Return a dictionary of field statistics received from the input
    source.

    Returns:
      fieldStats: dict of dicts where the first level is the field name
                  and the second level is the statistic,
                  i.e. fieldStats['pounds']['min']
    """
    fieldStats = dict()
    fieldNames = self._inputSource.getFieldNames()
    for field in fieldNames:
        curStats = dict()
        curStats['min'] = self._inputSource.getFieldMin(field)
        curStats['max'] = self._inputSource.getFieldMax(field)
        fieldStats[field] = curStats
    return fieldStats
def _getMetrics(self):
    """Return all the metrics being computed for this model.

    Protected so that subclasses (e.g. OPFDummyModelRunner) can override
    it with deterministic values.

    Returns: all metrics from the metrics manager
    """
    return self.__metricMgr.getMetrics()
'Retrieves the current results and updates the model\'s record in the Model database.'
def _updateModelDBResults(self):
metrics = self._getMetrics() reportDict = dict([(k, metrics[k]) for k in self._reportMetricLabels]) metrics = self._getMetrics() optimizeDict = dict() if (self._optimizeKeyPattern is not None): optimizeDict[self._optimizedMetricLabel] = metrics[self._optimizedMetricLabel] results = json....
'Periodic check to see if this is the best model. This should only have an effect if this is the *first* model to report its progress'
def __updateJobResultsPeriodic(self):
if (self._isBestModelStored and (not self._isBestModel)): return while True: jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0] if (jobResultsStr is None): jobResults = {} else: self._isBestModelStored = True if (not self._...
'Reads the current "best model" for the job and returns whether or not the current model is better than the "best model" stored for the job Returns: (isBetter, storedBest, origResultsStr) isBetter: True if the current model is better than the stored "best model" storedResults: A dict of the currently stored results in ...
def __checkIfBestCompletedModel(self):
jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0] if (jobResultsStr is None): jobResults = {} else: jobResults = json.loads(jobResultsStr) isSaved = jobResults.get('saved', False) bestMetric = jobResults.get('bestValue', None) currentMetric = self._getMetric...
'Check if this is the best model If so: 1) Write it\'s checkpoint 2) Record this model as the best 3) Delete the previous best\'s output cache Otherwise: 1) Delete our output cache'
def __updateJobResults(self):
isSaved = False while True: (self._isBestModel, jobResults, jobResultsStr) = self.__checkIfBestCompletedModel() if self._isBestModel: if (not isSaved): self.__flushPredictionCache() self._jobsDAO.modelUpdateTimestamp(self._modelID) self...
def _writePrediction(self, result):
    """Write the results of one iteration of the model.

    Results are appended to this ModelRunner's in-memory cache; if this
    model is the job's "best model", the cache is flushed to permanent
    storage immediately.

    Parameters:
    ----------------------------------------------------------------------
    result: an opf_utils.ModelResult object (assumed — TODO confirm
            against caller), appended to the prediction cache
    """
    self.__predictionCache.append(result)
    if self._isBestModel:
        self.__flushPredictionCache()
def __writeRecordsCallback(self):
    """Callback invoked by self.__predictionLogger.writeRecords() between
    each batch of records it writes. Touches the model's DB record so the
    model reads as 'still alive' during long write operations.
    """
    self._jobsDAO.modelUpdateResults(self._modelID)
'Writes the contents of this model\'s in-memory prediction cache to a permanent store via the prediction output stream instance'
def __flushPredictionCache(self):
if (not self.__predictionCache): return if (self._predictionLogger is None): self._createPredictionLogger() startTime = time.time() self._predictionLogger.writeRecords(self.__predictionCache, progressCB=self.__writeRecordsCallback) self._logger.info('Flushed prediction cache; ...
def __deleteOutputCache(self, modelID):
    """Delete the output cache associated with the given modelID.

    This clears up the resources associated with the cache rather than
    deleting all the records in the cache.

    Parameters:
    ----------------------------------------------------------------------
    modelID: the id of the model whose output cache is being deleted
    """
    # Only tear down when it is our own model's cache and a logger exists.
    if modelID == self._modelID and self._predictionLogger is not None:
        self._predictionLogger.close()
        del self.__predictionCache
        self._predictionLogger = None
        self.__predictionCache = None
'Creates and returns a PeriodicActivityMgr instance initialized with our periodic activities Parameters: retval: a PeriodicActivityMgr instance'
def _initPeriodicActivities(self):
updateModelDBResults = PeriodicActivityRequest(repeating=True, period=100, cb=self._updateModelDBResults) updateJobResults = PeriodicActivityRequest(repeating=True, period=100, cb=self.__updateJobResultsPeriodic) checkCancelation = PeriodicActivityRequest(repeating=True, period=50, cb=self.__checkCancelatio...
'Check if the cancelation flag has been set for this model in the Model DB'
def __checkCancelation(self):
print >>sys.stderr, 'reporter:counter:HypersearchWorker,numRecords,50' jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0] if jobCancel: self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED self._isCanceled = True self._logger.info('Model %s canceled because ...
'Save the current metric value and see if the model\'s performance has \'leveled off.\' We do this by looking at some number of previous number of recordings'
def __checkMaturity(self):
if ((self._currentRecordIndex + 1) < self._MIN_RECORDS_TO_BE_BEST): return if self._isMature: return metric = self._getMetrics()[self._optimizedMetricLabel] self._metricRegression.addPoint(x=self._currentRecordIndex, y=metric) (pctChange, absPctChange) = self._metricRegression.getPct...
def handleWarningSignal(self, signum, frame):
    """Handle a "warning signal" from the scheduler, received when the
    scheduler is about to kill the current process to reallocate the
    worker to another job. Sets the interrupted event so the main loop
    can mark the model as orphaned.

    Parameters:
    ----------------------------------------------------------------------
    signum: signal number (unused here; standard signal-handler signature)
    frame:  current stack frame (unused)
    """
    self._isInterrupted.set()
def __setAsOrphaned(self):
    """Set the current model as orphaned in the models table.

    Called when the scheduler is about to kill the process so the worker
    can be reallocated; another worker can then re-run this model.
    """
    cmplReason = ClientJobsDAO.CMPL_REASON_ORPHAN
    cmplMessage = 'Killed by Scheduler'
    self._jobsDAO.modelSetCompleted(self._modelID, cmplReason, cmplMessage)
def GET(self):
    """Handle GET /models.

    :returns: (str) JSON document of the form
        ``{"models": [model1, model2, ...]}`` listing registered model
        names.
    """
    global g_models
    # dict.keys() returns a non-serializable view object on Python 3;
    # materialize it as a list so json.dumps works on both 2.x and 3.x.
    return json.dumps({'models': list(g_models.keys())})
'/models/{name} schema: "modelParams": dict containing model parameters "predictedFieldName": str returns: {"success":name}'
def POST(self, name):
global g_models data = json.loads(web.data()) modelParams = data['modelParams'] predictedFieldName = data['predictedFieldName'] if (name in g_models.keys()): raise web.badrequest(('Model with name <%s> already exists' % name)) model = ModelFactory.create(modelParams) m...
'/models/{name}/run schema: predictedFieldName: value timestamp: %m/%d/%y %H:%M NOTE: predictedFieldName MUST be the same name specified when creating the model. returns: "predictionNumber":<number of record>, "anomalyScore":anomalyScore'
def POST(self, name):
global g_models data = json.loads(web.data()) data['timestamp'] = datetime.datetime.strptime(data['timestamp'], '%m/%d/%y %H:%M') if (name not in g_models.keys()): raise web.notfound(('Model with name <%s> does not exist.' % name)) modelResult = g_models[name].run(data) ...
'Exports a network as a networkx MultiDiGraph intermediate representation suitable for visualization. :return: networkx MultiDiGraph'
def export(self):
graph = nx.MultiDiGraph() regions = self.network.getRegions() for idx in xrange(regions.getCount()): regionPair = regions.getByIndex(idx) regionName = regionPair[0] graph.add_node(regionName, label=regionName) for (linkName, link) in self.network.getLinks(): graph.add_edg...
def render(self, renderer=DEFAULT_RENDERER):
    """Render the network.

    Default is
    :class:`~nupic.frameworks.viz.dot_renderer.DotRenderer`.

    :param renderer: constructor for a "renderer" implementation; the
        resulting object must have a ``render`` method accepting a single
        argument (a networkx graph instance).
    """
    # Export first, then hand the graph to a freshly constructed renderer.
    graph = self.export()
    renderer().render(graph)
def __init__(self, filePath):
    """
    :param filePath: path of file where SP __init__ args are to be saved
    """
    # Only record the destination path; nothing is written here.
    self.__filePath = filePath
def __init__(self, filePath):
    """
    :param filePath: path of file where TM __init__ args are to be saved
    """
    # Only record the destination path; nothing is written here.
    self.__filePath = filePath
def finishLearning(self):
    """The PVM does not learn, so this function has no effect."""
    pass
def setFieldStatistics(self, fieldStats):
    """Since the PVM has no use for this information, this is a no-op.

    :param fieldStats: ignored
    """
    pass
def write(self, proto):
    """Serialize this model via capnp.

    :param proto: capnp PreviousValueModelProto message builder
    """
    # Serialize the base-model portion first, then fill in the
    # PVM-specific fields (assignment order among them is immaterial).
    super(PreviousValueModel, self).writeBaseToProto(proto.modelBase)
    proto.predictedField = self._predictedField
    proto.predictionSteps = self._predictionSteps
    proto.fieldNames = self._fieldNames
    proto.fieldTypes = self._fieldTypes
'Deserialize via capnp :param proto: capnp PreviousValueModelProto message reader :returns: new instance of PreviousValueModel deserialized from the given proto'
@classmethod def read(cls, proto):
instance = object.__new__(cls) super(PreviousValueModel, instance).__init__(proto=proto.modelBase) instance._logger = opf_utils.initLogger(instance) instance._predictedField = proto.predictedField instance._fieldNames = list(proto.fieldNames) instance._fieldTypes = list(proto.fieldTypes) ins...
@classmethod def __getLogger(cls):
    """Get the logger for this object, creating it lazily on first access.

    :returns: (Logger) A Logger object.
    """
    # Lazy initialization: the logger is shared class-wide and created
    # only once.
    if (cls.__logger is None): cls.__logger = opf_utils.initLogger(cls) return cls.__logger
'Create a new model instance, given a description dictionary. :param modelConfig: (dict) A dictionary describing the current model, `described here <../../quick-start/example-model-params.html>`_. :param logLevel: (int) The level of logging output that should be generated :raises Exception: Unsupported model type :retu...
@staticmethod def create(modelConfig, logLevel=logging.ERROR):
logger = ModelFactory.__getLogger() logger.setLevel(logLevel) logger.debug('ModelFactory returning Model from dict: %s', modelConfig) modelClass = None if (modelConfig['model'] == 'HTMPrediction'): modelClass = HTMPredictionModel elif (modelConfig['model'] == 'TwoGram'): ...
@staticmethod
def loadFromCheckpoint(savedModelDir, newSerialization=False):
    """Load saved model.

    :param savedModelDir: (string) Directory of where the experiment is
        to be or was saved
    :param newSerialization: (bool) when True, use the capnp-based
        HTMPredictionModel checkpoint reader; otherwise (default) use the
        legacy ``Model.load`` path.
    :returns: (:class:`nupic.frameworks.opf.model.Model`) The loaded
        model instance.
    """
    return (HTMPredictionModel.readFromCheckpoint(savedModelDir)
            if newSerialization
            else Model.load(savedModelDir))
'Compute the new metrics values, given the next inference/ground-truth values :param results: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`) object that was computed during the last iteration of the model. :returns: (dict) where each key is the metric-name, and the values are it scalar value.'
def update(self, results):
self._addResults(results) if ((not self.__metricSpecs) or (self.__currentInference is None)): return {} metricResults = {} for (metric, spec, label) in zip(self.__metrics, self.__metricSpecs, self.__metricLabels): inferenceElement = spec.inferenceElement field = spec.field ...
def getMetrics(self):
    """Gets the current metric values.

    :returns: (dict) where each key is the metric label and each value is
        the metric's current scalar value (the ``'value'`` entry of the
        metric object's ``getMetric()`` result). Same as the output of
        :meth:`~nupic.frameworks.opf.prediction_metrics_manager.MetricsManager.update`
    """
    return dict(
        (label, metricObj.getMetric()['value'])
        for metricObj, label in zip(self.__metrics, self.__metricLabels))
def getMetricDetails(self, metricLabel):
    """Gets detailed info about a given metric, in addition to its value.

    This may include any statistics or auxiliary data computed for the
    metric.

    :param metricLabel: (string) label of the given metric (see
        :class:`~nupic.frameworks.opf.metrics.MetricSpec`)
    :returns: (dict) of metric information, as returned by the metric
        object's ``getMetric()`` method, or None if the label is unknown
    """
    try:
        metricIndex = self.__metricLabels.index(metricLabel)
    except ValueError:
        # BUG FIX: list.index raises ValueError (not IndexError) when the
        # item is absent; the old "except IndexError" never fired and an
        # unknown label escaped as an uncaught ValueError.
        return None
    return self.__metrics[metricIndex].getMetric()
def getMetricLabels(self):
    """:returns: (tuple) of labels for the metrics that are being
    calculated
    """
    return tuple(self.__metricLabels)
'Stores the current model results in the manager\'s internal store Parameters: results: A ModelResults object that contains the current timestep\'s input/inferences'
def _addResults(self, results):
if self.__isTemporal: shiftedInferences = self.__inferenceShifter.shift(results).inferences self.__currentResult = copy.deepcopy(results) self.__currentResult.inferences = shiftedInferences self.__currentInference = shiftedInferences else: self.__currentResult = copy.deep...
def _getGroundTruth(self, inferenceElement):
    """Get the actual (ground-truth) value for this field.

    :param inferenceElement: The inference element (part of the
        inference) that is being used for this metric
    :returns: the matching sensor-input value, or None when the element
        has no corresponding input field
    """
    inputFieldName = InferenceElement.getInputElement(inferenceElement)
    if inputFieldName is None:
        return None
    # Pull the field straight off the current ground-truth sensor input.
    return getattr(self.__currentGroundTruth.sensorInput, inputFieldName)
def _getInference(self, inferenceElement):
    """Get what the inferred value for this field was.

    :param inferenceElement: The inference element (part of the
        inference) that is being used for this metric
    :returns: the inferred value, or None when no inference is available
        yet
    """
    # Early out: no inference has been recorded for this timestep.
    if self.__currentInference is None:
        return None
    return self.__currentInference.get(inferenceElement, None)
def _getRawGroundTruth(self):
    """Get the raw (unencoded) input record for the current timestep.

    :returns: the ``rawInput`` of the current ground-truth record
    """
    return self.__currentGroundTruth.rawInput
'Creates the required metrics modules Parameters: metricSpecs: A sequence of MetricSpec objects that specify which metric modules to instantiate'
def __constructMetricsModules(self, metricSpecs):
if (not metricSpecs): return self.__metricSpecs = metricSpecs for spec in metricSpecs: if (not InferenceElement.validate(spec.inferenceElement)): raise ValueError(('Invalid inference element for metric spec: %r' % spec)) self.__metrics.append(metrics.get...