desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Run one iteration of TMRegion\'s compute'
def _compute(self, inputs, outputs):
if (self._tfdr is None): raise RuntimeError('TM has not been initialized') self._conditionalBreak() self._iterations += 1 buInputVector = inputs['bottomUpIn'] resetSignal = False if ('resetIn' in inputs): assert (len(inputs['resetIn']) == 1) if (inputs['resetI...
'Doesn\'t include the spatial, temporal and other parameters :returns: (dict) the base Spec for TMRegion.'
@classmethod def getBaseSpec(cls):
spec = dict(description=TMRegion.__doc__, singleNodeOnly=True, inputs=dict(bottomUpIn=dict(description='The input signal, conceptually organized as an\n image pyramid data structure, but ...
@classmethod
def getSpec(cls):
    """Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSpec`.

    The parameters collection is constructed based on the parameters
    specified by the various components (spatialSpec, temporalSpec and
    otherSpec).
    """
    fullSpec = cls.getBaseSpec()
    temporalSpec, otherSpec = _getAdditionalSpecs(temporalImp=gDefaultTemporalImp)
    # Merge the component-specific parameter specs into the base spec.
    for extraSpec in (temporalSpec, otherSpec):
        fullSpec['parameters'].update(extraSpec)
    return fullSpec
def getAlgorithmInstance(self):
    """:returns: instance of the underlying
    :class:`~nupic.algorithms.temporal_memory.TemporalMemory` algorithm
    object.
    """
    # Expose the cached temporal-memory implementation directly.
    return self._tfdr
def getParameter(self, parameterName, index=(-1)):
    """Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameter`.

    Get the value of a parameter. Most parameters are handled automatically
    by :class:`~nupic.bindings.regions.PyRegion.PyRegion`'s parameter get
    mechanism. The ones that need special treatment are explicitly handled
    here.
    """
    # Temporal-memory parameters live on the algorithm instance itself;
    # everything else is resolved by the generic PyRegion mechanism.
    if parameterName not in self._temporalArgNames:
        return PyRegion.getParameter(self, parameterName, index)
    return getattr(self._tfdr, parameterName)
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.'
def setParameter(self, parameterName, index, parameterValue):
if (parameterName in self._temporalArgNames): setattr(self._tfdr, parameterName, parameterValue) elif (parameterName == 'logPathOutput'): self.logPathOutput = parameterValue if (self._fpLogTPOutput is not None): self._fpLogTPOutput.close() self._fpLogTPOutput = No...
def resetSequenceStates(self):
    """Resets the region's sequence states."""
    # Delegate the reset to the algorithm, then rewind our own position
    # marker within the current sequence.
    self._tfdr.reset()
    self._sequencePos = 0
def finishLearning(self):
    """Perform an internal optimization step that speeds up inference if we
    know learning will not be performed anymore. This call may, for example,
    remove all potential inputs to each column.

    :raises RuntimeError: if the temporal memory has not been initialized.
    """
    if self._tfdr is None:
        raise RuntimeError('Temporal memory has not been initialized')
    # Only some temporal-memory implementations support this optimization;
    # silently skip it otherwise.
    if hasattr(self._tfdr, 'finishLearning'):
        self.resetSequenceStates()
        self._tfdr.finishLearning()
@staticmethod
def getSchema():
    """Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`.

    :returns: the TMRegionProto schema class used for serialization.
    """
    return TMRegionProto
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`. Write state to proto object. :param proto: TMRegionProto capnproto object'
def writeToProto(self, proto):
proto.temporalImp = self.temporalImp proto.columnCount = self.columnCount proto.inputWidth = self.inputWidth proto.cellsPerColumn = self.cellsPerColumn proto.learningMode = self.learningMode proto.inferenceMode = self.inferenceMode proto.anomalyMode = self.anomalyMode proto.topDownMode =...
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`. Read state from proto object. :param proto: TMRegionProto capnproto object'
@classmethod def readFromProto(cls, proto):
instance = cls(proto.columnCount, proto.inputWidth, proto.cellsPerColumn) instance.temporalImp = proto.temporalImp instance.learningMode = proto.learningMode instance.inferenceMode = proto.inferenceMode instance.anomalyMode = proto.anomalyMode instance.topDownMode = proto.topDownMode instanc...
def __getstate__(self):
    """Return serializable state.

    This function will return a version of the __dict__ with all "ephemeral"
    members stripped out. "Ephemeral" members are defined as those that do
    not need to be (nor should be) stored in any kind of persistent file
    (e.g., NuPIC network XML file.)
    """
    snapshot = dict(self.__dict__)
    # Drop every ephemeral member; missing keys are ignored.
    for transientName in self._getEphemeralMembersAll():
        snapshot.pop(transientName, None)
    return snapshot
def serializeExtraData(self, filePath):
    """Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.serializeExtraData`.

    :param filePath: (string) file path the algorithm state is written to.
    """
    # Nothing to persist until the algorithm has been initialized.
    if self._tfdr is not None:
        self._tfdr.saveToFile(filePath)
def deSerializeExtraData(self, filePath):
    """Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.deSerializeExtraData`.

    This method is called during network deserialization with an external
    filename that can be used to bypass pickle for loading large binary
    states.

    :param filePath: (string) absolute file path
    """
    # If the algorithm was never initialized there is no state to restore.
    if self._tfdr is not None:
        self._tfdr.loadFromFile(filePath)
def __setstate__(self, state):
    """Set the state of ourself from a serialized state."""
    # Provide defaults for attributes introduced after older pickles were
    # written; any values actually present in `state` take precedence via
    # the update below.
    if not hasattr(self, 'storeDenseOutput'):
        self.storeDenseOutput = False
    if not hasattr(self, 'computePredictedActiveCellIndices'):
        self.computePredictedActiveCellIndices = False
    self.__dict__.update(state)
    self._loaded = True
    self._initialize()
def _initEphemerals(self):
    """Initialize all ephemerals used by derived classes."""
    self._sequencePos = 0        # position within the current sequence
    self._fpLogTPOutput = None   # open file handle for TM output logging
    self.logPathOutput = None    # path of the TM output log, if any
def _getEphemeralMembers(self):
    """Callback that returns a list of all "ephemeral" members (i.e., data
    members that should not and/or cannot be pickled.)
    """
    return [
        '_sequencePos',
        '_fpLogTPOutput',
        'logPathOutput',
    ]
def _getEphemeralMembersBase(self):
    """Returns list of all ephemeral members defined by the base class."""
    return [
        '_loaded',
        '_profileObj',
        '_iterations',
    ]
def _getEphemeralMembersAll(self):
    """Returns a concatenated list of both the standard base class ephemeral
    members, as well as any additional ephemeral members (e.g., file
    handles, etc.).
    """
    combined = self._getEphemeralMembersBase()
    return combined + self._getEphemeralMembers()
'Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.'
def getOutputElementCount(self, name):
if (name == 'bottomUpOut'): return self.outputWidth elif (name == 'topDownOut'): return self.columnCount elif (name == 'lrnActiveStateT'): return self.outputWidth elif (name == 'activeCells'): return self.outputWidth elif (name == 'predictedActiveCells'): retu...
def getParameterArrayCount(self, name, index):
    """Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArrayCount`.

    :raises Exception: if the named parameter is not array-like.
    """
    value = self.getParameter(name)
    # Anything without a length cannot be treated as an array.
    if not hasattr(value, '__len__'):
        raise Exception(
            "Attempt to access parameter '%s' as an array but it is not an array" % name)
    return len(value)
def getParameterArray(self, name, index, a):
    """Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.

    Copies the value of the named array parameter into the caller-supplied
    buffer ``a``.

    :raises Exception: if the named parameter is not array-like.
    """
    value = self.getParameter(name)
    if not hasattr(value, '__len__'):
        raise Exception(
            "Attempt to access parameter '%s' as an array but it is not an array" % name)
    # Slice-assign so the caller's buffer object is filled in place.
    if len(value) > 0:
        a[:] = value[:]
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.'
@classmethod def getSpec(cls):
ns = dict(description=SDRClassifierRegion.__doc__, singleNodeOnly=True, inputs=dict(actValueIn=dict(description='Actual value of the field to predict. Only taken into account if the input has no category field.', dataType='Real32', count=0, required=False, regionLe...
def initialize(self):
    """Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.initialize`.

    Is called once by NuPIC before the first call to compute(). Initializes
    self._sdrClassifier if it is not already initialized.
    """
    if self._sdrClassifier is not None:
        return
    self._sdrClassifier = SDRClassifierFactory.create(
        steps=self.stepsList,
        alpha=self.alpha,
        verbosity=self.verbosity,
        implementation=self.implementation)
def getAlgorithmInstance(self):
    """:returns: (:class:`nupic.regions.sdr_classifier_region.SDRClassifierRegion`)"""
    # The classifier is created lazily by initialize(); may be None before
    # the region has been initialized.
    return self._sdrClassifier
def getParameter(self, name, index=(-1)):
    """Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`."""
    # No special-cased parameters here; defer entirely to the base class.
    return PyRegion.getParameter(self, name, index)
def setParameter(self, name, index, value):
    """Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`."""
    # The two mode flags are coerced to bool via int so string values like
    # "1"/"0" work; everything else goes through the base class.
    if name == 'learningMode':
        self.learningMode = bool(int(value))
    elif name == 'inferenceMode':
        self.inferenceMode = bool(int(value))
    else:
        return PyRegion.setParameter(self, name, index, value)
@staticmethod
def getSchema():
    """:returns: the pycapnp proto type that the class uses for serialization."""
    return SDRClassifierRegionProto
def writeToProto(self, proto):
    """Write state to proto object.

    :param proto: SDRClassifierRegionProto capnproto object
    """
    proto.implementation = self.implementation
    proto.steps = self.steps
    proto.alpha = self.alpha
    proto.verbosity = self.verbosity
    proto.maxCategoryCount = self.maxCategoryCount
    # The classifier serializes itself into its own sub-message.
    self._sdrClassifier.write(proto.sdrClassifier)
@classmethod
def readFromProto(cls, proto):
    """Read state from proto object.

    :param proto: SDRClassifierRegionProto capnproto object
    """
    instance = cls()
    instance.implementation = proto.implementation
    instance.steps = proto.steps
    instance.alpha = proto.alpha
    instance.verbosity = proto.verbosity
    instance.maxCategoryCount = proto.maxCategoryCount
    # NOTE(review): the factory is handed the whole region proto here, while
    # writeToProto serialized into proto.sdrClassifier — presumably
    # SDRClassifierFactory.read unwraps the sub-message itself; verify.
    instance._sdrClassifier = SDRClassifierFactory.read(proto)
    return instance
'Process one input sample. This method is called by the runtime engine. :param inputs: (dict) mapping region input names to numpy.array values :param outputs: (dict) mapping region output names to numpy.arrays that should be populated with output values by this method'
def compute(self, inputs, outputs):
self._computeFlag = True patternNZ = inputs['bottomUpIn'].nonzero()[0] if self.learningMode: categories = [category for category in inputs['categoryIn'] if (category >= 0)] if (len(categories) > 0): bucketIdxList = [] actValueList = [] for category in cate...
'Just return the inference value from one input sample. The actual learning happens in compute() -- if, and only if learning is enabled -- which is called when you run the network. .. warning:: This method is deprecated and exists only to maintain backward compatibility. This method is deprecated, and will be removed. ...
def customCompute(self, recordNum, patternNZ, classification):
if (not hasattr(self, '_computeFlag')): self._computeFlag = False if self._computeFlag: warnings.simplefilter('error', DeprecationWarning) warnings.warn('The customCompute() method should not be called at the same time as the compute() method. ...
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.'
def getOutputElementCount(self, outputName):
if (outputName == 'categoriesOut'): return len(self.stepsList) elif (outputName == 'probabilities'): return (len(self.stepsList) * self.maxCategoryCount) elif (outputName == 'actualValues'): return self.maxCategoryCount else: raise ValueError('Unknown output {}.'.fo...
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.'
@classmethod def getSpec(cls):
ns = dict(description=KNNAnomalyClassifierRegion.__doc__, singleNodeOnly=True, inputs=dict(spBottomUpOut=dict(description='The output signal generated from the bottom-up inputs\n from ...
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.'
def getParameter(self, name, index=(-1)):
if (name == 'trainRecords'): return self.trainRecords elif (name == 'anomalyThreshold'): return self.anomalyThreshold elif (name == 'activeColumnCount'): return self._activeColumnCount elif (name == 'classificationMaxDist'): return self._classificationMaxDist else: ...
'Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.'
def setParameter(self, name, index, value):
if (name == 'trainRecords'): if (not (isinstance(value, float) or isinstance(value, int))): raise HTMPredictionModelInvalidArgument(("Invalid argument type '%s'. threshold must be a number." % type(value))) if ((len(self._recordsCache) > 0) and (value < self._reco...
'Process one input sample. This method is called by the runtime engine.'
def compute(self, inputs, outputs):
record = self._constructClassificationRecord(inputs) if (record.ROWID >= self.getParameter('trainRecords')): self._classifyState(record) self._recordsCache.append(record) while (len(self._recordsCache) > self.cacheSize): self._recordsCache.pop(0) self.labelResults = record.anomalyLab...
def getLabelResults(self):
    """Get the labels of the previously computed record.

    :returns: (list) of strings representing the classification labels
    """
    return self.labelResults
def _classifyStates(self):
    """Reclassifies all internal state."""
    # Re-run classification over every record currently held in the cache.
    for cachedRecord in self._recordsCache:
        self._classifyState(cachedRecord)
'Reclassifies given state.'
def _classifyState(self, state):
if (state.ROWID < self.getParameter('trainRecords')): if (not state.setByUser): state.anomalyLabel = [] self._deleteRecordsFromKNN([state]) return label = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL autoLabel = (label + KNNAnomalyClassifierRegion.AUTO_T...
'Construct a _HTMClassificationRecord based on the state of the model passed in through the inputs. Types for self.classificationVectorType: 1 - TM active cells in learn state 2 - SP columns concatenated with error from TM column predictions and SP'
def _constructClassificationRecord(self, inputs):
allSPColumns = inputs['spBottomUpOut'] activeSPColumns = allSPColumns.nonzero()[0] score = anomaly.computeRawAnomalyScore(activeSPColumns, self._prevPredictedColumns) spSize = len(allSPColumns) allTPCells = inputs['tpTopDownOut'] tpSize = len(inputs['tpLrnActiveStateT']) classificationVector...
'Adds the record to the KNN classifier.'
def _addRecordToKNN(self, record):
knn = self._knnclassifier._knn prototype_idx = self._knnclassifier.getParameter('categoryRecencyList') category = self._labelListToCategoryNumber(record.anomalyLabel) if (record.ROWID in prototype_idx): knn.prototypeSetCategory(record.ROWID, category) return pattern = self._getStateA...
'Removes the given records from the classifier. parameters recordsToDelete - list of records to delete from the classififier'
def _deleteRecordsFromKNN(self, recordsToDelete):
prototype_idx = self._knnclassifier.getParameter('categoryRecencyList') idsToDelete = [r.ROWID for r in recordsToDelete if ((not r.setByUser) and (r.ROWID in prototype_idx))] nProtos = self._knnclassifier._knn._numPatterns self._knnclassifier._knn.removeIds(idsToDelete) assert (self._knnclassifier._...
'Removes any stored records within the range from start to end. Noninclusive of end. parameters start - integer representing the ROWID of the start of the deletion range, end - integer representing the ROWID of the end of the deletion range, if None, it will default to end.'
def _deleteRangeFromKNN(self, start=0, end=None):
prototype_idx = numpy.array(self._knnclassifier.getParameter('categoryRecencyList')) if (end is None): end = (prototype_idx.max() + 1) idsIdxToDelete = numpy.logical_and((prototype_idx >= start), (prototype_idx < end)) idsToDelete = prototype_idx[idsIdxToDelete] nProtos = self._knnclassifier...
'returns the classified labeling of record'
def _recomputeRecordFromKNN(self, record):
inputs = {'categoryIn': [None], 'bottomUpIn': self._getStateAnomalyVector(record)} outputs = {'categoriesOut': numpy.zeros((1,)), 'bestPrototypeIndices': numpy.zeros((1,)), 'categoryProbabilitiesOut': numpy.zeros((1,))} classifier_indexes = numpy.array(self._knnclassifier.getParameter('categoryRecencyList')...
def _labelToCategoryNumber(self, label):
    """Since the KNN Classifier stores categories as numbers, we must store
    each label as a number. This method converts from a label to a unique
    number. Each label is assigned a unique bit so multiple labels may be
    assigned to a single record.
    """
    # First sighting of a label registers it; its bit position is its index
    # in the saved-categories list.
    if label not in self.saved_categories:
        self.saved_categories.append(label)
    return 2 ** self.saved_categories.index(label)
def _labelListToCategoryNumber(self, labelList):
    """This method takes a list of labels and returns a unique category
    number. This enables this class to store a list of categories for each
    point since the KNN classifier only stores a single number category for
    each record.
    """
    # Each label contributes one unique bit, so summing is equivalent to
    # OR-ing the per-label category numbers together.
    return sum(self._labelToCategoryNumber(label) for label in labelList)
def _categoryToLabelList(self, category):
    """Converts a category number into a list of labels."""
    if category is None:
        return []
    labels = []
    bitPos = 0
    # Walk the set bits of the category bitmask; each bit maps back to the
    # label at the same index in saved_categories.
    while category > 0:
        if category & 1:
            labels.append(self.saved_categories[bitPos])
        bitPos += 1
        category >>= 1
    return labels
def _getStateAnomalyVector(self, state):
    """Returns a state's anomaly vector, converting it from sparse to dense."""
    dense = numpy.zeros(self._anomalyVectorLength)
    # state.anomalyVector holds the indices of the active entries.
    dense[state.anomalyVector] = 1
    return dense
'Get the labels on classified points within range start to end. Not inclusive of end. :returns: (dict) with format: \'isProcessing\': boolean, \'recordLabels\': list of results ``isProcessing`` - currently always false as recalculation blocks; used if reprocessing of records is still being performed; Each item in ``rec...
def getLabels(self, start=None, end=None):
if (len(self._recordsCache) == 0): return {'isProcessing': False, 'recordLabels': []} try: start = int(start) except Exception: start = 0 try: end = int(end) except Exception: end = self._recordsCache[(-1)].ROWID if (end <= start): raise HTMPredict...
'Add the label labelName to each record with record ROWID in range from ``start`` to ``end``, noninclusive of end. This will recalculate all points from end to the last record stored in the internal cache of this classifier. :param start: (int) start index :param end: (int) end index (noninclusive) :param labelName: (s...
def addLabel(self, start, end, labelName):
if (len(self._recordsCache) == 0): raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'. Model has no saved records.") try: start = int(start) except Exception: start = 0 try: end = int(end) except Exception: ...
'Remove labels from each record with record ROWID in range from ``start`` to ``end``, noninclusive of end. Removes all records if ``labelFilter`` is None, otherwise only removes the labels equal to ``labelFilter``. This will recalculate all points from end to the last record stored in the internal cache of this classif...
def removeLabels(self, start=None, end=None, labelFilter=None):
if (len(self._recordsCache) == 0): raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'removeLabels'. Model has no saved records.") try: start = int(start) except Exception: start = 0 try: end = int(end) except Exception: ...
def __getstate__(self):
    """Return serializable state.

    This function will return a version of the __dict__ with all "ephemeral"
    members stripped out. "Ephemeral" members are defined as those that do
    not need to be (nor should be) stored in any kind of persistent file
    (e.g., NuPIC network XML file.)
    """
    state = dict(self.__dict__)
    # The embedded KNN classifier region pickles via its own __getstate__;
    # keep its state under a separate key and drop the live object.
    state['_knnclassifierProps'] = state['_knnclassifier'].__getstate__()
    del state['_knnclassifier']
    return state
'Set the state of ourself from a serialized state.'
def __setstate__(self, state):
if (('_version' not in state) or (state['_version'] == 1)): knnclassifierProps = state.pop('_knnclassifierProps') self.__dict__.update(state) self._knnclassifier = KNNClassifierRegion(**self._knnclassifierArgs) self._knnclassifier.__setstate__(knnclassifierProps) self._versio...
def getOutputElementCount(self, name):
    """Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.

    :raises Exception: for any output other than 'labels'.
    """
    if name != 'labels':
        raise Exception('Invalid output name specified')
    return self._maxLabelOutputs
def getSensedValue(self):
    """:return: sensed value"""
    # Simple accessor for the value set via setSensedValue().
    return self._sensedValue
def setSensedValue(self, value):
    """:param value: will be encoded when this region does a compute."""
    self._sensedValue = value
@classmethod
def dbNamePrefix(cls):
    """Get the beginning part of the database name for the current version
    of the database. This, concatenated with
    '_' + Configuration.get('nupic.cluster.database.nameSuffix') will
    produce the actual database name used.
    """
    # Delegate to the version-specific helper with the current DB version.
    # (The double-underscore call text is kept verbatim so Python's class
    # name mangling resolves to the same private method.)
    return cls.__getDBNamePrefixForVersion(cls._DB_VERSION)
@classmethod
def __getDBNamePrefixForVersion(cls, dbVersion):
    """Get the beginning part of the database name for the given database
    version. This, concatenated with
    '_' + Configuration.get('nupic.cluster.database.nameSuffix') will
    produce the actual database name used.

    Parameters:
    ----------------------------------------------------------------
    dbVersion:  ClientJobs database version number
    retval:     the ClientJobs database name prefix
    """
    return '%s_v%d' % (cls._DB_ROOT_NAME, dbVersion)
@classmethod
def _getDBName(cls):
    """Generates the ClientJobs database name for the current version of the
    database; "semi-private" class method for use by friends of the class.

    Parameters:
    ----------------------------------------------------------------
    retval:     the ClientJobs database name
    """
    return cls.__getDBNameForVersion(cls._DB_VERSION)
@classmethod
def __getDBNameForVersion(cls, dbVersion):
    """Generates the ClientJobs database name for the given version of the
    database.

    Parameters:
    ----------------------------------------------------------------
    dbVersion:  ClientJobs database version number
    retval:     the ClientJobs database name for the given DB version
    """
    prefix = cls.__getDBNamePrefixForVersion(dbVersion)
    rawSuffix = Configuration.get('nupic.cluster.database.nameSuffix')
    # Replace characters that are not safe in database names.
    safeSuffix = rawSuffix.replace('-', '_').replace('.', '_')
    return '%s_%s' % (prefix, safeSuffix)
@staticmethod
@logExceptions(_LOGGER)
def get():
    """Get the instance of the ClientJobsDAO created for this process (or
    perhaps at some point in the future, for this thread).

    Parameters:
    ----------------------------------------------------------------
    retval:     instance of ClientJobsDAO
    """
    # Lazily create and connect the process-wide singleton on first use.
    if ClientJobsDAO._instance is None:
        instance = ClientJobsDAO()
        instance.connect()
        ClientJobsDAO._instance = instance
    return ClientJobsDAO._instance
'Instantiate a ClientJobsDAO instance. Parameters:'
@logExceptions(_LOGGER) def __init__(self):
self._logger = _LOGGER assert (ClientJobsDAO._instance is None) self.dbName = self._getDBName() self._jobs = self._JobsTableInfo() self._jobs.tableName = ('%s.jobs' % self.dbName) self._models = self._ModelsTableInfo() self._models.tableName = ('%s.models' % self.dbName) self._connection...
def _columnNameDBToPublic(self, dbName):
    """Convert a database internal column name to a public name. This takes
    something of the form word1_word2_word3 and converts it to:
    word1Word2Word3. If the db field name starts with '_', it is stripped
    out so that the name is compatible with collections.namedtuple.

    For example: _word1_word2_word3 => word1Word2Word3
    """
    parts = dbName.split('_')
    if dbName.startswith('_'):
        # A leading underscore yields an empty first element; drop it so the
        # result is a valid namedtuple field name.
        parts = parts[1:]
    # First word stays as-is; subsequent words get their first letter
    # upper-cased (the rest of each word is left untouched).
    return parts[0] + ''.join(w[0].upper() + w[1:] for w in parts[1:])
'Locate the current version of the jobs DB or create a new one, and optionally delete old versions laying around. If desired, this method can be called at any time to re-create the tables from scratch, delete old versions of the database, etc. Parameters: deleteOldVersions: if true, delete any old versions of the DB ...
@logExceptions(_LOGGER) @g_retrySQL def connect(self, deleteOldVersions=False, recreate=False):
with ConnectionFactory.get() as conn: self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions, recreate=recreate) conn.cursor.execute('SELECT CONNECTION_ID()') self._connectionID = conn.cursor.fetchall()[0][0] self._logger.info('clientJobsConnectionID=%r', self._c...
'Initialize tables, if needed Parameters: cursor: SQL cursor deleteOldVersions: if true, delete any old versions of the DB left on the server recreate: if true, recreate the database from scratch even if it already exists.'
@logExceptions(_LOGGER) def _initTables(self, cursor, deleteOldVersions, recreate):
if deleteOldVersions: self._logger.info('Dropping old versions of client_jobs DB; called from: %r', traceback.format_stack()) for i in range(self._DB_VERSION): cursor.execute(('DROP DATABASE IF EXISTS %s' % (self.__getDBNameForVersion(i),))) if rec...
'Return a sequence of matching rows with the requested field values from a table or empty sequence if nothing matched. tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance conn: Owned connection acquired from ConnectionFactory.get() fieldsToMatch: Dictionary of internal fieldName/va...
def _getMatchingRowsNoRetries(self, tableInfo, conn, fieldsToMatch, selectFieldNames, maxRows=None):
assert fieldsToMatch, repr(fieldsToMatch) assert all(((k in tableInfo.dbFieldNames) for k in fieldsToMatch.iterkeys())), repr(fieldsToMatch) assert selectFieldNames, repr(selectFieldNames) assert all(((f in tableInfo.dbFieldNames) for f in selectFieldNames)), repr(selectFieldNames) matchPairs = fiel...
@g_retrySQL
def _getMatchingRowsWithRetries(self, tableInfo, fieldsToMatch, selectFieldNames, maxRows=None):
    """Like _getMatchingRowsNoRetries(), but with retries on transient MySQL
    failures.
    """
    # Acquire a pooled connection for the duration of the query only.
    with ConnectionFactory.get() as conn:
        return self._getMatchingRowsNoRetries(
            tableInfo, conn, fieldsToMatch, selectFieldNames, maxRows)
def _getOneMatchingRowNoRetries(self, tableInfo, conn, fieldsToMatch, selectFieldNames):
    """Return a single matching row with the requested field values from the
    requested table, or None if nothing matched.

    tableInfo:        Table information: a ClientJobsDAO._TableInfoBase instance
    conn:             Owned connection acquired from ConnectionFactory.get()
    fieldsToMatch:    Dictionary of internal fieldName/value mappings
    selectFieldNames: Field names to select
    """
    rows = self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch,
                                          selectFieldNames, maxRows=1)
    if not rows:
        return None
    assert len(rows) == 1, repr(len(rows))
    return rows[0]
@g_retrySQL
def _getOneMatchingRowWithRetries(self, tableInfo, fieldsToMatch, selectFieldNames):
    """Like _getOneMatchingRowNoRetries(), but with retries on transient
    MySQL failures.
    """
    with ConnectionFactory.get() as conn:
        return self._getOneMatchingRowNoRetries(
            tableInfo, conn, fieldsToMatch, selectFieldNames)
'Attempt to insert a row with the given parameters into the jobs table. Return jobID of the inserted row, or of an existing row with matching client/jobHash key. The combination of client and jobHash are expected to be unique (enforced by a unique index on the two columns). NOTE: It\'s possibe that this or another proc...
def _insertOrGetUniqueJobNoRetries(self, conn, client, cmdLine, jobHash, clientInfo, clientKey, params, minimumWorkers, maximumWorkers, jobType, priority, alreadyRunning):
assert (len(client) <= self.CLIENT_MAX_LEN), ('client too long:' + repr(client)) assert cmdLine, ('Unexpected empty or None command-line: ' + repr(cmdLine)) assert (len(jobHash) == self.HASH_MAX_LEN), ('wrong hash len=%d' % len(jobHash)) if alreadyRunning: initStatus =...
'Resumes processing of an existing job that is presently in the STATUS_COMPLETED state. NOTE: this is primarily for resuming suspended Production and Stream Jobs; DO NOT use it on Hypersearch jobs. This prepares an existing job entry to resume processing. The CJM is always periodically sweeping the jobs table and when ...
def _resumeJobNoRetries(self, conn, jobID, alreadyRunning):
if alreadyRunning: initStatus = self.STATUS_TESTMODE else: initStatus = self.STATUS_NOTSTARTED assignments = ['status=%s', 'completion_reason=DEFAULT', 'completion_msg=DEFAULT', 'worker_completion_reason=DEFAULT', 'worker_completion_msg=DEFAULT', 'end_time=DEFAULT', 'cancel=DEFAULT', '_eng_l...
def getConnectionID(self):
    """Return our connection ID. This can be used for worker identification
    purposes.

    NOTE: the actual MySQL connection ID used in queries may change from
    time to time if connection is re-acquired (e.g., upon MySQL server
    restart) or when more than one entry from the connection pool has been
    used (e.g., multi-threaded apps).
    """
    return self._connectionID
@logExceptions(_LOGGER)
def jobSuspend(self, jobID):
    """Requests a job to be suspended.

    NOTE: this is primarily for suspending Production Jobs; DO NOT use it on
    Hypersearch jobs. For canceling any job type, use jobCancel() instead!

    Parameters:
    ----------------------------------------------------------------
    jobID:      jobID of the job to resume
    retval:     nothing
    """
    # Suspension is currently implemented as a cancellation request; the
    # job can be brought back later with jobResume().
    self.jobCancel(jobID)
'Resumes processing of an existing job that is presently in the STATUS_COMPLETED state. NOTE: this is primarily for resuming suspended Production Jobs; DO NOT use it on Hypersearch jobs. NOTE: The job MUST be in the STATUS_COMPLETED state at the time of this call, otherwise an exception will be raised. This prepares an...
@logExceptions(_LOGGER) def jobResume(self, jobID, alreadyRunning=False):
row = self.jobGetFields(jobID, ['status']) (jobStatus,) = row if (jobStatus != self.STATUS_COMPLETED): raise RuntimeError(('Failed to resume job: job was not suspended; jobID=%s; job status=%r' % (jobID, jobStatus))) @g_retrySQL def resumeWithRetries(): ...
'Add an entry to the jobs table for a new job request. This is called by clients that wish to startup a new job, like a Hypersearch, stream job, or specific model evaluation from the engine. This puts a new entry into the jobs table. The CJM is always periodically sweeping the jobs table and when it finds a new job, wi...
@logExceptions(_LOGGER) def jobInsert(self, client, cmdLine, clientInfo='', clientKey='', params='', alreadyRunning=False, minimumWorkers=0, maximumWorkers=0, jobType='', priority=DEFAULT_JOB_PRIORITY):
jobHash = self._normalizeHash(uuid.uuid1().bytes) @g_retrySQL def insertWithRetries(): with ConnectionFactory.get() as conn: return self._insertOrGetUniqueJobNoRetries(conn, client=client, cmdLine=cmdLine, jobHash=jobHash, clientInfo=clientInfo, clientKey=clientKey, params=params, minimu...
'Add an entry to the jobs table for a new job request, but only if the same job, by the same client is not already running. If the job is already running, or queued up to run, this call does nothing. If the job does not exist in the jobs table or has completed, it will be inserted and/or started up again. This method i...
@logExceptions(_LOGGER) def jobInsertUnique(self, client, cmdLine, jobHash, clientInfo='', clientKey='', params='', minimumWorkers=0, maximumWorkers=0, jobType='', priority=DEFAULT_JOB_PRIORITY):
assert cmdLine, ('Unexpected empty or None command-line: ' + repr(cmdLine)) @g_retrySQL def insertUniqueWithRetries(): jobHashValue = self._normalizeHash(jobHash) jobID = None with ConnectionFactory.get() as conn: row = self._getOneMatchingRowNoRetries(self...
'Place the given job in STATUS_RUNNING mode; the job is expected to be STATUS_NOTSTARTED. NOTE: this function was factored out of jobStartNext because it\'s also needed for testing (e.g., test_client_jobs_dao.py)'
@g_retrySQL def _startJobWithRetries(self, jobID):
with ConnectionFactory.get() as conn: query = ('UPDATE %s SET status=%%s, _eng_cjm_conn_id=%%s, start_time=UTC_TIMESTAMP(), _eng_last_update_time=...
@logExceptions(_LOGGER)
def jobStartNext(self):
    """For use only by Nupic Scheduler (also known as ClientJobManager).
    Look through the jobs table and see if any new job requests have been
    queued up. If so, pick one and mark it as starting up and create the
    model table to hold the results.

    Parameters:
    ----------------------------------------------------------------
    retval:     jobID of the job we are starting up, if found; None if not
                found
    """
    row = self._getOneMatchingRowWithRetries(
        self._jobs, dict(status=self.STATUS_NOTSTARTED), ['job_id'])
    if row is None:
        return None
    (jobID,) = row
    self._startJobWithRetries(jobID)
    return jobID
'Look through the jobs table and reactivate all that are already in the running state by setting their _eng_allocate_new_workers fields to True; used by Nupic Scheduler as part of its failure-recovery procedure.'
@logExceptions(_LOGGER) @g_retrySQL def jobReactivateRunningJobs(self):
with ConnectionFactory.get() as conn: query = ('UPDATE %s SET _eng_cjm_conn_id=%%s, _eng_allocate_new_workers=TRUE WHERE status=%%s ' % (self.jobsTableName,)) conn.cursor.execute(query, [self._connectionID, ...
'Look through the jobs table and get the demand - minimum and maximum number of workers requested, if new workers are to be allocated, if there are any untended dead workers, for all running jobs. Parameters: retval: list of ClientJobsDAO._jobs.jobDemandNamedTuple nametuples containing the demand - min and max wor...
@logExceptions(_LOGGER)
def jobGetDemand(self):
    """Return the worker demand of every currently-running job.

    For each job whose status is STATUS_RUNNING, fetches the fields of
    the jobDemandNamedTuple schema (min/max workers requested, whether new
    workers should be allocated, dead-worker info, ...).

    Returns a list of ClientJobsDAO._jobs.jobDemandNamedTuple instances,
    one per running job.
    """
    demandTuple = self._jobs.jobDemandNamedTuple
    # Translate the public (camelBack) field names into DB column names.
    dbColumns = [self._jobs.pubToDBNameDict[name]
                 for name in demandTuple._fields]
    matches = self._getMatchingRowsWithRetries(
        self._jobs, dict(status=self.STATUS_RUNNING), dbColumns)
    return [demandTuple._make(row) for row in matches]
'Set cancel field of all currently-running jobs to true.'
@logExceptions(_LOGGER)
@g_retrySQL
def jobCancelAllRunningJobs(self):
    """Set the cancel field of every not-yet-completed job to true."""
    sql = ('UPDATE %s SET cancel=TRUE WHERE status<>%%s '
           % (self.jobsTableName,))
    with ConnectionFactory.get() as conn:
        conn.cursor.execute(sql, [self.STATUS_COMPLETED])
'Look through the jobs table and count the running jobs whose cancel field is true. Parameters: retval: A count of running jobs with the cancel field set to true.'
@logExceptions(_LOGGER)
@g_retrySQL
def jobCountCancellingJobs(self):
    """Count the running jobs whose cancel field is true.

    Returns the number of not-completed jobs with cancel set to true.
    """
    sql = ('SELECT COUNT(job_id) FROM %s WHERE (status<>%%s AND cancel is TRUE)'
           % (self.jobsTableName,))
    with ConnectionFactory.get() as conn:
        conn.cursor.execute(sql, [self.STATUS_COMPLETED])
        resultRows = conn.cursor.fetchall()
    # COUNT(...) always yields exactly one row with one column.
    return resultRows[0][0]
'Look through the jobs table and get the list of running jobs whose cancel field is true. Parameters: retval: A (possibly empty) sequence of running job IDs with cancel field set to true'
@logExceptions(_LOGGER)
@g_retrySQL
def jobGetCancellingJobs(self):
    """List the running jobs whose cancel field is true.

    Returns a (possibly empty) tuple of job IDs of not-completed jobs
    with cancel set to true.
    """
    sql = ('SELECT job_id FROM %s WHERE (status<>%%s AND cancel is TRUE)'
           % (self.jobsTableName,))
    with ConnectionFactory.get() as conn:
        conn.cursor.execute(sql, [self.STATUS_COMPLETED])
        fetched = conn.cursor.fetchall()
    return tuple(row[0] for row in fetched)
'Generator to allow iterating slices at dynamic intervals Parameters: data: Any data structure that supports slicing (i.e. list or tuple) *intervals: Iterable of intervals. The sum of intervals should be less than, or equal to the length of data.'
@staticmethod
@logExceptions(_LOGGER)
def partitionAtIntervals(data, intervals):
    """Generator yielding consecutive slices of *data*, one per interval.

    Parameters:
    data:      any structure that supports slicing (e.g. list or tuple)
    intervals: iterable of slice lengths; their sum must be less than or
               equal to len(data)

    Yields one slice of *data* per interval, each slice starting where
    the previous one ended.
    """
    # Materialize once: the original iterated `intervals` twice (sum()
    # and the loop), which silently yielded nothing for generator input.
    intervals = list(intervals)
    assert sum(intervals) <= len(data)

    start = 0
    for interval in intervals:
        end = start + interval
        yield data[start:end]
        start = end
    # BUG FIX: the original ended with `raise StopIteration`, which
    # PEP 479 (Python 3.7+) converts to a RuntimeError when the generator
    # is exhausted.  Simply falling off the end is the correct way to
    # finish a generator.
'Return a list of namedtuples from the result of a join query. A single database result is partitioned at intervals corresponding to the fields in namedTuples. The return value is the result of applying namedtuple._make() to each of the partitions, for each of the namedTuples. Parameters: result: Tuple repres...
@staticmethod
@logExceptions(_LOGGER)
def _combineResults(result, *namedTuples):
    """Split a single joined-query row into one namedtuple per schema.

    The flat *result* sequence is partitioned at intervals matching the
    field counts of each namedtuple class, and each partition is turned
    into an instance of the corresponding class via _make().

    Parameters:
    result:      tuple representing a single result from a join query
    namedTuples: namedtuple classes, in the same order as the joined
                 tables' columns appear in *result*

    Returns a list with one namedtuple instance per class in
    *namedTuples*.
    """
    fieldCounts = [len(ntClass._fields) for ntClass in namedTuples]
    partitions = ClientJobsDAO.partitionAtIntervals(result, fieldCounts)
    return [ntClass._make(part)
            for ntClass, part in zip(namedTuples, partitions)]
'Get all info about a job, with model details, if available. Parameters: job: jobID of the job to query retval: A sequence of two-tuples if the jobID exists in the jobs table (exception is raised if it doesn\'t exist). Each two-tuple contains an instance of jobInfoNamedTuple as the first element and an instance of mo...
@logExceptions(_LOGGER) @g_retrySQL def jobInfoWithModels(self, jobID):
combinedResults = None with ConnectionFactory.get() as conn: query = ' '.join([('SELECT %s.*, %s.*' % (self.jobsTableName, self.modelsTableName)), ('FROM %s' % self.jobsTableName), ('LEFT JOIN %s USING(job_id)' % self.modelsTableName), 'WHERE job_id=%s']) conn.cursor.exec...
'Get all info about a job Parameters: job: jobID of the job to query retval: namedtuple containing the job info.'
@logExceptions(_LOGGER) def jobInfo(self, jobID):
row = self._getOneMatchingRowWithRetries(self._jobs, dict(job_id=jobID), [self._jobs.pubToDBNameDict[n] for n in self._jobs.jobInfoNamedTuple._fields]) if (row is None): raise RuntimeError(('jobID=%s not found within the jobs table' % jobID)) return self._jobs.jobInfoNamedTuple._ma...
'Change the status on the given job Parameters: job: jobID of the job to change status status: new status string (ClientJobsDAO.STATUS_xxxxx) useConnectionID: True if the connection id of the calling function must be the same as the connection that created the job. Set to False for hypersearch workers'
@logExceptions(_LOGGER) @g_retrySQL def jobSetStatus(self, jobID, status, useConnectionID=True):
with ConnectionFactory.get() as conn: query = ('UPDATE %s SET status=%%s, _eng_last_update_time=UTC_TIMESTAMP() WHERE job_id=%%s' % (self.jobsTableName,)) sqlParams = [status, jobID] ...
'Change the status on the given job to completed Parameters: job: jobID of the job to mark as completed completionReason: completionReason string completionMsg: completionMsg string useConnectionID: True if the connection id of the calling function must be the same as the connection that create...
@logExceptions(_LOGGER) @g_retrySQL def jobSetCompleted(self, jobID, completionReason, completionMsg, useConnectionID=True):
with ConnectionFactory.get() as conn: query = ('UPDATE %s SET status=%%s, completion_reason=%%s, completion_msg=%%s, end_t...
'Cancel the given job. This will update the cancel field in the jobs table and will result in the job being cancelled. Parameters: jobID: jobID of the job to mark as completed to False for hypersearch workers'
@logExceptions(_LOGGER)
def jobCancel(self, jobID):
    """Request cancellation of the given job.

    Updates the cancel field in the jobs table, which results in the job
    being cancelled.  useConnectionID is forced to False so hypersearch
    workers may cancel jobs they did not create.

    Parameters:
    jobID: ID of the job to cancel
    """
    self._logger.info('Canceling jobID=%s', jobID)
    self.jobSetFields(jobID, dict(cancel=True), useConnectionID=False)
'Fetch all the modelIDs that correspond to a given jobID; empty sequence if none'
@logExceptions(_LOGGER)
def jobGetModelIDs(self, jobID):
    """Fetch all modelIDs belonging to *jobID*; empty sequence if none."""
    matches = self._getMatchingRowsWithRetries(
        self._models, {'job_id': jobID}, ['model_id'])
    return [match[0] for match in matches]
'Return the number of jobs for the given clientInfo and a status that is not completed.'
@logExceptions(_LOGGER) @g_retrySQL def getActiveJobCountForClientInfo(self, clientInfo):
with ConnectionFactory.get() as conn: query = ('SELECT count(job_id) FROM %s WHERE client_info = %%s AND status != %%s' % self.jobsTableName) conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED]) activeJobCount = conn.cursor.fetchone()[0] re...
'Return the number of jobs for the given clientKey and a status that is not completed.'
@logExceptions(_LOGGER) @g_retrySQL def getActiveJobCountForClientKey(self, clientKey):
with ConnectionFactory.get() as conn: query = ('SELECT count(job_id) FROM %s WHERE client_key = %%s AND status != %%s' % self.jobsTableName) conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED]) activeJobCount = conn.cursor.fetchone()[0] retu...
'Fetch jobIDs for jobs in the table with optional fields given a specific clientInfo'
@logExceptions(_LOGGER) @g_retrySQL def getActiveJobsForClientInfo(self, clientInfo, fields=[]):
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields] dbFieldsStr = ','.join((['job_id'] + dbFields)) with ConnectionFactory.get() as conn: query = ('SELECT %s FROM %s WHERE client_info = %%s AND status != %%s' % (dbFieldsStr, self.jobsTableName)) co...
'Fetch jobIDs for jobs in the table with optional fields given a specific clientKey'
@logExceptions(_LOGGER) @g_retrySQL def getActiveJobsForClientKey(self, clientKey, fields=[]):
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields] dbFieldsStr = ','.join((['job_id'] + dbFields)) with ConnectionFactory.get() as conn: query = ('SELECT %s FROM %s WHERE client_key = %%s AND status != %%s' % (dbFieldsStr, self.jobsTableName)) con...
'Fetch jobIDs for jobs in the table with optional fields'
@logExceptions(_LOGGER) @g_retrySQL def getJobs(self, fields=[]):
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields] dbFieldsStr = ','.join((['job_id'] + dbFields)) with ConnectionFactory.get() as conn: query = ('SELECT %s FROM %s' % (dbFieldsStr, self.jobsTableName)) conn.cursor.execute(query) rows = conn.cursor.fetchall() ret...
'Helper function for querying the models table including relevant job info where the job type matches the specified jobType. Only records for which there is a matching jobId in both tables is returned, and only the requested fields are returned in each result, assuming that there is not a conflict. This function is u...
@logExceptions(_LOGGER) @g_retrySQL def getFieldsForActiveJobsOfType(self, jobType, fields=[]):
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields] dbFieldsStr = ','.join((['job_id'] + dbFields)) with ConnectionFactory.get() as conn: query = ('SELECT DISTINCT %s FROM %s j LEFT JOIN %s m USING(job_id) WHERE j.status != %%s AND _eng_job_type...
'Fetch the values of 1 or more fields from a job record. Here, \'fields\' is a list with the names of the fields to fetch. The names are the public names of the fields (camelBack, not the lower_case_only form as stored in the DB). Parameters: jobID: jobID of the job record fields: list of fields to return Return...
@logExceptions(_LOGGER)
def jobGetFields(self, jobID, fields):
    """Fetch the values of one or more fields from a single job record.

    Field names are the public (camelBack) names, not the lower_case_only
    forms stored in the DB.  Delegates to jobsGetFields with
    requireAll=True, so a missing jobID raises.

    Parameters:
    jobID:  ID of the job record
    fields: list of public field names to fetch

    Returns the sequence of field values, in the same order as *fields*.
    """
    results = self.jobsGetFields([jobID], fields, requireAll=True)
    # jobsGetFields returns (jobID, values) pairs; only one job was asked for.
    _jobIDOut, fieldValues = results[0]
    return fieldValues
'Fetch the values of 1 or more fields from a sequence of job records. Here, \'fields\' is a sequence (list or tuple) with the names of the fields to fetch. The names are the public names of the fields (camelBack, not the lower_case_only form as stored in the DB). WARNING!!!: The order of the results are NOT necessarily...
@logExceptions(_LOGGER) def jobsGetFields(self, jobIDs, fields, requireAll=True):
assert isinstance(jobIDs, self._SEQUENCE_TYPES) assert (len(jobIDs) >= 1) rows = self._getMatchingRowsWithRetries(self._jobs, dict(job_id=jobIDs), (['job_id'] + [self._jobs.pubToDBNameDict[x] for x in fields])) if (requireAll and (len(rows) < len(jobIDs))): raise RuntimeError(('jobIDs %s n...