desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Helper method that generates a unique label for a :class:`MetricSpec` /
:class:`~nupic.frameworks.opf.opf_utils.InferenceType` pair. The label is
formatted as follows:
<predictionKind>:<metric type>:(paramName=value)*:field=<fieldname>
For example:
classification:aae:paramA=10.2:paramB=20:window=100:field=pounds
:retu... | def getLabel(self, inferenceType=None):
| result = []
if (inferenceType is not None):
result.append(InferenceType.getLabel(inferenceType))
result.append(self.inferenceElement)
result.append(self.metric)
params = self.params
if (params is not None):
sortedParams = params.keys()
sortedParams.sort()
for para... |
'Extracts the PredictionKind (temporal vs. nontemporal) from the given
metric label.
:param label: (string) for a metric spec generated by
:meth:`getMetricLabel`
:returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)'
| @classmethod
def getInferenceTypeFromLabel(cls, label):
| (infType, _, _) = label.partition(cls._LABEL_SEPARATOR)
if (not InferenceType.validate(infType)):
return None
return infType
|
':param windowSize: The number of values that are used to compute the
moving average'
| def __init__(self, windowSize=None):
| self._windowSize = windowSize
self._countDict = dict()
self._history = deque([])
|
'Initialize this metric
If the params contains the key \'errorMetric\', then that is the name of
another metric to which we will pass a modified groundTruth and prediction
to from our addInstance() method. For example, we may compute a moving mean
on the groundTruth and then pass that to the AbsoluteAveError metric'
| def __init__(self, metricSpec):
| self.id = None
self.verbosity = 0
self.window = (-1)
self.history = None
self.accumulatedError = 0
self.aggregateError = None
self.steps = 0
self.spec = metricSpec
self.disabled = False
self._predictionSteps = [0]
self._groundTruthHistory = deque([])
self._subErrorMetrics... |
'Utility function that saves the passed in groundTruth into a local
history buffer, and returns the groundTruth from self._predictionSteps ago,
where self._predictionSteps is defined by the \'steps\' parameter.
This can be called from the beginning of a derived class\'s addInstance()
before it passes groundTruth and pr... | def _getShiftedGroundTruth(self, groundTruth):
| self._groundTruthHistory.append(groundTruth)
assert (len(self._predictionSteps) == 1)
if (len(self._groundTruthHistory) > self._predictionSteps[0]):
return self._groundTruthHistory.popleft()
elif hasattr(groundTruth, '__iter__'):
return ([None] * len(groundTruth))
else:
retur... |
'Compute and store metric value'
| def addInstance(self, groundTruth, prediction, record=None, result=None):
| self.value = self.avg(prediction)
|
'Return the metric value'
| def getMetric(self):
| return {'value': self.value}
|
'Helper function to return a scalar value representing the most
likely outcome given a probability distribution'
| def mostLikely(self, pred):
| if (len(pred) == 1):
return pred.keys()[0]
mostLikelyOutcome = None
maxProbability = 0
for (prediction, probability) in pred.items():
if (probability > maxProbability):
mostLikelyOutcome = prediction
maxProbability = probability
return mostLikelyOutcome
|
'Helper function to return a scalar value representing the expected
value of a probability distribution'
| def expValue(self, pred):
| if (len(pred) == 1):
return pred.keys()[0]
return sum([(x * p) for (x, p) in pred.items()])
|
'Accumulate history of groundTruth and "prediction" values.
For this metric, groundTruth is the actual category and "prediction" is a
dict containing one top-level item with a key of 0 (meaning this is the
0-step classificaton) and a value which is another dict, which contains the
probability for each category as outpu... | def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result=None):
| if self.disabled:
return 0
if (historyBuffer is not None):
historyBuffer.append((groundTruth, prediction[0]))
if (len(historyBuffer) > self.spec.params['window']):
historyBuffer.popleft()
return 0
|
'MetricMulti constructor using metricSpec is not allowed.'
| def __init__(self, metricSpec):
| raise ValueError('MetricMulti cannot be constructed from metricSpec string! Use MetricMulti(weights,metrics) constructor instead.')
|
'MetricMulti
@param weights - [list of floats] used as weights
@param metrics - [list of submetrics]
@param window - (opt) window size for moving average, or None when disabled'
| def __init__(self, weights, metrics, window=None):
| if ((weights is None) or (not isinstance(weights, list)) or (not (len(weights) > 0)) or (not isinstance(weights[0], float))):
raise ValueError("MetricMulti requires 'weights' parameter as a [list of floats]")
self.weights = weights
if ((metrics is None) or (not isinstance(met... |
'Constructor
Args:
model: The OPF Model instance against which to run the task
task: A dictionary conforming to opfTaskSchema.json
cmdOptions: ParseCommandLineOptionsResult namedtuple'
| def __init__(self, model, task, cmdOptions):
| validateOpfJsonValue(task, 'opfTaskSchema.json')
self.__logger = logging.getLogger('.'.join(['com.numenta', self.__class__.__module__, self.__class__.__name__]))
self.__logger.debug((((('Instantiated %s(' + 'model=%r, ') + 'task=%r, ') + 'cmdOptions=%r)') % (self.__class__.__name__, model, task, cm... |
'Runs a single experiment task'
| def run(self):
| self.__logger.debug('run(): Starting task <%s>', self.__task['taskLabel'])
if self.__cmdOptions.privateOptions['testMode']:
numIters = 10
else:
numIters = self.__task['iterationCount']
if (numIters >= 0):
iterTracker = iter(xrange(numIters))
else:
iterTracker... |
'Creates and returns a list of activites for this TaskRunner instance
Returns: a list of PeriodicActivityRequest elements'
| def _createPeriodicActivities(self):
| periodicActivities = []
class MetricsReportCb(object, ):
def __init__(self, taskRunner):
self.__taskRunner = taskRunner
return
def __call__(self):
self.__taskRunner._getAndEmitExperimentMetrics()
reportMetrics = PeriodicActivityRequest(repeating=True, peri... |
'requestedActivities: a sequence of PeriodicActivityRequest elements'
| def __init__(self, requestedActivities):
| self.__activities = []
for req in requestedActivities:
act = self.Activity(repeating=req.repeating, period=req.period, cb=req.cb, iteratorHolder=[iter(xrange((req.period - 1)))])
self.__activities.append(act)
|
'Activity tick handler; services all activities
Returns:
True if controlling iterator says it\'s okay to keep going;
False to stop'
| def tick(self):
| for act in self.__activities:
if (not act.iteratorHolder[0]):
continue
try:
next(act.iteratorHolder[0])
except StopIteration:
act.cb()
if act.repeating:
act.iteratorHolder[0] = iter(xrange((act.period - 1)))
else:
... |
'Translates the given metrics value to JSON string
metrics: A list of dictionaries per OPFTaskDriver.getMetrics():
Returns: JSON string representing the given metrics object.'
| def _translateMetricsToJSON(self, metrics, label):
| metricsDict = metrics
def _mapNumpyValues(obj):
'\n '
import numpy
if isinstance(obj, numpy.float32):
return float(obj)
elif isinstance(obj, numpy.bool_):
return bool(obj)
elif isinstance(obj, numpy.ndarray):
retu... |
'Constructor
experimentDir:
experiment directory path that contains description.py
label: A label string to incorporate into the filename.
inferenceElements:
inferenceType:
An constant from opf_utils.InferenceType for the
requested prediction writer
fields: a non-empty sequence of nupic.data.fieldmeta.Fiel... | def __init__(self, experimentDir, label, inferenceType, fields, metricNames=None, checkpointSource=None):
| self.__experimentDir = experimentDir
self.__inferenceType = inferenceType
self.__inputFieldsMeta = tuple(copy.deepcopy(fields))
self.__numInputFields = len(self.__inputFieldsMeta)
self.__label = label
if (metricNames is not None):
metricNames.sort()
self.__metricNames = metricNames
... |
'Open the data file and write the header row'
| def __openDatafile(self, modelResult):
| resetFieldMeta = FieldMetaInfo(name='reset', type=FieldMetaType.integer, special=FieldMetaSpecial.reset)
self.__outputFieldsMeta.append(resetFieldMeta)
rawInput = modelResult.rawInput
rawFields = rawInput.keys()
rawFields.sort()
for field in rawFields:
if (field.startswith('_') or (field... |
'Tell the writer which metrics should be written
Parameters:
metricsNames: A list of metric lables to be written'
| def setLoggedMetrics(self, metricNames):
| if (metricNames is None):
self.__metricNames = set([])
else:
self.__metricNames = set(metricNames)
|
'[virtual method override] Closes the writer (e.g., close the underlying
file)'
| def close(self):
| if self.__dataset:
self.__dataset.close()
self.__dataset = None
return
|
'Get field metadata information for inferences that are of list type
TODO: Right now we assume list inferences are associated with the input field
metadata'
| def __getListMetaInfo(self, inferenceElement):
| fieldMetaInfo = []
inferenceLabel = InferenceElement.getLabel(inferenceElement)
for inputFieldMeta in self.__inputFieldsMeta:
if InferenceElement.getInputElement(inferenceElement):
outputFieldMeta = FieldMetaInfo(name=(inputFieldMeta.name + '.actual'), type=inputFieldMeta.type, special=i... |
'Get field metadate information for inferences that are of dict type'
| def __getDictMetaInfo(self, inferenceElement, inferenceDict):
| fieldMetaInfo = []
inferenceLabel = InferenceElement.getLabel(inferenceElement)
if InferenceElement.getInputElement(inferenceElement):
fieldMetaInfo.append(FieldMetaInfo(name=(inferenceLabel + '.actual'), type=FieldMetaType.string, special=''))
keys = sorted(inferenceDict.keys())
for key in ... |
'[virtual method override] Emits a single prediction as input versus
predicted.
modelResult: An opf_utils.ModelResult object that contains the model input
and output for the current timestep.'
| def append(self, modelResult):
| inferences = modelResult.inferences
hasInferences = False
if (inferences is not None):
for value in inferences.itervalues():
hasInferences = (hasInferences or (value is not None))
if (not hasInferences):
return
if (self.__dataset is None):
self.__openDatafile(mode... |
'[virtual method override] Save a checkpoint of the prediction output
stream. The checkpoint comprises up to maxRows of the most recent inference
records.
Parameters:
checkpointSink: A File-like object where predictions checkpoint data, if
any, will be stored.
maxRows: Maximum number of most recent infer... | def checkpoint(self, checkpointSink, maxRows):
| checkpointSink.truncate()
if (self.__dataset is None):
if (self.__checkpointCache is not None):
self.__checkpointCache.seek(0)
shutil.copyfileobj(self.__checkpointCache, checkpointSink)
checkpointSink.flush()
return
else:
return
sel... |
'Emit a input/prediction pair, if possible.
modelResult: An opf_utils.ModelResult object that contains the model input
and output for the current timestep.'
| def update(self, modelResult):
| self.__writer.append(modelResult)
return
|
'writer: Non-temporal prediction log writer conforming to
PredictionWriterIface interface.'
| def __init__(self, writer):
| self.__logger = logging.getLogger('.'.join(['com.numenta', self.__class__.__module__, self.__class__.__name__]))
self.__writer = writer
self.__inferenceShifter = InferenceShifter()
return
|
'Queue up the T(i+1) prediction value and emit a T(i)
input/prediction pair, if possible. E.g., if the previous T(i-1)
iteration was learn-only, then we would not have a T(i) prediction in our
FIFO and would not be able to emit a meaningful input/prediction
pair.
modelResult: An opf_utils.ModelResult object that co... | def update(self, modelResult):
| self.__writer.append(self.__inferenceShifter.shift(modelResult))
|
'experimentDir: experiment directory path that contains description.py
Returns: experiment inference directory path string (the path may not
yet exist - see createExperimentInferenceDir())'
| @staticmethod
def getExperimentInferenceDirPath(experimentDir):
| return os.path.abspath(os.path.join(experimentDir, 'inference'))
|
'Creates the inference output directory for the given experiment
experimentDir: experiment directory path that contains description.py
Returns: path of the inference output directory'
| @classmethod
def createExperimentInferenceDir(cls, experimentDir):
| path = cls.getExperimentInferenceDirPath(experimentDir)
cls.makeDirectory(path)
return path
|
'Makes directory for the given directory path if it doesn\'t already exist
in the filesystem. Creates all requested directory segments as needed.
path: path of the directory to create.
Returns: nothing'
| @staticmethod
def makeDirectory(path):
| try:
os.makedirs(path)
except OSError as e:
if (e.errno == os.errno.EEXIST):
pass
else:
raise
return
|
'Get the sensor input element that corresponds to the given inference
element. This is mainly used for metrics and prediction logging
:param inferenceElement: (:class:`.InferenceElement`)
:return: (string) name of sensor input element'
| @staticmethod
def getInputElement(inferenceElement):
| return InferenceElement.__inferenceInputMap.get(inferenceElement, None)
|
'.. note:: This should only be checked IF THE MODEL\'S INFERENCE TYPE IS ALSO
TEMPORAL. That is, a temporal model CAN have non-temporal inference
elements, but a non-temporal model CANNOT have temporal inference
elements.
:param inferenceElement: (:class:`.InferenceElement`)
:return: (bool) ``True`` if the inference fr... | @staticmethod
def isTemporal(inferenceElement):
| if (InferenceElement.__temporalInferenceElements is None):
InferenceElement.__temporalInferenceElements = set([InferenceElement.prediction])
return (inferenceElement in InferenceElement.__temporalInferenceElements)
|
':param inferenceElement: (:class:`.InferenceElement`) value being delayed
:param key: (string) If the inference is a dictionary type, this specifies
key for the sub-inference that is being delayed.
:return: (int) the number of records that elapse between when an inference
is made and when the corresponding input recor... | @staticmethod
def getTemporalDelay(inferenceElement, key=None):
| if (inferenceElement in (InferenceElement.prediction, InferenceElement.encodings)):
return 1
if (inferenceElement in (InferenceElement.anomalyScore, InferenceElement.anomalyLabel, InferenceElement.classification, InferenceElement.classConfidences)):
return 0
if (inferenceElement in (Inferenc... |
':param inferences: (dict) where the keys are :class:`.InferenceElement`
objects.
:return: (int) the maximum delay for the :class:`.InferenceElement` objects
in the inference dictionary.'
| @staticmethod
def getMaxDelay(inferences):
| maxDelay = 0
for (inferenceElement, inference) in inferences.iteritems():
if isinstance(inference, dict):
for key in inference.iterkeys():
maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement, key), maxDelay)
else:
maxDelay = max(InferenceElem... |
':param inferenceType: (:class:`.InferenceType`)
:return: (bool) `True` if the inference type is \'temporal\', i.e. requires a
temporal memory in the network.'
| @staticmethod
def isTemporal(inferenceType):
| if (InferenceType.__temporalInferenceTypes is None):
InferenceType.__temporalInferenceTypes = set([InferenceType.TemporalNextStep, InferenceType.TemporalClassification, InferenceType.TemporalAnomaly, InferenceType.TemporalMultiStep, InferenceType.NontemporalMultiStep])
return (inferenceType in Inference... |
':param opf_utils.InferenceType inferenceType: mutually-exclusive with proto
arg
:param proto: capnp ModelProto message reader for deserializing;
mutually-exclusive with the other constructor args.'
| def __init__(self, inferenceType=None, proto=None):
| assert (((inferenceType is not None) and (proto is None)) or ((inferenceType is None) and (proto is not None))), 'proto and other constructor args are mutually exclusive'
if (proto is None):
self._numPredictions = 0
self.__inferenceType = inferenceType
self.__learnin... |
'Run one iteration of this model.
:param inputRecord: (object)
A record object formatted according to
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
result format.
:returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)... | def run(self, inputRecord):
| predictionNumber = self._numPredictions
self._numPredictions += 1
result = opf_utils.ModelResult(predictionNumber=predictionNumber, rawInput=inputRecord)
return result
|
'Return the InferenceType of this model.
This is immutable.
:returns: :class:`~nupic.frameworks.opf.opf_utils.InferenceType`'
| def getInferenceType(self):
| return self.__inferenceType
|
'Turn Learning on for the current model.'
| def enableLearning(self):
| self.__learningEnabled = True
return
|
'Turn Learning off for the current model.'
| def disableLearning(self):
| self.__learningEnabled = False
return
|
'Return the Learning state of the current model.
:returns: (bool) The learning state'
| def isLearningEnabled(self):
| return self.__learningEnabled
|
'Enable inference for this model.
:param inferenceArgs: (dict)
A dictionary of arguments required for inference. These depend on
the InferenceType of the current model'
| def enableInference(self, inferenceArgs=None):
| self.__inferenceEnabled = True
self.__inferenceArgs = inferenceArgs
|
'Return the dict of arguments for the current inference mode.
:returns: (dict) The arguments of the inference mode'
| def getInferenceArgs(self):
| return self.__inferenceArgs
|
'Turn Inference off for the current model.'
| def disableInference(self):
| self.__inferenceEnabled = False
|
'Return the inference state of the current model.
:returns: (bool) The inference state'
| def isInferenceEnabled(self):
| return self.__inferenceEnabled
|
'Return the pycapnp proto type that the class uses for serialization.
This is used to convert the proto into the proper type before passing it
into the read or write method of the subclass.'
| @staticmethod
def getSchema():
| raise NotImplementedError()
|
'Return the absolute path of the model\'s checkpoint file.
:param checkpointDir: (string)
Directory of where the experiment is to be or was saved
:returns: (string) An absolute path.'
| @staticmethod
def _getModelCheckpointFilePath(checkpointDir):
| path = os.path.join(checkpointDir, 'model.data')
path = os.path.abspath(path)
return path
|
'Serializes model using capnproto and writes data to ``checkpointDir``'
| def writeToCheckpoint(self, checkpointDir):
| proto = self.getSchema().new_message()
self.write(proto)
checkpointPath = self._getModelCheckpointFilePath(checkpointDir)
if os.path.exists(checkpointDir):
if (not os.path.isdir(checkpointDir)):
raise Exception(('Existing filesystem entry <%s> is not a model c... |
'Deerializes model from checkpointDir using capnproto'
| @classmethod
def readFromCheckpoint(cls, checkpointDir):
| checkpointPath = cls._getModelCheckpointFilePath(checkpointDir)
with open(checkpointPath, 'r') as f:
proto = cls.getSchema().read(f)
model = cls.read(proto)
return model
|
'Save the state maintained by the Model base class
:param proto: capnp ModelProto message builder'
| def writeBaseToProto(self, proto):
| inferenceType = self.getInferenceType()
inferenceType = (inferenceType[:1].lower() + inferenceType[1:])
proto.inferenceType = inferenceType
proto.numPredictions = self._numPredictions
proto.learningEnabled = self.__learningEnabled
proto.inferenceEnabled = self.__inferenceEnabled
proto.infere... |
'Write state to proto object.
The type of proto is determined by :meth:`getSchema`.'
| def write(self, proto):
| raise NotImplementedError()
|
'Read state from proto object.
The type of proto is determined by :meth:`getSchema`.'
| @classmethod
def read(cls, proto):
| raise NotImplementedError()
|
'Save the model in the given directory.
:param saveModelDir: (string)
Absolute directory path for saving the model. This directory should
only be used to store a saved model. If the directory does not exist,
it will be created automatically and populated with model data. A
pre-existing directory will only be accepted i... | def save(self, saveModelDir):
| logger = self._getLogger()
logger.debug('(%s) Creating local checkpoint in %r...', self, saveModelDir)
modelPickleFilePath = self._getModelPickleFilePath(saveModelDir)
if os.path.exists(saveModelDir):
if (not os.path.isdir(saveModelDir)):
raise Exception(('Existing ... |
'Protected method that is called during serialization with an external
directory path. It can be overridden by subclasses to bypass pickle for
saving large binary states.
This is called by ModelBase only.
:param extraDataDir: (string) Model\'s extra data directory path'
| def _serializeExtraData(self, extraDataDir):
| pass
|
'Load saved model.
:param savedModelDir: (string)
Directory of where the experiment is to be or was saved
:returns: (:class:`Model`) The loaded model instance'
| @classmethod
def load(cls, savedModelDir):
| logger = opf_utils.initLogger(cls)
logger.debug('Loading model from local checkpoint at %r...', savedModelDir)
modelPickleFilePath = Model._getModelPickleFilePath(savedModelDir)
with open(modelPickleFilePath, 'r') as modelPickleFile:
logger.debug('Unpickling Model instanc... |
'Protected method that is called during deserialization
(after __setstate__) with an external directory path.
It can be overridden by subclasses to bypass pickle for loading large
binary states.
This is called by ModelBase only.
:param extraDataDir: (string) Model\'s extra data directory path'
| def _deSerializeExtraData(self, extraDataDir):
| pass
|
'Return the absolute path of the model\'s pickle file.
:param saveModelDir: (string)
Directory of where the experiment is to be or was saved
:returns: (string) An absolute path.'
| @staticmethod
def _getModelPickleFilePath(saveModelDir):
| path = os.path.join(saveModelDir, 'model.pkl')
path = os.path.abspath(path)
return path
|
'Return the absolute path to the directory where the model\'s own
"extra data" are stored (i.e., data that\'s too big for pickling).
:param saveModelDir: (string)
Directory of where the experiment is to be or was saved
:returns: (string) An absolute path.'
| @staticmethod
def _getModelExtraDataDir(saveModelDir):
| path = os.path.join(saveModelDir, 'modelextradata')
path = os.path.abspath(path)
return path
|
'Make directory for the given directory path if it doesn\'t already
exist in the filesystem.
:param absDirPath: (string) Absolute path of the directory to create
@exception (Exception) OSError if directory creation fails'
| @staticmethod
def __makeDirectoryFromAbsolutePath(absDirPath):
| assert os.path.isabs(absDirPath)
try:
os.makedirs(absDirPath)
except OSError as e:
if (e.errno != os.errno.EEXIST):
raise
return
|
'Resolves the referenced value. If the result is already cached,
returns it to caller. Otherwise, invokes the pure virtual method
handleGetValue. If handleGetValue() returns another value-getter, calls
that value-getter to resolve the value. This may result in a chain of calls
that terminates once the value is fully... | def __call__(self, topContainer):
| assert (not self.__inLookup)
if (self.__cachedResult is not self.__NoResult):
return self.__cachedResult
self.__cachedResult = self.handleGetValue(topContainer)
if isinstance(self.__cachedResult, ValueGetterBase):
valueGetter = self.__cachedResult
self.__inLookup = True
s... |
'A "pure virtual" method. The derived class MUST override this method
and return the referenced value. The derived class is NOT responsible for
fully resolving the reference\'d value in the event the value resolves to
another ValueGetterBase-based instance -- this is handled automatically
within ValueGetterBase imple... | def handleGetValue(self, topContainer):
| raise NotImplementedError(('ERROR: ValueGetterBase is an abstract ' + 'class; base class MUST override handleGetValue()'))
|
'referenceDict: Explicit reference dictionary that contains the field
corresonding to the first key name in dictKeyChain. This may
be the result returned by the built-in globals() function,
when we desire to look up a dictionary value from a dictionary
referenced by a global variable within the calling module.
If None... | def __init__(self, referenceDict, *dictKeyChain):
| ValueGetterBase.__init__(self)
assert ((referenceDict is None) or isinstance(referenceDict, dict))
assert (len(dictKeyChain) >= 1)
self.__referenceDict = referenceDict
self.__dictKeyChain = dictKeyChain
|
'This method overrides ValueGetterBase\'s "pure virtual" method. It
returns the referenced value. The derived class is NOT responsible for
fully resolving the reference\'d value in the event the value resolves to
another ValueGetterBase-based instance -- this is handled automatically
within ValueGetterBase implementa... | def handleGetValue(self, topContainer):
| value = (self.__referenceDict if (self.__referenceDict is not None) else topContainer)
for key in self.__dictKeyChain:
value = value[key]
return value
|
'dictKeyChain: One or more strings; the first string is a key (that will
eventually be defined) in the dictionary that will be passed
to applyValueGettersToContainer(). If additional strings are
supplied, then the values correspnding to prior key strings
must be dictionaries, and each additionl string references a
sub-... | def __init__(self, *dictKeyChain):
| DictValueGetter.__init__(self, None, *dictKeyChain)
|
'Creates and returns the _IterationPhase-based instance corresponding
to this phase specification
model: Model instance'
| def _getImpl(self, model):
| impl = _IterationPhaseLearnOnly(model=model, nIters=self.__nIters)
return impl
|
'Creates and returns the _IterationPhase-based instance corresponding
to this phase specification
model: Model instance'
| def _getImpl(self, model):
| impl = _IterationPhaseInferOnly(model=model, nIters=self.__nIters, inferenceArgs=self.__inferenceArgs)
return impl
|
'Creates and returns the _IterationPhase-based instance corresponding
to this phase specification
model: Model instance'
| def _getImpl(self, model):
| impl = _IterationPhaseLearnAndInfer(model=model, nIters=self.__nIters, inferenceArgs=self.__inferenceArgs)
return impl
|
'Replaces the Iteration Cycle phases
:param phaseSpecs: Iteration cycle description consisting of a sequence of
IterationPhaseSpecXXXXX elements that are performed in the
given order'
| def replaceIterationCycle(self, phaseSpecs):
| self.__phaseManager = _PhaseManager(model=self.__model, phaseSpecs=phaseSpecs)
return
|
'Performs initial setup activities, including \'setup\' callbacks. This
method MUST be called once before the first call to
:meth:`handleInputRecord`.'
| def setup(self):
| for cb in self.__userCallbacks['setup']:
cb(self.__model)
return
|
'Perform final activities, including \'finish\' callbacks. This
method MUST be called once after the last call to :meth:`handleInputRecord`.'
| def finalize(self):
| for cb in self.__userCallbacks['finish']:
cb(self.__model)
return
|
'Processes the given record according to the current iteration cycle phase
:param inputRecord: (object) record expected to be returned from
:meth:`nupic.data.record_stream.RecordStreamIface.getNextRecord`.
:returns: :class:`nupic.frameworks.opf.opf_utils.ModelResult`'
| def handleInputRecord(self, inputRecord):
| assert inputRecord, ('Invalid inputRecord: %r' % inputRecord)
results = self.__phaseManager.handleInputRecord(inputRecord)
metrics = self.__metricsMgr.update(results)
for cb in self.__userCallbacks['postIter']:
cb(self.__model)
results.metrics = metrics
return results
|
'Gets the current metric values
:returns: A dictionary of metric values. The key for each entry is the label
for the metric spec, as generated by
:meth:`nupic.frameworks.opf.metrics.MetricSpec.getLabel`. The
value for each entry is a dictionary containing the value of the
metric as returned by
:meth:`nupic.frameworks.o... | def getMetrics(self):
| return self.__metricsMgr.getMetrics()
|
':returns: (list) labels for the metrics that are being calculated'
| def getMetricLabels(self):
| return self.__metricsMgr.getMetricLabels()
|
'model: Model instance
phaseSpecs: Iteration period description consisting of a sequence of
IterationPhaseSpecXXXXX elements that are performed in the
given order'
| def __init__(self, model, phaseSpecs):
| self.__model = model
self.__phases = tuple(map((lambda x: x._getImpl(model=model)), phaseSpecs))
if self.__phases:
self.__phaseCycler = itertools.cycle(self.__phases)
self.__advancePhase()
return
|
'Advance to the next iteration cycle phase'
| def __advancePhase(self):
| self.__currentPhase = self.__phaseCycler.next()
self.__currentPhase.enterPhase()
return
|
'Processes the given record according to the current phase
inputRecord: record object formatted according to
nupic.data.FileSource.getNext() result format.
Returns: An opf_utils.ModelResult object with the inputs and inferences
after the current record is processed by the model'
| def handleInputRecord(self, inputRecord):
| results = self.__model.run(inputRecord)
shouldContinue = self.__currentPhase.advance()
if (not shouldContinue):
self.__advancePhase()
return results
|
'nIters: Number of iterations; MUST be greater than 0'
| def __init__(self, nIters):
| assert (nIters > 0), ('nIters=%s' % nIters)
self.__nIters = nIters
return
|
'Performs initialization that is necessary upon entry to the phase. Must
be called before handleInputRecord() at the beginning of each phase'
| @abstractmethod
def enterPhase(self):
| self.__iter = iter(xrange(self.__nIters))
self.__iter.next()
|
'Advances the iteration;
Returns: True if more iterations remain; False if this is the final
iteration.'
| def advance(self):
| hasMore = True
try:
self.__iter.next()
except StopIteration:
self.__iter = None
hasMore = False
return hasMore
|
'model: Model instance
nIters: Number of iterations; MUST be greater than 0'
| def __init__(self, model, nIters):
| super(_IterationPhaseLearnOnly, self).__init__(nIters=nIters)
self.__model = model
return
|
'[_IterationPhase method implementation]
Performs initialization that is necessary upon entry to the phase. Must
be called before handleInputRecord() at the beginning of each phase'
| def enterPhase(self):
| super(_IterationPhaseLearnOnly, self).enterPhase()
self.__model.enableLearning()
self.__model.disableInference()
return
|
'model: Model instance
nIters: Number of iterations; MUST be greater than 0
inferenceArgs:
A dictionary of arguments required for inference. These
depend on the InferenceType of the current model'
| def __init__(self, model, nIters, inferenceArgs):
| super(_IterationPhaseInferCommon, self).__init__(nIters=nIters)
self._model = model
self._inferenceArgs = inferenceArgs
return
|
'[_IterationPhase method implementation]
Performs initialization that is necessary upon entry to the phase. Must
be called before handleInputRecord() at the beginning of each phase'
| def enterPhase(self):
| super(_IterationPhaseInferCommon, self).enterPhase()
self._model.enableInference(inferenceArgs=self._inferenceArgs)
return
|
'model: Model instance
nIters: Number of iterations; MUST be greater than 0
inferenceArgs:
A dictionary of arguments required for inference. These
depend on the InferenceType of the current model'
| def __init__(self, model, nIters, inferenceArgs):
| super(_IterationPhaseInferOnly, self).__init__(model=model, nIters=nIters, inferenceArgs=inferenceArgs)
return
|
'[_IterationPhase method implementation]
Performs initialization that is necessary upon entry to the phase. Must
be called before handleInputRecord() at the beginning of each phase'
| def enterPhase(self):
| super(_IterationPhaseInferOnly, self).enterPhase()
self._model.disableLearning()
return
|
'model: Model instance
nIters: Number of iterations; MUST be greater than 0
inferenceArgs:
A dictionary of arguments required for inference. These
depend on the InferenceType of the current model'
| def __init__(self, model, nIters, inferenceArgs):
| super(_IterationPhaseLearnAndInfer, self).__init__(model=model, nIters=nIters, inferenceArgs=inferenceArgs)
return
|
'[_IterationPhase method implementation]
Performs initialization that is necessary upon entry to the phase. Must
be called before handleInputRecord() at the beginning of each phase'
| def enterPhase(self):
| super(_IterationPhaseLearnAndInfer, self).enterPhase()
self._model.enableLearning()
return
|
'Since the two-gram has no use for this information, this is a no-op'
| def setFieldStatistics(self, fieldStats):
| pass
|
':param proto: capnp TwoGramModelProto message reader'
| @classmethod
def read(cls, proto):
| instance = object.__new__(cls)
super(TwoGramModel, instance).__init__(proto=proto.modelBase)
instance._logger = opf_utils.initLogger(instance)
instance._reset = proto.reset
instance._hashToValueDict = {x.hash: x.value for x in proto.hashToValueDict}
instance._learningEnabled = proto.learningEnab... |
':param proto: capnp TwoGramModelProto message builder'
| def write(self, proto):
| super(TwoGramModel, self).writeBaseToProto(proto.modelBase)
proto.reset = self._reset
proto.learningEnabled = self._learningEnabled
proto.prevValues = self._prevValues
self._encoder.write(proto.encoder)
proto.hashToValueDict = [{'hash': h, 'value': v} for (h, v) in self._hashToValueDict.items()]... |
'net: The CLA Network instance
statsCollectors:
Sequence of 0 or more CLAStatistic-based instances'
def __init__(self, net, statsCollectors):
    """
    net: the CLA Network instance to run.
    statsCollectors: sequence of zero or more CLAStatistic-based
        instances used to accumulate run statistics.
    """
    self.net = net
    self.statsCollectors = statsCollectors
|
':param network: if not None, the deserialized nupic.engine.Network instance
to use instead of creating a new Network
:param baseProto: if not None, capnp ModelProto message reader for
deserializing; supersedes inferenceType'
| def __init__(self, sensorParams={}, inferenceType=InferenceType.TemporalNextStep, spEnable=True, spParams={}, trainSPNetOnlyIfRequested=False, tmEnable=True, tmParams={}, clEnable=True, clParams={}, anomalyParams={}, minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD, maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_... | if (not (inferenceType in self.__supportedInferenceKindSet)):
raise ValueError('{0} received incompatible inference type: {1}'.format(self.__class__, inferenceType))
if (baseProto is None):
super(HTMPredictionModel, self).__init__(inferenceType)
else:
super(HTMPredicti... |
'Currently only supports a parameter named ``__numRunCalls``.
:param paramName: (string) name of parameter to get. If not
``__numRunCalls`` an exception is thrown.
:returns: (int) the value of ``self.__numRunCalls``'
def getParameter(self, paramName):
    """Look up a model parameter; only ``__numRunCalls`` is exposed.

    :param paramName: (string) name of parameter to get. If not
        ``__numRunCalls`` an exception is thrown.
    :returns: (int) the value of ``self.__numRunCalls``
    :raises: RuntimeError for any other parameter name
    """
    # Guard clause: reject anything except the single supported parameter.
    if paramName != '__numRunCalls':
        raise RuntimeError(
            "'%s' parameter is not exposed by htm_prediction_model." % paramName)
    return self.__numRunCalls
|
'Set a parameter of the anomaly classifier within this model.
:param param: (string) name of parameter to set
:param value: (object) value to set'
@requireAnomalyModel
def setAnomalyParameter(self, param, value):
    """Set a parameter of the anomaly classifier within this model.

    :param param: (string) name of parameter to set
    :param value: (object) value to set
    """
    classifierRegion = self._getAnomalyClassifier()
    classifierRegion.setParameter(param, value)
|
'Get a parameter of the anomaly classifier within this model by key.
:param param: (string) name of parameter to retrieve'
@requireAnomalyModel
def getAnomalyParameter(self, param):
    """Get a parameter of the anomaly classifier within this model by key.

    :param param: (string) name of parameter to retrieve
    :returns: the parameter's current value
    """
    classifierRegion = self._getAnomalyClassifier()
    return classifierRegion.getParameter(param)
|
'Remove labels from the anomaly classifier within this model. Removes all
records if ``labelFilter==None``, otherwise only removes the labels equal to
``labelFilter``.
:param start: (int) index to start removing labels
:param end: (int) index to end removing labels
@requireAnomalyModel
def anomalyRemoveLabels(self, start, end, labelFilter):
    """Remove labels from the anomaly classifier within this model.

    :param start: (int) index to start removing labels
    :param end: (int) index to end removing labels
    :param labelFilter: (string) if specified, only labels equal to this
        value are removed; if None, all labels in the range are removed
    """
    classifier = self._getAnomalyClassifier().getSelf()
    classifier.removeLabels(start, end, labelFilter)
|
'Add a label to the anomaly classifier within this model.
:param start: (int) index at which the label starts
:param end: (int) index at which the label ends
:param labelName: (string) name of the label'
@requireAnomalyModel
def anomalyAddLabel(self, start, end, labelName):
    """Add a label to the anomaly classifier within this model.

    :param start: (int) index at which the label starts
    :param end: (int) index at which the label ends
    :param labelName: (string) name of the label
    """
    classifier = self._getAnomalyClassifier().getSelf()
    classifier.addLabel(start, end, labelName)
|
'Get labels from the anomaly classifier within this model.
:param start: (int) index to start getting labels
:param end: (int) index to end getting labels'
@requireAnomalyModel
def anomalyGetLabels(self, start, end):
    """Get labels from the anomaly classifier within this model.

    :param start: (int) index to start getting labels
    :param end: (int) index to end getting labels
    :returns: the labels recorded in the requested range
    """
    classifier = self._getAnomalyClassifier().getSelf()
    return classifier.getLabels(start, end)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.