id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25,900 | spec.py | numenta_nupic-legacy/src/nupic/regions/spec.py |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file defines node spec item classes like InputSpec, OutputSpec,
ParameterSpec and CommandSpec as well as the Spec class itself.
These classes correspond very closely to the NuPIC 2 C++ Spec.
The data type of inputs outputs and parameters must be basic Python types. This
is different from nuPIC 1 that allowed for arbitrary Python values as objects.
The Spec class provides a toDict() method that converts itself into a dict of
dicts. All the keys must be regular strings for simplicity (no Unicode strings).
Each item class provides a constructor (__init__() method) with some defaults
and an invariant() method that verifies the validity of the initialized object.
"""
# The set of data type names a node spec item may declare.
dataTypes = ('int', 'uint', 'bool', 'str', 'float', 'Handle')

# Maps each spec data type name to the Python type used to validate default
# values (see ParameterSpec.invariant()). Note that 'uint' maps to plain int
# (Python has no unsigned type) and 'Handle' may wrap any Python object,
# hence it maps to the universal base type `object`.
dataTypesToPyTypes = {
  'int': int,
  'uint': int,
  'bool': bool,
  'str': str,
  'float': float,
  'Handle': object,
}
class InputSpec(object):
  """Describes a single input of a node spec.

  All attributes are plain Python values; invariant() is invoked at the end
  of construction, so an inconsistent InputSpec cannot be created silently.
  """

  def __init__(self,
               description='',
               dataType=None,
               count=1,
               required=False,
               regionLevel=False,
               isDefaultInput=False,
               requireSplitterMap=True):
    self.description = description
    self.dataType = dataType
    self.count = count
    self.required = required
    self.regionLevel = regionLevel
    self.isDefaultInput = isDefaultInput
    self.requireSplitterMap = requireSplitterMap
    self.invariant()

  def invariant(self):
    """Verify that this input spec is internally consistent.

    :raises AssertionError: if any attribute has the wrong type or value.
    """
    assert isinstance(self.description, str)
    assert isinstance(self.dataType, str)
    assert self.dataType in dataTypes
    assert isinstance(self.count, int)
    assert self.count >= 0
    # Every mode flag must be a real boolean, not merely truthy/falsy.
    for flag in (self.required, self.regionLevel, self.isDefaultInput,
                 self.requireSplitterMap):
      assert isinstance(flag, bool)
class OutputSpec(object):
  """Describes a single output of a node spec.

  All attributes are plain Python values; invariant() is invoked at the end
  of construction, so an inconsistent OutputSpec cannot be created silently.
  """

  def __init__(self,
               description='',
               dataType=None,
               count=1,
               regionLevel=False,
               isDefaultOutput=False):
    self.description = description
    self.dataType = dataType
    self.count = count
    self.regionLevel = regionLevel
    self.isDefaultOutput = isDefaultOutput
    self.invariant()

  def invariant(self):
    """Verify that this output spec is internally consistent.

    :raises AssertionError: if any attribute has the wrong type or value.
    """
    assert isinstance(self.description, str)
    assert isinstance(self.dataType, str)
    assert self.dataType in dataTypes
    assert isinstance(self.count, int)
    assert self.count >= 0
    # Both mode flags must be real booleans.
    for flag in (self.regionLevel, self.isDefaultOutput):
      assert isinstance(flag, bool)
class ParameterSpec(object):
  """Describes a single parameter of a node spec.

  A parameter has a data type, an element count (0 means variable length),
  an optional constraints string, an optional default value and an access
  mode. A default value may only be supplied for 'Create' parameters.
  """

  # The only access modes a parameter may declare.
  accessModes = ('Create', 'Read', 'ReadWrite')

  def __init__(self,
               description='',
               dataType=None,
               count=1,
               constraints='',
               defaultValue=None,
               accessMode=None):
    self.description = description
    self.dataType = dataType
    # String object can't have fixed length in the parameter spec
    if dataType == 'str':
      count = 0
    self.count = count
    self.constraints = constraints
    self.defaultValue = defaultValue
    self.accessMode = accessMode
    self.invariant()

  def invariant(self):
    """Verify the validity of this parameter spec.

    :raises AssertionError: if any attribute is inconsistent.
    """
    assert isinstance(self.description, str)
    assert isinstance(self.dataType, str)
    assert self.dataType in dataTypes
    assert isinstance(self.count, int)
    assert self.count >= 0
    assert isinstance(self.constraints, str)
    # Verify that default value is specified only for 'Create' parameters
    if self.defaultValue is not None:
      assert self.accessMode == 'Create'
      assert isinstance(self.defaultValue, dataTypesToPyTypes[self.dataType])
    else:
      # BUG FIX: the message previously read "Bad access node" (typo), and
      # the bare string concatenation raised TypeError instead of
      # AssertionError whenever accessMode was None. %-formatting handles
      # any accessMode value.
      assert self.accessMode in ParameterSpec.accessModes, \
        'Bad access mode: %s' % (self.accessMode,)
class CommandSpec(object):
  """Describes a single command of a node spec.

  A command is identified only by its textual description; unlike the other
  spec items, construction does not automatically run invariant().
  """

  def __init__(self, description):
    self.description = description

  def invariant(self):
    """Verify that the command description is a plain string."""
    assert isinstance(self.description, str)
class Spec(object):
  """A full node spec: description plus inputs, outputs, parameters, commands.

  Each of the four item containers maps a plain-string name to the matching
  spec item object (InputSpec, OutputSpec, ParameterSpec or CommandSpec).
  """

  def __init__(self, description, singleNodeOnly):
    self.description = description
    self.singleNodeOnly = singleNodeOnly
    # Item containers, populated by the spec author after construction.
    self.inputs = {}
    self.outputs = {}
    self.parameters = {}
    self.commands = {}

  def invariant(self):
    """Verify the validity of the node spec object.

    The type of each sub-object is verified, and the validity of each node
    spec item is checked by calling its invariant() method. Also verifies
    that there is at most one default input and one default output.
    """
    # Top-level attributes
    assert isinstance(self.description, str)
    assert isinstance(self.singleNodeOnly, bool)

    # Every item container must really be a dict
    assert isinstance(self.inputs, dict)
    assert isinstance(self.outputs, dict)
    assert isinstance(self.parameters, dict)
    assert isinstance(self.commands, dict)

    # Each container: string keys, correctly-typed, self-consistent items.
    containers = ((self.inputs, InputSpec),
                  (self.outputs, OutputSpec),
                  (self.parameters, ParameterSpec),
                  (self.commands, CommandSpec))
    for items, itemClass in containers:
      for name, item in items.items():
        assert isinstance(name, str)
        assert isinstance(item, itemClass)
        item.invariant()

    # At most one default input and at most one default output.
    assert sum(1 for i in self.inputs.values() if i.isDefaultInput) <= 1
    assert sum(1 for o in self.outputs.values() if o.isDefaultOutput) <= 1

  def toDict(self):
    """Convert this node spec to a plain dict of basic types.

    The description and singleNodeOnly attributes are stored directly in the
    result. Each spec item object in the inputs, outputs, parameters and
    commands containers is flattened to a dict of its attributes via the
    internal items2dict() helper, so the result is a dict of dicts with the
    same keys. invariant() is run first, so only a valid spec can be
    converted.
    """
    def items2dict(items):
      # Flatten every item to its attribute dict, preserving the keys.
      return dict((name, item.__dict__) for name, item in items.items())

    self.invariant()
    return dict(description=self.description,
                singleNodeOnly=self.singleNodeOnly,
                inputs=items2dict(self.inputs),
                outputs=items2dict(self.outputs),
                parameters=items2dict(self.parameters),
                commands=items2dict(self.commands))
| 7,803 | Python | .py | 199 | 32.648241 | 80 | 0.679076 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,901 | sp_region.py | numenta_nupic-legacy/src/nupic/regions/sp_region.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import os
from nupic.bindings.algorithms import SpatialPooler as CPPSpatialPooler
from nupic.bindings.math import GetNTAReal
from nupic.bindings.regions.PyRegion import PyRegion
import nupic.algorithms.fdrutilities as fdru
from nupic.algorithms.spatial_pooler import SpatialPooler as PYSpatialPooler
from nupic.support import getArgumentDescriptions
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.regions.SPRegion_capnp import SPRegionProto
def getDefaultSPImp():
  """Return the name of the default spatial pooler implementation.

  :returns: (string) always 'cpp', the optimized C++ implementation.
  """
  return 'cpp'
def getSPClass(spatialImp):
  """Return the SpatialPooler class matching an implementation name.

  :param spatialImp: (string) 'py' for the pure-Python implementation, or
      'cpp' for the C++ implementation.
  :raises RuntimeError: for any other value.
  """
  if spatialImp == 'cpp':
    return CPPSpatialPooler
  if spatialImp == 'py':
    return PYSpatialPooler
  raise RuntimeError("Invalid spatialImp '%s'. Legal values are: 'py', "
                     "'cpp'" % (spatialImp))
def _buildArgs(f, self=None, kwargs=None):
  """
  Get the default arguments from the function and assign as instance vars.

  Return a list of 3-tuples with (name, description, defaultValue) for each
  argument to the function.

  Assigns all arguments to the function as instance variables of SPRegion.
  If the argument was not provided, uses the default value.
  Pops any values from kwargs that go to the function.

  :param f: the function (typically a SpatialPooler __init__) to introspect
  :param self: if given, each argument value is also set as an attribute on
      this object
  :param kwargs: caller-supplied argument values; consumed (popped) entries
      override the function's defaults. Defaults to an empty dict.
  :raises TypeError: if an argument has no default and no value in kwargs
      while ``self`` is given.
  """
  # BUG FIX: the default used to be the mutable literal `kwargs={}`, which
  # is shared across calls and was mutated via kwargs.pop() below.
  if kwargs is None:
    kwargs = {}

  # Get the name, description, and default value for each argument
  argTuples = getArgumentDescriptions(f)
  argTuples = argTuples[1:]  # Remove 'self'

  # Get the names of the parameters to our own constructor and remove them
  # Check for _original_init first, because if LockAttributesMixin is used,
  # __init__'s signature will be just (self, *args, **kw), but
  # _original_init is created with the original signature
  #init = getattr(self, '_original_init', self.__init__)
  init = SPRegion.__init__
  ourArgNames = [t[0] for t in getArgumentDescriptions(init)]
  # Also remove a few other names that aren't in our constructor but are
  # computed automatically (e.g. numberOfCols for the TM)
  # TODO: where does numberOfCols come into SPRegion?
  ourArgNames += [
    'numberOfCols',
  ]
  argTuples = [t for t in argTuples if t[0] not in ourArgNames]

  # Build the dictionary of arguments
  if self:
    for argTuple in argTuples:
      argName = argTuple[0]
      if argName in kwargs:
        # Argument was provided
        argValue = kwargs.pop(argName)
      elif len(argTuple) == 2:
        # Argument was not provided and there is no default value
        raise TypeError("Must provide value for '%s'" % argName)
      else:
        # Argument was not provided; use the default value
        argValue = argTuple[2]
      # Set as an instance variable if 'self' was passed in
      setattr(self, argName, argValue)

  return argTuples
def _getAdditionalSpecs(spatialImp, kwargs={}):
  """Build the additional specs in three groups (for the inspector)

  Use the type of the default argument to set the Spec type, defaulting
  to 'Byte' for None and complex types

  Determines the spatial parameters based on the selected implementation.
  It defaults to SpatialPooler.

  :param spatialImp: (string) which spatial pooler implementation to
      introspect ('py' or 'cpp'); its constructor arguments become the
      automatically-derived spatial parameters.
  :param kwargs: unused in this function body; kept for signature
      compatibility.  NOTE(review): mutable default argument — harmless here
      since it is never mutated, but worth cleaning up.
  :returns: tuple (spatialSpec, otherSpec) of parameter-spec dicts.
  """
  # Maps a Python default-value type to the NuPIC spec dataType name.
  # Anything not listed falls back to 'Byte' (variable-length).
  typeNames = {int: 'UInt32', float: 'Real32', str: 'Byte', bool: 'bool', tuple: 'tuple'}

  def getArgType(arg):
    # Infer the spec (dataType, count) pair from a default value: 'Byte'
    # entries get count 0 (variable length); tuples report their element
    # type and length; booleans are exposed as UInt32.
    t = typeNames.get(type(arg), 'Byte')
    count = 0 if t == 'Byte' else 1
    if t == 'tuple':
      t = typeNames.get(type(arg[0]), 'Byte')
      count = len(arg)
    if t == 'bool':
      t = 'UInt32'
    return (t, count)

  def getConstraints(arg):
    # Map a default value to the spec 'constraints' string.
    t = typeNames.get(type(arg), 'Byte')
    if t == 'Byte':
      return 'multiple'
    elif t == 'bool':
      return 'bool'
    else:
      return ''

  # Get arguments from spatial pooler constructors, figure out types of
  # variables and populate spatialSpec.
  SpatialClass = getSPClass(spatialImp)
  sArgTuples = _buildArgs(SpatialClass.__init__)
  spatialSpec = {}
  for argTuple in sArgTuples:
    d = dict(
      description=argTuple[1],
      accessMode='ReadWrite',
      dataType=getArgType(argTuple[2])[0],
      count=getArgType(argTuple[2])[1],
      constraints=getConstraints(argTuple[2]))
    spatialSpec[argTuple[0]] = d

  # Add special parameters that weren't handled automatically
  # Spatial parameters only!
  spatialSpec.update(dict(
    columnCount=dict(
      description='Total number of columns (coincidences).',
      accessMode='Read',
      dataType='UInt32',
      count=1,
      constraints=''),

    inputWidth=dict(
      description='Size of inputs to the SP.',
      accessMode='Read',
      dataType='UInt32',
      count=1,
      constraints=''),

    spInputNonZeros=dict(
      description='The indices of the non-zero inputs to the spatial pooler',
      accessMode='Read',
      dataType='UInt32',
      count=0,
      constraints=''),

    spOutputNonZeros=dict(
      description='The indices of the non-zero outputs from the spatial pooler',
      accessMode='Read',
      dataType='UInt32',
      count=0,
      constraints=''),

    spOverlapDistribution=dict(
      description="""The overlaps between the active output coincidences
      and the input. The overlap amounts for each coincidence are sorted
      from highest to lowest. """,
      accessMode='Read',
      dataType='Real32',
      count=0,
      constraints=''),

    sparseCoincidenceMatrix=dict(
      description='The coincidences, as a SparseMatrix',
      accessMode='Read',
      dataType='Byte',
      count=0,
      constraints=''),

    denseOutput=dict(
      description='Score for each coincidence.',
      accessMode='Read',
      dataType='Real32',
      count=0,
      constraints=''),

    spLearningStatsStr=dict(
      description="""String representation of dictionary containing a number
      of statistics related to learning.""",
      accessMode='Read',
      dataType='Byte',
      count=0,
      constraints='handle'),

    spatialImp=dict(
      description="""Which spatial pooler implementation to use. Set to either
      'py', or 'cpp'. The 'cpp' implementation is optimized for
      speed in C++.""",
      accessMode='ReadWrite',
      dataType='Byte',
      count=0,
      constraints='enum: py, cpp'),
  ))

  # The last group is for parameters that aren't specific to spatial pooler
  otherSpec = dict(
    learningMode=dict(
      description='1 if the node is learning (default 1).',
      accessMode='ReadWrite',
      dataType='UInt32',
      count=1,
      constraints='bool'),

    inferenceMode=dict(
      description='1 if the node is inferring (default 0).',
      accessMode='ReadWrite',
      dataType='UInt32',
      count=1,
      constraints='bool'),

    anomalyMode=dict(
      description='1 if an anomaly score is being computed',
      accessMode='ReadWrite',
      dataType='UInt32',
      count=1,
      constraints='bool'),

    topDownMode=dict(
      description='1 if the node should do top down compute on the next call '
                  'to compute into topDownOut (default 0).',
      accessMode='ReadWrite',
      dataType='UInt32',
      count=1,
      constraints='bool'),

    activeOutputCount=dict(
      description='Number of active elements in bottomUpOut output.',
      accessMode='Read',
      dataType='UInt32',
      count=1,
      constraints=''),

    logPathInput=dict(
      description='Optional name of input log file. If set, every input vector'
                  ' will be logged to this file.',
      accessMode='ReadWrite',
      dataType='Byte',
      count=0,
      constraints=''),

    logPathOutput=dict(
      description='Optional name of output log file. If set, every output vector'
                  ' will be logged to this file.',
      accessMode='ReadWrite',
      dataType='Byte',
      count=0,
      constraints=''),

    logPathOutputDense=dict(
      description='Optional name of output log file. If set, every output vector'
                  ' will be logged to this file as a dense vector.',
      accessMode='ReadWrite',
      dataType='Byte',
      count=0,
      constraints=''),
  )

  return spatialSpec, otherSpec
class SPRegion(PyRegion):
"""
SPRegion is designed to implement the spatial pooler compute for a given
HTM level.
Uses the :class:`~nupic.algorithms.spatial_pooler.SpatialPooler` class to do
most of the work.
:param columnCount: (int) Number of columns in the SP, a required parameter.
:param inputWidth: (int) Size of inputs to the SP, a required parameter.
  :param spatialImp: (string) ``py`` or ``cpp`` (default ``cpp``).
"""
  def __init__(self,
               columnCount,
               inputWidth,
               spatialImp=getDefaultSPImp(),
               **kwargs):
    """
    :param columnCount: (int) number of SP columns; must be > 0.
    :param inputWidth: (int) size of the input vector; must be > 0.
    :param spatialImp: (string) 'py' or 'cpp' spatial pooler implementation.
    :param kwargs: spatial pooler constructor arguments; consumed by
        _buildArgs() (each becomes an instance attribute), with any
        remainder forwarded to PyRegion.__init__.
    :raises TypeError: if columnCount or inputWidth is not positive.
    """
    if columnCount <= 0 or inputWidth <=0:
      raise TypeError("Parameters columnCount and inputWidth must be > 0")

    # Pull out the spatial arguments automatically
    # These calls whittle down kwargs and create instance variables of SPRegion
    self.spatialImp = spatialImp
    self.SpatialClass = getSPClass(spatialImp)
    sArgTuples = _buildArgs(self.SpatialClass.__init__, self, kwargs)

    # Make a list of automatic spatial arg names for later use
    self._spatialArgNames = [t[0] for t in sArgTuples]

    # Learning and SP parameters.
    # By default we start out in stage learn with inference disabled
    self.learningMode = True
    self.inferenceMode = False
    self.anomalyMode = False
    self.topDownMode = False
    self.columnCount = columnCount
    self.inputWidth = inputWidth

    # NOTE: must come after _buildArgs() so that only the leftover kwargs
    # reach the base class.
    PyRegion.__init__(self, **kwargs)

    # Initialize all non-persistent base members, as well as give
    # derived class an opportunity to do the same.
    self._loaded = False
    self._initializeEphemeralMembers()

    # Debugging support, used in _conditionalBreak
    self.breakPdb = False
    self.breakKomodo = False

    # Defaults for all other parameters
    self.logPathInput = ''
    self.logPathOutput = ''
    self.logPathOutputDense = ''
    self._fpLogSPInput = None
    self._fpLogSP = None
    self._fpLogSPDense = None

    #
    # Variables set up in initInNetwork()
    #

    # Spatial instance
    self._sfdr = None

    # Spatial pooler's bottom-up output value: hang on to this output for
    # top-down inference and for debugging
    self._spatialPoolerOutput = None

    # Spatial pooler's bottom-up input: hang on to this for supporting the
    # spInputNonZeros parameter
    self._spatialPoolerInput = None
#############################################################################
#
# Initialization code
#
#############################################################################
  def _initializeEphemeralMembers(self):
    """
    Initialize all ephemeral data members, and give the derived class the
    opportunity to do the same by invoking the virtual member _initEphemerals(),
    which is intended to be overridden.

    NOTE: this is used by both __init__ and __setstate__ code paths.
    """
    # When loading (self._loaded), leftover base-class members are tolerated
    # silently; during a fresh __init__ they are reported and then rejected
    # by the asserts below.
    for attrName in self._getEphemeralMembersBase():
      if attrName != "_loaded":
        if hasattr(self, attrName):
          if self._loaded:
            # print self.__class__.__name__, "contains base class member '%s' " \
            #     "after loading." % attrName
            # TODO: Re-enable warning or turn into error in a future release.
            pass
          else:
            print self.__class__.__name__, "contains base class member '%s'" % \
              attrName

    if not self._loaded:
      for attrName in self._getEphemeralMembersBase():
        if attrName != "_loaded":
          # if hasattr(self, attrName):
          #   import pdb; pdb.set_trace()
          assert not hasattr(self, attrName)
        else:
          # "_loaded" itself must already have been set by the caller.
          assert hasattr(self, attrName)

    # Profiling information
    self._profileObj = None
    self._iterations = 0

    # Let derived class initialize ephemerals
    self._initEphemerals()
    self._checkEphemeralMembers()
def initialize(self):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.initialize`.
"""
# Zero out the spatial output in case it is requested
self._spatialPoolerOutput = numpy.zeros(self.columnCount,
dtype=GetNTAReal())
# Zero out the rfInput in case it is requested
self._spatialPoolerInput = numpy.zeros((1, self.inputWidth),
dtype=GetNTAReal())
# Allocate the spatial pooler
self._allocateSpatialFDR(None)
def _allocateSpatialFDR(self, rfInput):
"""Allocate the spatial pooler instance."""
if self._sfdr:
return
# Retrieve the necessary extra arguments that were handled automatically
autoArgs = dict((name, getattr(self, name))
for name in self._spatialArgNames)
# Instantiate the spatial pooler class.
if ( (self.SpatialClass == CPPSpatialPooler) or
(self.SpatialClass == PYSpatialPooler) ):
autoArgs['columnDimensions'] = [self.columnCount]
autoArgs['inputDimensions'] = [self.inputWidth]
autoArgs['potentialRadius'] = self.inputWidth
self._sfdr = self.SpatialClass(
**autoArgs
)
#############################################################################
#
# Core compute methods: learning, inference, and prediction
#
#############################################################################
  def compute(self, inputs, outputs):
    """
    Run one iteration, profiling it if requested.

    :param inputs: (dict) mapping region input names to numpy.array values
    :param outputs: (dict) mapping region output names to numpy.arrays that
           should be populated with output values by this method
    """
    # Uncomment this to find out who is generating divide by 0, or other numpy warnings
    # numpy.seterr(divide='raise', invalid='raise', over='raise')

    # Modify this line to turn on profiling for a given node. The results file
    # ('hotshot.stats') will be sensed and printed out by the vision framework's
    # RunInference.py script at the end of inference.
    # Also uncomment the hotshot import at the top of this file.
    # NOTE(review): this whole branch is dead code (guarded by `if False`);
    # if re-enabled, `hotshot.stats` needs its own import — confirm.
    if False and self.learningMode \
        and self._iterations > 0 and self._iterations <= 10:

      import hotshot
      if self._iterations == 10:
        print "\n Collecting and sorting internal node profiling stats generated by hotshot..."
        stats = hotshot.stats.load("hotshot.stats")
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats()

      # The guts of the compute are contained in the _compute() call so that we
      # can profile it if requested.
      if self._profileObj is None:
        print "\n Preparing to capture profile using hotshot..."
        if os.path.exists('hotshot.stats'):
          # There is an old hotshot stats profile left over, remove it.
          os.remove('hotshot.stats')
        self._profileObj = hotshot.Profile("hotshot.stats", 1, 1)
                          # filename, lineevents, linetimings
      self._profileObj.runcall(self._compute, *[inputs, outputs])
    else:
      # Normal (non-profiled) path: delegate to _compute().
      self._compute(inputs, outputs)
def _compute(self, inputs, outputs):
"""
Run one iteration of SPRegion's compute
"""
#if self.topDownMode and (not 'topDownIn' in inputs):
# raise RuntimeError("The input topDownIn must be linked in if "
# "topDownMode is True")
if self._sfdr is None:
raise RuntimeError("Spatial pooler has not been initialized")
if not self.topDownMode:
#
# BOTTOM-UP compute
#
self._iterations += 1
# Get our inputs into numpy arrays
buInputVector = inputs['bottomUpIn']
resetSignal = False
if 'resetIn' in inputs:
assert len(inputs['resetIn']) == 1
resetSignal = inputs['resetIn'][0] != 0
# Perform inference and/or learning
rfOutput = self._doBottomUpCompute(
rfInput = buInputVector.reshape((1,buInputVector.size)),
resetSignal = resetSignal
)
outputs['bottomUpOut'][:] = rfOutput.flat
else:
#
# TOP-DOWN inference
#
topDownIn = inputs.get('topDownIn',None)
spatialTopDownOut, temporalTopDownOut = self._doTopDownInfer(topDownIn)
outputs['spatialTopDownOut'][:] = spatialTopDownOut
if temporalTopDownOut is not None:
outputs['temporalTopDownOut'][:] = temporalTopDownOut
# OBSOLETE
outputs['anomalyScore'][:] = 0
# Write the bottom up out to our node outputs only if we are doing inference
#print "SPRegion input: ", buInputVector.nonzero()[0]
#print "SPRegion output: ", rfOutput.nonzero()[0]
  def _doBottomUpCompute(self, rfInput, resetSignal):
    """
    Do one iteration of inference and/or learning and return the result

    Parameters:
    --------------------------------------------
    rfInput:      Input vector. Shape is: (1, inputVectorLen).
    resetSignal:  True if reset is asserted (currently unused here)

    Returns the cached spatial pooler output array (1-D, one entry per
    column).
    """
    # Conditional compute break
    self._conditionalBreak()

    # Save the rfInput for the spInputNonZeros parameter
    self._spatialPoolerInput = rfInput.reshape(-1)
    assert(rfInput.shape[0] == 1)

    # Run inference using the spatial pooler. We learn on the coincidences only
    # if we are in learning mode and trainingStep is set appropriately.

    # Run SFDR bottom-up compute and cache output in self._spatialPoolerOutput
    inputVector = numpy.array(rfInput[0]).astype('uint32')
    outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('uint32')

    # learningMode doubles as the SP's "learn" flag for this step.
    self._sfdr.compute(inputVector, self.learningMode, outputVector)
    self._spatialPoolerOutput[:] = outputVector[:]

    # Direct logging of SP outputs if requested
    # (format: "<size> <space-separated non-zero indices>")
    if self._fpLogSP:
      output = self._spatialPoolerOutput.reshape(-1)
      outputNZ = output.nonzero()[0]
      outStr = " ".join(["%d" % int(token) for token in outputNZ])
      print >>self._fpLogSP, output.size, outStr

    # Direct logging of SP inputs
    if self._fpLogSPInput:
      output = rfInput.reshape(-1)
      outputNZ = output.nonzero()[0]
      outStr = " ".join(["%d" % int(token) for token in outputNZ])
      print >>self._fpLogSPInput, output.size, outStr

    return self._spatialPoolerOutput
def _doTopDownInfer(self, topDownInput = None):
"""
Do one iteration of top-down inference.
Parameters:
--------------------------------------------
tdInput: Top-down input
retval: (spatialTopDownOut, temporalTopDownOut)
spatialTopDownOut is the top down output computed only from the SP,
using it's current bottom-up output.
temporalTopDownOut is the top down output computed from the topDown in
of the level above us.
"""
return None, None
#############################################################################
#
# Region API support methods: getSpec, getParameter, and setParameter
#
#############################################################################
  @classmethod
  def getBaseSpec(cls):
    """
    Doesn't include the spatial, temporal and other parameters — those are
    merged in by getSpec().

    :returns: (dict) The base Spec for SPRegion: inputs, outputs and the
        two debugger-break parameters.
    """
    spec = dict(
      description=SPRegion.__doc__,
      singleNodeOnly=True,
      # --- Inputs -------------------------------------------------------
      inputs=dict(
        bottomUpIn=dict(
          description="""The input vector.""",
          dataType='Real32',
          count=0,
          required=True,
          regionLevel=False,
          isDefaultInput=True,
          requireSplitterMap=False),

        resetIn=dict(
          description="""A boolean flag that indicates whether
                         or not the input vector received in this compute cycle
                         represents the start of a new temporal sequence.""",
          dataType='Real32',
          count=1,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

        topDownIn=dict(
          description="""The top-down input signal, generated from
                        feedback from upper levels""",
          dataType='Real32',
          count=0,
          required = False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

        sequenceIdIn=dict(
          description="Sequence ID",
          dataType='UInt64',
          count=1,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),
      ),
      # --- Outputs ------------------------------------------------------
      outputs=dict(
        bottomUpOut=dict(
          description="""The output signal generated from the bottom-up inputs
                          from lower levels.""",
          dataType='Real32',
          count=0,
          regionLevel=True,
          isDefaultOutput=True),

        topDownOut=dict(
          description="""The top-down output signal, generated from
                        feedback from upper levels""",
          dataType='Real32',
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

        spatialTopDownOut = dict(
          description="""The top-down output, generated only from the current
                         SP output. This can be used to evaluate how well the
                         SP is representing the inputs independent of the TM.""",
          dataType='Real32',
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

        temporalTopDownOut = dict(
          description="""The top-down output, generated only from the current
                         TM output feedback down through the SP.""",
          dataType='Real32',
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

        anomalyScore = dict(
          description="""The score for how 'anomalous' (i.e. rare) this spatial
                        input pattern is. Higher values are increasingly rare""",
          dataType='Real32',
          count=1,
          regionLevel=True,
          isDefaultOutput=False),
      ),
      # --- Parameters (debugging only; the rest come from getSpec()) ----
      parameters=dict(
        breakPdb=dict(
          description='Set to 1 to stop in the pdb debugger on the next compute',
          dataType='UInt32',
          count=1,
          constraints='bool',
          defaultValue=0,
          accessMode='ReadWrite'),

        breakKomodo=dict(
          description='Set to 1 to stop in the Komodo debugger on the next compute',
          dataType='UInt32',
          count=1,
          constraints='bool',
          defaultValue=0,
          accessMode='ReadWrite'),
      ),
    )

    return spec
@classmethod
def getSpec(cls):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSpec`.
The parameters collection is constructed based on the parameters specified
by the various components (spatialSpec, temporalSpec and otherSpec)
"""
spec = cls.getBaseSpec()
s, o = _getAdditionalSpecs(spatialImp=getDefaultSPImp())
spec['parameters'].update(s)
spec['parameters'].update(o)
return spec
  def getAlgorithmInstance(self):
    """
    :returns: (:class:`~nupic.algorithms.spatial_pooler.SpatialPooler`) the
        underlying algorithm object, or None if initialize() has not yet
        created it.
    """
    return self._sfdr
def getParameter(self, parameterName, index=-1):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameter`.
Most parameters are handled automatically by PyRegion's parameter get
mechanism. The ones that need special treatment are explicitly handled here.
"""
if parameterName == 'activeOutputCount':
return self.columnCount
elif parameterName == 'spatialPoolerInput':
return list(self._spatialPoolerInput.reshape(-1))
elif parameterName == 'spatialPoolerOutput':
return list(self._spatialPoolerOutput)
elif parameterName == 'spNumActiveOutputs':
return len(self._spatialPoolerOutput.nonzero()[0])
elif parameterName == 'spOutputNonZeros':
return [len(self._spatialPoolerOutput)] + \
list(self._spatialPoolerOutput.nonzero()[0])
elif parameterName == 'spInputNonZeros':
import pdb; pdb.set_trace()
return [len(self._spatialPoolerInput)] + \
list(self._spatialPoolerInput.nonzero()[0])
elif parameterName == 'spLearningStatsStr':
try:
return str(self._sfdr.getLearningStats())
except:
return str(dict())
else:
return PyRegion.getParameter(self, parameterName, index)
def setParameter(self, parameterName, index, parameterValue):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.
Set the value of a Spec parameter. Most parameters are handled
automatically by PyRegion's parameter set mechanism. The ones that need
special treatment are explicitly handled here.
"""
if parameterName in self._spatialArgNames:
setattr(self._sfdr, parameterName, parameterValue)
elif parameterName == "logPathInput":
self.logPathInput = parameterValue
# Close any existing log file
if self._fpLogSPInput:
self._fpLogSPInput.close()
self._fpLogSPInput = None
# Open a new log file
if parameterValue:
self._fpLogSPInput = open(self.logPathInput, 'w')
elif parameterName == "logPathOutput":
self.logPathOutput = parameterValue
# Close any existing log file
if self._fpLogSP:
self._fpLogSP.close()
self._fpLogSP = None
# Open a new log file
if parameterValue:
self._fpLogSP = open(self.logPathOutput, 'w')
elif parameterName == "logPathOutputDense":
self.logPathOutputDense = parameterValue
# Close any existing log file
if self._fpLogSPDense:
self._fpLogSPDense.close()
self._fpLogSPDense = None
# Open a new log file
if parameterValue:
self._fpLogSPDense = open(self.logPathOutputDense, 'w')
elif hasattr(self, parameterName):
setattr(self, parameterName, parameterValue)
else:
raise Exception('Unknown parameter: ' + parameterName)
#############################################################################
#
# Methods to support serialization
#
#############################################################################
  @staticmethod
  def getSchema():
    """
    Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`.

    :returns: the SPRegionProto capnproto schema class used by
        writeToProto()/readFromProto(). Only defined when the ``capnp``
        package imported successfully at module load.
    """
    return SPRegionProto
def writeToProto(self, proto):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.
Write state to proto object.
:param proto: SPRegionProto capnproto object
"""
proto.spatialImp = self.spatialImp
proto.columnCount = self.columnCount
proto.inputWidth = self.inputWidth
proto.learningMode = 1 if self.learningMode else 0
proto.inferenceMode = 1 if self.inferenceMode else 0
proto.anomalyMode = 1 if self.anomalyMode else 0
proto.topDownMode = 1 if self.topDownMode else 0
self._sfdr.write(proto.spatialPooler)
@classmethod
def readFromProto(cls, proto):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.
Read state from proto object.
:param proto: SPRegionProto capnproto object
"""
instance = cls(proto.columnCount, proto.inputWidth)
instance.spatialImp = proto.spatialImp
instance.learningMode = proto.learningMode
instance.inferenceMode = proto.inferenceMode
instance.anomalyMode = proto.anomalyMode
instance.topDownMode = proto.topDownMode
spatialImp = proto.spatialImp
instance._sfdr = getSPClass(spatialImp).read(proto.spatialPooler)
return instance
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
# We only want to serialize a single spatial/temporal FDR if they're cloned
for ephemeralMemberName in self._getEphemeralMembersAll():
state.pop(ephemeralMemberName, None)
return state
  def __setstate__(self, state):
    """
    Set the state of ourself from a serialized state.

    Restores the pickled attribute dict, then rebuilds every ephemeral
    (non-pickled) member and re-allocates the spatial pooler wrapper.
    """
    self.__dict__.update(state)
    # Mark that this instance was restored from a serialized snapshot.
    self._loaded = True
    # Backwards compatibility
    if not hasattr(self, "SpatialClass"):
      self.SpatialClass = self._sfdr.__class__
    # Initialize all non-persistent base members, as well as give
    # derived class an opportunity to do the same.
    self._initializeEphemeralMembers()
    self._allocateSpatialFDR(None)
  def _initEphemerals(self):
    """
    Initialize all ephemerals used by derived classes.

    Ephemerals are the members stripped by __getstate__: the cached output
    buffer, the direct-logging file handles, and their path strings.
    """
    if hasattr(self, '_sfdr') and self._sfdr:
      # Output buffer sized to the column count, in NTA's real dtype.
      self._spatialPoolerOutput = numpy.zeros(self.columnCount,
                                              dtype=GetNTAReal())
    else:
      self._spatialPoolerOutput = None  # Will be filled in initInNetwork

    # Direct logging support (faster than node watch)
    self._fpLogSPInput = None
    self._fpLogSP = None
    self._fpLogSPDense = None
    self.logPathInput = ""
    self.logPathOutput = ""
    self.logPathOutputDense = ""
def _getEphemeralMembers(self):
"""
Callback that returns a list of all "ephemeral" members (i.e., data members
that should not and/or cannot be pickled.)
"""
return ['_spatialPoolerOutput', '_fpLogSP', '_fpLogSPDense',
'logPathInput', 'logPathOutput', 'logPathOutputDense'
]
def _getEphemeralMembersBase(self):
"""
Returns list of all ephemeral members.
"""
return [
'_loaded',
'_profileObj',
'_iterations',
]
def _getEphemeralMembersAll(self):
"""
Returns a concatenated list of both the standard base class
ephemeral members, as well as any additional ephemeral members
(e.g., file handles, etc.).
"""
return self._getEphemeralMembersBase() + self._getEphemeralMembers()
  def _checkEphemeralMembers(self):
    """
    Sanity check that every ephemeral member (base and derived) exists on this
    instance, e.g. after deserialization. Prints a diagnostic naming any
    missing attribute before asserting, so a failure identifies the member.
    """
    for attrName in self._getEphemeralMembersBase():
      if not hasattr(self, attrName):
        print "Missing base class member:", attrName
    for attrName in self._getEphemeralMembers():
      if not hasattr(self, attrName):
        print "Missing derived class member:", attrName

    for attrName in self._getEphemeralMembersBase():
      assert hasattr(self, attrName)
    for attrName in self._getEphemeralMembers():
      assert hasattr(self, attrName), "Node missing attr '%s'." % attrName
#############################################################################
#
# Misc. code
#
#############################################################################
  def _conditionalBreak(self):
    """
    Drop into a debugger when the corresponding break flag is set: Komodo's
    remote debugger for ``breakKomodo``, pdb for ``breakPdb``.
    """
    if self.breakKomodo:
      import dbgp.client; dbgp.client.brk()
    if self.breakPdb:
      import pdb; pdb.set_trace()
#############################################################################
#
# NuPIC 2 Support
# These methods are required by NuPIC 2
#
#############################################################################
def getOutputElementCount(self, name):
if name == 'bottomUpOut':
return self.columnCount
elif name == 'spatialTopDownOut' or name == 'temporalTopDownOut' or \
name == 'topDownOut':
return self.inputWidth
else:
raise Exception("Invalid output name specified")
# TODO: as a temporary hack, getParameterArrayCount checks to see if there's a
# variable, private or not, with that name. If so, it attempts to return the
# length of that variable.
def getParameterArrayCount(self, name, index):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArrayCount`.
TODO: as a temporary hack, getParameterArrayCount checks to see if there's a
variable, private or not, with that name. If so, it returns the value of the
variable.
"""
p = self.getParameter(name)
if (not hasattr(p, '__len__')):
raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name)
return len(p)
def getParameterArray(self, name, index, a):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.
TODO: as a temporary hack, getParameterArray checks to see if there's a
variable, private or not, with that name. If so, it returns the value of the
variable.
"""
p = self.getParameter(name)
if (not hasattr(p, '__len__')):
raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name)
if len(p) > 0:
a[:] = p[:]
| 34,043 | Python | .py | 859 | 32.795111 | 99 | 0.645253 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,902 | knn_anomaly_classifier_region.py | numenta_nupic-legacy/src/nupic/regions/knn_anomaly_classifier_region.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file defines the k Nearest Neighbor classifier region.
"""
import copy
import numpy
from nupic.algorithms import anomaly
from nupic.bindings.regions.PyRegion import PyRegion
from knn_classifier_region import KNNClassifierRegion
from nupic.bindings.math import Random
from nupic.frameworks.opf.exceptions import (HTMPredictionModelInvalidRangeError,
HTMPredictionModelInvalidArgument)
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.regions.knn_anomaly_classifier_region_capnp import \
KNNAnomalyClassifierRegionProto
class KNNAnomalyClassifierRegion(PyRegion):
"""
Wraps the :class:`~nupic.regions.knn_classifier_region.KNNClassifierRegion` to
classify :class:`~nupic.frameworks.opf.htm_prediction_model.HTMPredictionModel`
state. It allows for individual records to be classified as anomalies and
supports anomaly detection even after the model has learned the anomalous
sequence.
**Methods:**
* :meth:`~nupic.regions.knn_anomaly_classifier_region.KNNAnomalyClassifierRegion.compute`
- called by
:class:`~nupic.frameworks.opf.htm_prediction_model.HTMPredictionModel`
during record processing
* :meth:`~nupic.regions.knn_anomaly_classifier_region.KNNAnomalyClassifierRegion.getLabels`
- return points with classification records
* :meth:`~nupic.regions.knn_anomaly_classifier_region.KNNAnomalyClassifierRegion.addLabel`
- add a set label to a given set of points
* :meth:`~nupic.regions.knn_anomaly_classifier_region.KNNAnomalyClassifierRegion.removeLabels`
- remove labels from a given set of points
:param trainRecords: (int) number of records to skip before classification.
:param anomalyThreshold: (float) threshold on anomaly score to automatically
classify record as an anomaly
:param cacheSize: (int) number of records to keep in cache. Can only
recalculate records kept in cache when setting the ``trainRecords``.
:param classificationVectorType: (int) default=1
:param activeColumnCount: (int) default=40,
:param classificationMaxDist: (float) default=0.30
"""
  @classmethod
  def getSpec(cls):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.

    Returns the node spec dict describing this region's inputs, parameters
    and commands; the parameter spec is merged with the wrapped
    KNNClassifierRegion's parameters at the end.
    """
    ns = dict(
        description=KNNAnomalyClassifierRegion.__doc__,
        singleNodeOnly=True,
        # Inputs consumed from the SP and TM regions each time step.
        inputs=dict(
          spBottomUpOut=dict(
            description="""The output signal generated from the bottom-up inputs
                            from lower levels.""",
            dataType='Real32',
            count=0,
            required=True,
            regionLevel=False,
            isDefaultInput=True,
            requireSplitterMap=False),

          tpTopDownOut=dict(
            description="""The top-down inputsignal, generated from
                          feedback from upper levels""",
            dataType='Real32',
            count=0,
            required=True,
            regionLevel=False,
            isDefaultInput=True,
            requireSplitterMap=False),

          tpLrnActiveStateT=dict(
            description="""Active cells in the learn state at time T from TM.
                        This is used to classify on.""",
            dataType='Real32',
            count=0,
            required=True,
            regionLevel=False,
            isDefaultInput=True,
            requireSplitterMap=False),

          sequenceIdIn=dict(
            description="Sequence ID",
            dataType='UInt64',
            count=1,
            required=False,
            regionLevel=True,
            isDefaultInput=False,
            requireSplitterMap=False),
        ),

        # Results are exposed via commands/getLabelResults, not outputs.
        outputs=dict(
        ),

        parameters=dict(
          trainRecords=dict(
            description='Number of records to wait for training',
            dataType='UInt32',
            count=1,
            constraints='',
            defaultValue=0,
            accessMode='Create'),

          anomalyThreshold=dict(
            description='Threshold used to classify anomalies.',
            dataType='Real32',
            count=1,
            constraints='',
            defaultValue=0,
            accessMode='Create'),

          cacheSize=dict(
            description='Number of records to store in cache.',
            dataType='UInt32',
            count=1,
            constraints='',
            defaultValue=0,
            accessMode='Create'),

          classificationVectorType=dict(
            description="""Vector type to use when classifying.
              1 - Vector Column with Difference (TM and SP)
            """,
            dataType='UInt32',
            count=1,
            constraints='',
            defaultValue=1,
            accessMode='ReadWrite'),

          activeColumnCount=dict(
            description="""Number of active columns in a given step. Typically
            equivalent to SP.numActiveColumnsPerInhArea""",
            dataType='UInt32',
            count=1,
            constraints='',
            defaultValue=40,
            accessMode='ReadWrite'),

          classificationMaxDist=dict(
            description="""Maximum distance a sample can be from an anomaly
            in the classifier to be labeled as an anomaly.

            Ex: With rawOverlap distance, a value of 0.65 means that the points
            must be at most a distance 0.65 apart from each other. This
            translates to they must be at least 35% similar.""",
            dataType='Real32',
            count=1,
            constraints='',
            defaultValue=0.65,
            accessMode='Create'
          )
        ),
        commands=dict(
          getLabels=dict(description=
            "Returns a list of label dicts with properties ROWID and labels."
            "ROWID corresponds to the records id and labels is a list of "
            "strings representing the records labels.  Takes additional "
            "integer properties start and end representing the range that "
            "will be returned."),

          addLabel=dict(description=
            "Takes parameters start, end and labelName. Adds the label "
            "labelName to the records from start to end. This will recalculate "
            "labels from end to the most recent record."),

          removeLabels=dict(description=
            "Takes additional parameters start, end, labelFilter.  Start and "
            "end correspond to range to remove the label. Remove labels from "
            "each record with record ROWID in range from start to end, "
            "noninclusive of end. Removes all records if labelFilter is None, "
            "otherwise only removes the labels eqaul to labelFilter.")
        )
      )

    # Expose the wrapped KNN classifier's parameters through this region.
    ns['parameters'].update(KNNClassifierRegion.getSpec()['parameters'])

    return ns
__VERSION__ = 1
AUTO_THRESHOLD_CLASSIFIED_LABEL = "Auto Threshold Classification"
AUTO_TAG = " (auto)"
  def __init__(self,
               trainRecords,
               anomalyThreshold,
               cacheSize,
               classificationVectorType=1,
               activeColumnCount=40,
               classificationMaxDist=0.30,
               **classifierArgs):
    """
    See the class docstring for parameter descriptions. Any extra keyword
    arguments are forwarded to the wrapped KNNClassifierRegion constructor.
    """

    # Internal Region Values
    self._maxLabelOutputs = 16
    self._activeColumnCount = activeColumnCount
    self._prevPredictedColumns = numpy.array([])
    self._anomalyVectorLength = None
    self._classificationMaxDist = classificationMaxDist
    self._iteration = 0

    # Set to create deterministic classifier
    classifierArgs['SVDDimCount'] = None

    # Parameters
    self.trainRecords = trainRecords
    self.anomalyThreshold = anomalyThreshold
    self.cacheSize = cacheSize
    self.classificationVectorType = classificationVectorType

    self._knnclassifierArgs = classifierArgs
    self._knnclassifier = KNNClassifierRegion(**self._knnclassifierArgs)
    self.labelResults = []
    self.saved_categories = []
    # Bounded moving window of _CLAClassificationRecord objects (see compute).
    self._recordsCache = []

    self._version = KNNAnomalyClassifierRegion.__VERSION__
  def initialize(self):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.initialize`.
    No-op: all setup is done in __init__.
    """
    pass
def getParameter(self, name, index=-1):
"""
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.
"""
if name == "trainRecords":
return self.trainRecords
elif name == "anomalyThreshold":
return self.anomalyThreshold
elif name == "activeColumnCount":
return self._activeColumnCount
elif name == "classificationMaxDist":
return self._classificationMaxDist
else:
# If any spec parameter name is the same as an attribute, this call
# will get it automatically, e.g. self.learningMode
return PyRegion.getParameter(self, name, index)
  def setParameter(self, name, index, value):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.

    Setting ``trainRecords``, ``anomalyThreshold`` or
    ``classificationMaxDist`` triggers a full reclassification of every cached
    record, so these calls can be expensive.

    :raises HTMPredictionModelInvalidArgument: when the value is not numeric,
        or when ``trainRecords`` is below the oldest cached ROWID.
    """
    if name == "trainRecords":
      # Ensure that the trainRecords can only be set to minimum of the ROWID in
      # the saved states
      if not (isinstance(value, float) or isinstance(value, int)):
        raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. threshold "
          "must be a number." % (type(value)))

      if len(self._recordsCache) > 0 and value < self._recordsCache[0].ROWID:
        raise HTMPredictionModelInvalidArgument("Invalid value. autoDetectWaitRecord "
          "value must be valid record within output stream. Current minimum "
          " ROWID in output stream is %d." % (self._recordsCache[0].ROWID))

      self.trainRecords = value
      # Remove any labels before the first cached record (wont be used anymore)
      self._deleteRangeFromKNN(0, self._recordsCache[0].ROWID)
      # Reclassify all states
      self._classifyStates()
    elif name == "anomalyThreshold":
      if not (isinstance(value, float) or isinstance(value, int)):
        raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. threshold "
          "must be a number." % (type(value)))
      self.anomalyThreshold = value
      self._classifyStates()
    elif name == "classificationMaxDist":
      if not (isinstance(value, float) or isinstance(value, int)):
        raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. "
          "classificationMaxDist must be a number." % (type(value)))
      self._classificationMaxDist = value
      self._classifyStates()
    elif name == "activeColumnCount":
      self._activeColumnCount = value
    else:
      return PyRegion.setParameter(self, name, index, value)
  def compute(self, inputs, outputs):
    """
    Process one input sample.
    This method is called by the runtime engine.

    Builds a classification record from the SP/TM state in ``inputs``,
    classifies it once past the training wait period, and appends it to the
    bounded in-memory cache. Results are exposed via
    :meth:`getLabelResults`; ``outputs`` is not written.
    """
    record = self._constructClassificationRecord(inputs)

    #Classify this point after waiting the classification delay
    if record.ROWID >= self.getParameter('trainRecords'):
      self._classifyState(record)

    #Save new classification record and keep history as moving window
    self._recordsCache.append(record)
    while len(self._recordsCache) > self.cacheSize:
      self._recordsCache.pop(0)

    self.labelResults = record.anomalyLabel

    self._iteration += 1
  def getLabelResults(self):
    """
    Get the labels of the previously computed record (updated by each
    :meth:`compute` call).

    :returns: (list) of strings representing the classification labels
    """
    return self.labelResults
  def _classifyStates(self):
    """
    Reclassifies all internal state (every record currently in the cache).
    """
    for state in self._recordsCache:
      self._classifyState(state)
  def _classifyState(self, state):
    """
    Reclassifies given state.

    Recomputes ``state.anomalyLabel`` from the KNN classification and the
    anomaly-score threshold, then syncs the KNN prototype store with the new
    labeling (adding, relabeling or deleting the record as needed).
    """
    # Record is before wait period do not classifiy
    if state.ROWID < self.getParameter('trainRecords'):
      if not state.setByUser:
        state.anomalyLabel = []
        self._deleteRecordsFromKNN([state])
      return

    label = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL
    autoLabel = label + KNNAnomalyClassifierRegion.AUTO_TAG

    # Update the label based on classifications
    newCategory = self._recomputeRecordFromKNN(state)
    labelList = self._categoryToLabelList(newCategory)

    if state.setByUser:
      # Preserve user-assigned labels, but strip stale threshold/auto labels
      # before merging so they can be re-derived below.
      if label in state.anomalyLabel:
        state.anomalyLabel.remove(label)
      if autoLabel in state.anomalyLabel:
        state.anomalyLabel.remove(autoLabel)
      labelList.extend(state.anomalyLabel)

    # Add threshold classification label if above threshold, else if
    # classified to add the auto threshold classification.
    if state.anomalyScore >= self.getParameter('anomalyThreshold'):
      labelList.append(label)
    elif label in labelList:
      ind = labelList.index(label)
      labelList[ind] = autoLabel

    # Make all entries unique
    labelList = list(set(labelList))

    # If both above threshold and auto classified above - remove auto label
    if label in labelList and autoLabel in labelList:
      labelList.remove(autoLabel)

    if state.anomalyLabel == labelList:
      return

    # Update state's labeling
    state.anomalyLabel = labelList

    # Update KNN Classifier with new labeling
    if state.anomalyLabel == []:
      self._deleteRecordsFromKNN([state])
    else:
      self._addRecordToKNN(state)
def _constructClassificationRecord(self, inputs):
"""
Construct a _HTMClassificationRecord based on the state of the model
passed in through the inputs.
Types for self.classificationVectorType:
1 - TM active cells in learn state
2 - SP columns concatenated with error from TM column predictions and SP
"""
# Count the number of unpredicted columns
allSPColumns = inputs["spBottomUpOut"]
activeSPColumns = allSPColumns.nonzero()[0]
score = anomaly.computeRawAnomalyScore(activeSPColumns,
self._prevPredictedColumns)
spSize = len(allSPColumns)
allTPCells = inputs['tpTopDownOut']
tpSize = len(inputs['tpLrnActiveStateT'])
classificationVector = numpy.array([])
if self.classificationVectorType == 1:
# Classification Vector: [---TM Cells---]
classificationVector = numpy.zeros(tpSize)
activeCellMatrix = inputs["tpLrnActiveStateT"].reshape(tpSize, 1)
activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
if activeCellIdx.shape[0] > 0:
classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1
elif self.classificationVectorType == 2:
# Classification Vecotr: [---SP---|---(TM-SP)----]
classificationVector = numpy.zeros(spSize+spSize)
if activeSPColumns.shape[0] > 0:
classificationVector[activeSPColumns] = 1.0
errorColumns = numpy.setdiff1d(self._prevPredictedColumns,
activeSPColumns)
if errorColumns.shape[0] > 0:
errorColumnIndexes = ( numpy.array(errorColumns, dtype=numpy.uint16) +
spSize )
classificationVector[errorColumnIndexes] = 1.0
else:
raise TypeError("Classification vector type must be either 'tpc' or"
" 'sp_tpe', current value is %s" % (self.classificationVectorType))
# Store the state for next time step
numPredictedCols = len(self._prevPredictedColumns)
predictedColumns = allTPCells.nonzero()[0]
self._prevPredictedColumns = copy.deepcopy(predictedColumns)
if self._anomalyVectorLength is None:
self._anomalyVectorLength = len(classificationVector)
result = _CLAClassificationRecord(
ROWID=self._iteration, #__numRunCalls called
#at beginning of model.run
anomalyScore=score,
anomalyVector=classificationVector.nonzero()[0].tolist(),
anomalyLabel=[]
)
return result
  def _addRecordToKNN(self, record):
    """
    Adds the record to the KNN classifier.

    If the record's ROWID is already stored as a prototype, only its category
    (label bitmask) is overwritten; otherwise the record's anomaly vector is
    learned as a new prototype under that ROWID.
    """
    knn = self._knnclassifier._knn

    prototype_idx = self._knnclassifier.getParameter('categoryRecencyList')
    category = self._labelListToCategoryNumber(record.anomalyLabel)

    # If record is already in the classifier, overwrite its labeling
    if record.ROWID in prototype_idx:
      knn.prototypeSetCategory(record.ROWID, category)
      return

    # Learn this pattern in the knn
    pattern = self._getStateAnomalyVector(record)
    rowID = record.ROWID
    knn.learn(pattern, category, rowID=rowID)
  def _deleteRecordsFromKNN(self, recordsToDelete):
    """
    Removes the given records from the classifier.

    Records whose labels were set explicitly by the user (``setByUser``) are
    kept; only auto-classified prototypes are removed.

    parameters
    ------------
    recordsToDelete - list of records to delete from the classifier
    """
    prototype_idx = self._knnclassifier.getParameter('categoryRecencyList')

    idsToDelete = ([r.ROWID for r in recordsToDelete if
      not r.setByUser and r.ROWID in prototype_idx])

    nProtos = self._knnclassifier._knn._numPatterns
    self._knnclassifier._knn.removeIds(idsToDelete)
    # Sanity check: every requested id must actually have been removed.
    assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete)
def _deleteRangeFromKNN(self, start=0, end=None):
"""
Removes any stored records within the range from start to
end. Noninclusive of end.
parameters
------------
start - integer representing the ROWID of the start of the deletion range,
end - integer representing the ROWID of the end of the deletion range,
if None, it will default to end.
"""
prototype_idx = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
if end is None:
end = prototype_idx.max() + 1
idsIdxToDelete = numpy.logical_and(prototype_idx >= start,
prototype_idx < end)
idsToDelete = prototype_idx[idsIdxToDelete]
nProtos = self._knnclassifier._knn._numPatterns
self._knnclassifier._knn.removeIds(idsToDelete.tolist())
assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete)
  def _recomputeRecordFromKNN(self, record):
    """
    returns the classified labeling of record

    Runs the wrapped KNN classifier in inference-only mode on the record's
    anomaly vector, considering only prototypes at or after the training wait
    period and strictly before this record. Returns the matched category
    bitmask, or None when no prototype is within _classificationMaxDist.
    """
    inputs = {
      "categoryIn": [None],
      "bottomUpIn": self._getStateAnomalyVector(record),
    }

    outputs = {"categoriesOut": numpy.zeros((1,)),
               "bestPrototypeIndices":numpy.zeros((1,)),
               "categoryProbabilitiesOut":numpy.zeros((1,))}

    # Only use points before record to classify and after the wait period.
    classifier_indexes = numpy.array(
      self._knnclassifier.getParameter('categoryRecencyList'))
    valid_idx = numpy.where(
      (classifier_indexes >= self.getParameter('trainRecords')) &
      (classifier_indexes < record.ROWID)
    )[0].tolist()

    if len(valid_idx) == 0:
      return None

    # Temporarily disable learning so this compute() cannot add the record
    # itself as a prototype.
    self._knnclassifier.setParameter('inferenceMode', None, True)
    self._knnclassifier.setParameter('learningMode', None, False)
    self._knnclassifier.compute(inputs, outputs)
    self._knnclassifier.setParameter('learningMode', None, True)

    classifier_distances = self._knnclassifier.getLatestDistances()
    valid_distances = classifier_distances[valid_idx]
    if valid_distances.min() <= self._classificationMaxDist:
      classifier_indexes_prev = classifier_indexes[valid_idx]
      rowID = classifier_indexes_prev[valid_distances.argmin()]
      indexID = numpy.where(classifier_indexes == rowID)[0][0]
      category = self._knnclassifier.getCategoryList()[indexID]
      return category
    return None
def _labelToCategoryNumber(self, label):
"""
Since the KNN Classifier stores categories as numbers, we must store each
label as a number. This method converts from a label to a unique number.
Each label is assigned a unique bit so multiple labels may be assigned to
a single record.
"""
if label not in self.saved_categories:
self.saved_categories.append(label)
return pow(2, self.saved_categories.index(label))
def _labelListToCategoryNumber(self, labelList):
"""
This method takes a list of labels and returns a unique category number.
This enables this class to store a list of categories for each point since
the KNN classifier only stores a single number category for each record.
"""
categoryNumber = 0
for label in labelList:
categoryNumber += self._labelToCategoryNumber(label)
return categoryNumber
def _categoryToLabelList(self, category):
"""
Converts a category number into a list of labels
"""
if category is None:
return []
labelList = []
labelNum = 0
while category > 0:
if category % 2 == 1:
labelList.append(self.saved_categories[labelNum])
labelNum += 1
category = category >> 1
return labelList
def _getStateAnomalyVector(self, state):
"""
Returns a state's anomaly vertor converting it from spare to dense
"""
vector = numpy.zeros(self._anomalyVectorLength)
vector[state.anomalyVector] = 1
return vector
  def getLabels(self, start=None, end=None):
    """
    Get the labels on classified points within range start to end. Not inclusive
    of end.

    Non-integer ``start``/``end`` values default to 0 and the newest cached
    ROWID respectively.

    :returns: (dict) with format:

      ::

        {
          'isProcessing': boolean,
          'recordLabels': list of results
        }

      ``isProcessing`` - currently always false as recalculation blocks; used if
      reprocessing of records is still being performed;

      Each item in ``recordLabels`` is of format:

      ::

        {
          'ROWID': id of the row,
          'labels': list of strings
        }

    :raises HTMPredictionModelInvalidRangeError: when ``end <= start``
    """
    if len(self._recordsCache) == 0:
      return {
        'isProcessing': False,
        'recordLabels': []
      }
    try:
      start = int(start)
    except Exception:
      start = 0

    try:
      end = int(end)
    except Exception:
      end = self._recordsCache[-1].ROWID

    if end <= start:
      raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'getLabels'.",
        debugInfo={
          'requestRange': {
            'startRecordID': start,
            'endRecordID': end
          },
          'numRecordsStored': len(self._recordsCache)
        })

    results = {
      'isProcessing': False,
      'recordLabels': []
    }

    # Collect the stored prototypes whose ROWIDs fall inside [start, end).
    ROWIDX = numpy.array(
      self._knnclassifier.getParameter('categoryRecencyList'))
    validIdx = numpy.where((ROWIDX >= start) & (ROWIDX < end))[0].tolist()
    categories = self._knnclassifier.getCategoryList()
    for idx in validIdx:
      row = dict(
        ROWID=int(ROWIDX[idx]),
        labels=self._categoryToLabelList(categories[idx]))
      results['recordLabels'].append(row)

    return results
  def addLabel(self, start, end, labelName):
    """
    Add the label labelName to each record with record ROWID in range from
    ``start`` to ``end``, noninclusive of end.

    This will recalculate all points from end to the last record stored in the
    internal cache of this classifier.

    :param start: (int) start index
    :param end: (int) end index (noninclusive)
    :param labelName: (string) label name
    :raises HTMPredictionModelInvalidRangeError: when the cache is empty or
        the clipped range is empty
    """
    if len(self._recordsCache) == 0:
      raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'. "
        "Model has no saved records.")

    try:
      start = int(start)
    except Exception:
      start = 0

    try:
      end = int(end)
    except Exception:
      end = int(self._recordsCache[-1].ROWID)

    # Translate absolute ROWIDs into cache-relative indices.
    startID = self._recordsCache[0].ROWID

    clippedStart = max(0, start - startID)
    clippedEnd = max(0, min( len( self._recordsCache) , end - startID))

    if clippedEnd <= clippedStart:
      raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'.",
        debugInfo={
          'requestRange': {
            'startRecordID': start,
            'endRecordID': end
          },
          'clippedRequestRange': {
            'startRecordID': clippedStart,
            'endRecordID': clippedEnd
          },
          'validRange': {
            'startRecordID': startID,
            'endRecordID': self._recordsCache[len(self._recordsCache)-1].ROWID
          },
          'numRecordsStored': len(self._recordsCache)
        })

    # Add label to range [clippedStart, clippedEnd)
    for state in self._recordsCache[clippedStart:clippedEnd]:
      if labelName not in state.anomalyLabel:
        state.anomalyLabel.append(labelName)
        state.setByUser = True
        self._addRecordToKNN(state)

    assert len(self.saved_categories) > 0

    # Recompute [end, ...)
    for state in self._recordsCache[clippedEnd:]:
      self._classifyState(state)
  def removeLabels(self, start=None, end=None, labelFilter=None):
    """
    Remove labels from each record with record ROWID in range from
    ``start`` to ``end``, noninclusive of end. Removes all records if
    ``labelFilter`` is None, otherwise only removes the labels equal to
    ``labelFilter``.

    This will recalculate all points from end to the last record stored in the
    internal cache of this classifier.

    :param start: (int) start index
    :param end: (int) end index (noninclusive)
    :param labelFilter: (string) label filter
    :raises HTMPredictionModelInvalidRangeError: when the cache is empty or
        the clipped range is empty
    """
    if len(self._recordsCache) == 0:
      raise HTMPredictionModelInvalidRangeError("Invalid supplied range for "
        "'removeLabels'. Model has no saved records.")

    try:
      start = int(start)
    except Exception:
      start = 0

    try:
      end = int(end)
    except Exception:
      end = self._recordsCache[-1].ROWID

    # Translate absolute ROWIDs into cache-relative indices. (start/end are
    # always ints here, so the None branches below never fire.)
    startID = self._recordsCache[0].ROWID

    clippedStart = 0 if start is None else max(0, start - startID)
    clippedEnd = len(self._recordsCache) if end is None else \
      max(0, min( len( self._recordsCache) , end - startID))

    if clippedEnd <= clippedStart:
      raise HTMPredictionModelInvalidRangeError("Invalid supplied range for "
        "'removeLabels'.", debugInfo={
          'requestRange': {
            'startRecordID': start,
            'endRecordID': end
          },
          'clippedRequestRange': {
            'startRecordID': clippedStart,
            'endRecordID': clippedEnd
          },
          'validRange': {
            'startRecordID': startID,
            'endRecordID': self._recordsCache[len(self._recordsCache)-1].ROWID
          },
          'numRecordsStored': len(self._recordsCache)
        })

    # Remove records within the cache
    recordsToDelete = []
    for state in self._recordsCache[clippedStart:clippedEnd]:
      if labelFilter is not None:
        if labelFilter in state.anomalyLabel:
          state.anomalyLabel.remove(labelFilter)
      else:
        state.anomalyLabel = []
      state.setByUser = False
      recordsToDelete.append(state)
    self._deleteRecordsFromKNN(recordsToDelete)

    # Remove records not in cache
    self._deleteRangeFromKNN(start, end)

    # Recompute [clippedEnd, ...)
    for state in self._recordsCache[clippedEnd:]:
      self._classifyState(state)
#############################################################################
#
# Methods to support serialization
#
#############################################################################
  def writeToProto(self, proto):
    """
    Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.
    Write state to proto object.

    :param proto: KNNAnomalyClassifierRegionProto capnproto object
    """
    proto.version = self._version
    proto.maxLabelOutputs = self._maxLabelOutputs
    proto.activeColumnCount = self._activeColumnCount
    if self._prevPredictedColumns is not None:
      proto.prevPredictedColumns = self._prevPredictedColumns.tolist()
    if self._anomalyVectorLength is not None:
      proto.anomalyVectorLength = self._anomalyVectorLength
    proto.classificationMaxDist = self._classificationMaxDist
    proto.iteration = self._iteration
    proto.trainRecords = self.trainRecords
    proto.anomalyThreshold = self.anomalyThreshold
    proto.cacheSize = self.cacheSize
    proto.classificationVectorType = self.classificationVectorType
    if self._knnclassifierArgs is not None:
      knnParams = self._knnclassifierArgs.copy()
      # Convert keys and types to be compatible with capnp
      if "SVDSampleCount" in knnParams:
        if knnParams["SVDSampleCount"] is not None:
          knnParams["svdSampleCount"] = knnParams["SVDSampleCount"]
        del knnParams["SVDSampleCount"]
      if "SVDDimCount" in knnParams:
        if knnParams["SVDDimCount"] is not None:
          knnParams["svdDimCount"] = knnParams["SVDDimCount"]
        del knnParams["SVDDimCount"]
      # capnp Bool fields reject ints; coerce the known boolean params.
      if "outputProbabilitiesByDist" in knnParams:
        knnParams["outputProbabilitiesByDist"] = bool(
          knnParams["outputProbabilitiesByDist"])
      if "doBinarization" in knnParams:
        knnParams["doBinarization"] = bool(knnParams["doBinarization"])
      if "useSparseMemory" in knnParams:
        knnParams["useSparseMemory"] = bool(knnParams["useSparseMemory"])
      if "relativeThreshold" in knnParams:
        knnParams["relativeThreshold"] = bool(knnParams["relativeThreshold"])
      if "doSphering" in knnParams:
        knnParams["doSphering"] = bool(knnParams["doSphering"])
      if "replaceDuplicates" in knnParams:
        knnParams["replaceDuplicates"] = bool(knnParams["replaceDuplicates"])
      proto.knnclassifierArgs = knnParams
    if self._knnclassifier is not None:
      # Delegate serialization of the wrapped classifier region.
      self._knnclassifier.writeToProto(proto.knnclassifier)
    if self.labelResults is not None:
      proto.labelResults = self.labelResults
    if self.saved_categories is not None:
      proto.savedCategories = self.saved_categories
    if self._recordsCache is not None:
      recordsCache = proto.init("recordsCache", len(self._recordsCache))
      for i, item in enumerate(self._recordsCache):
        record = recordsCache[i]
        record.rowid = int(item.ROWID)
        record.anomalyScore = float(item.anomalyScore)
        if item.anomalyVector is not None:
          record.anomalyVector = item.anomalyVector
        if item.anomalyLabel is not None:
          record.anomalyLabel = item.anomalyLabel
        record.setByUser = bool(item.setByUser)
  @classmethod
  def readFromProto(cls, proto):
    """
    Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.
    Read state from proto object.

    :param proto: KNNAnomalyClassifierRegionProto capnproto object
    :returns: (KNNAnomalyClassifierRegion) deserialized instance
    :raises RuntimeError: when the proto's version does not match __VERSION__
    """
    if proto.version != KNNAnomalyClassifierRegion.__VERSION__:
      raise RuntimeError("Invalid KNNAnomalyClassifierRegion Version")

    # Bypass __init__ -- every attribute is restored from the proto below.
    instance = object.__new__(cls)
    instance._version = proto.version
    instance._maxLabelOutputs = proto.maxLabelOutputs
    instance._activeColumnCount = proto.activeColumnCount
    instance._prevPredictedColumns = numpy.array(proto.prevPredictedColumns)
    instance._anomalyVectorLength = proto.anomalyVectorLength
    instance._classificationMaxDist = proto.classificationMaxDist
    instance._iteration = proto.iteration
    instance.trainRecords = proto.trainRecords
    instance.anomalyThreshold = proto.anomalyThreshold
    instance.cacheSize = proto.cacheSize
    instance.classificationVectorType = proto.classificationVectorType
    instance._knnclassifierArgs = proto.knnclassifierArgs.to_dict()
    # Convert keys back to the capitalized spelling used by the constructor
    # (writeToProto lower-cased them for capnp compatibility).
    if "svdSampleCount" in instance._knnclassifierArgs:
      SVDSampleCount = instance._knnclassifierArgs["svdSampleCount"]
      del instance._knnclassifierArgs["svdSampleCount"]
      instance._knnclassifierArgs["SVDSampleCount"] = SVDSampleCount
    if "svdDimCount" in instance._knnclassifierArgs:
      SVDDimCount = instance._knnclassifierArgs["svdDimCount"]
      del instance._knnclassifierArgs["svdDimCount"]
      instance._knnclassifierArgs["SVDDimCount"] = SVDDimCount
    instance._knnclassifier = KNNClassifierRegion.readFromProto(
      proto.knnclassifier)
    instance.labelResults = list(proto.labelResults)
    instance.saved_categories = list(proto.savedCategories)
    instance._recordsCache = []
    for item in proto.recordsCache:
      instance._recordsCache.append(_CLAClassificationRecord(
        ROWID=item.rowid,
        anomalyScore=item.anomalyScore,
        anomalyVector=list(item.anomalyVector),
        anomalyLabel=list(item.anomalyLabel),
        setByUser=item.setByUser
      ))
    return instance
  @staticmethod
  def getSchema():
    """Return the capnp schema class used by readFromProto for this region."""
    return KNNAnomalyClassifierRegionProto
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
# Save knnclassifier properties
state['_knnclassifierProps'] = state['_knnclassifier'].__getstate__()
state.pop('_knnclassifier')
return state
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
"""
if '_version' not in state or state['_version'] == 1:
knnclassifierProps = state.pop('_knnclassifierProps')
self.__dict__.update(state)
self._knnclassifier = KNNClassifierRegion(**self._knnclassifierArgs)
self._knnclassifier.__setstate__(knnclassifierProps)
self._version = KNNAnomalyClassifierRegion.__VERSION__
else:
raise Exception("Invalid KNNAnomalyClassifierRegion version. Current "
"version: %s" % (KNNAnomalyClassifierRegion.__VERSION__))
def diff(self, knnRegion2):
diff = []
toCheck = [((), self.__getstate__(), knnRegion2.__getstate__())]
while toCheck:
keys, a, b = toCheck.pop()
if type(a) != type(b):
diff.append((keys, a, b))
elif 'saved_categories' in keys:
cats1 = set(a)
cats2 = set(b)
if cats1 != cats2:
for k in cats1 - cats2:
diff.append((keys + (k,), a[k], None))
for k in cats1 - cats2:
diff.append((keys + (k,), None, b[k]))
elif '_recordsCache' in keys:
if len(a) != len(b):
diff.append((keys + ('len', ), len(a), len(b)))
for i, v in enumerate(a):
if not (a[i] == b[i]):
diff.append((keys + ('_' + str(i), ), a[i].__getstate__(),
b[i].__getstate__()))
elif isinstance(a, dict):
keys1 = set(a.keys())
keys2 = set(b.keys())
# If there are missing keys, add them to the diff.
if keys1 != keys2:
for k in keys1 - keys2:
diff.append((keys + (k,), [k], None))
for k in keys2 - keys1:
diff.append((keys + (k,), None, b[k]))
# For matching keys, add the values to the list of things to check.
for k in keys1.union(keys2):
toCheck.append((keys + (k,), a[k], b[k]))
elif (isinstance(a, numpy.ndarray) or isinstance(a, list) or
isinstance(a, tuple)):
if len(a) != len(b):
diff.append((keys + ('len', ), len(a), len(b)))
elif not numpy.array_equal(a, b):
diff.append((keys, a, b))
#for i in xrange(len(a)):
# toCheck.append((keys + (k, i), a[i], b[i]))
elif isinstance(a, Random):
for i, v in enumerate(a.get_state()):
toCheck.append((keys + (i,), v, b.get_state()[i]))
else:
try:
_ = a != b
except ValueError:
raise ValueError(type(a))
if a != b:
diff.append((keys, a, b))
return diff
#############################################################################
#
# NuPIC 2 Support
# These methods are required by NuPIC 2
#
#############################################################################
def getOutputElementCount(self, name):
"""
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.
"""
if name == 'labels':
return self._maxLabelOutputs
else:
raise Exception("Invalid output name specified")
class _CLAClassificationRecord(object):
  """
  A single record to store data associated with a single prediction for the
  anomaly classifier.

  ROWID - prediction stream ROWID record number
  setByUser - if true, a delete must be called explicitly on this point to
      remove its label
  """

  __slots__ = ["ROWID", "anomalyScore", "anomalyVector", "anomalyLabel",
               "setByUser"]

  def __init__(self, ROWID, anomalyScore, anomalyVector, anomalyLabel,
               setByUser=False):
    self.ROWID = ROWID
    self.anomalyScore = anomalyScore
    self.anomalyVector = anomalyVector
    self.anomalyLabel = anomalyLabel
    self.setByUser = setByUser

  def __getstate__(self):
    # Slotted class: build the picklable state explicitly from __slots__.
    return dict((slot, getattr(self, slot)) for slot in self.__slots__)

  def __setstate__(self, data_dict):
    for name, value in data_dict.items():
      setattr(self, name, value)

  def __eq__(self, other):
    # Compare the scalar fields first; the anomaly vector needs an
    # element-wise comparison since it may be a numpy array.
    if self.ROWID != other.ROWID:
      return False
    if self.anomalyScore != other.anomalyScore:
      return False
    if self.anomalyLabel != other.anomalyLabel:
      return False
    if self.setByUser != other.setByUser:
      return False
    return numpy.array_equal(self.anomalyVector, other.anomalyVector)
| 37,731 | Python | .py | 904 | 34.238938 | 99 | 0.664746 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,903 | anomaly_region.py | numenta_nupic-legacy/src/nupic/regions/anomaly_region.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Region for computing the anomaly score."""
import numpy
from nupic.algorithms import anomaly
from nupic.bindings.regions.PyRegion import PyRegion
from nupic.serializable import Serializable
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.regions.AnomalyRegion_capnp import AnomalyRegionProto
class AnomalyRegion(PyRegion, Serializable):
  """Region for computing the anomaly score."""


  @classmethod
  def getSpec(cls):
    def _columnsInput(description):
      # Both inputs share every field except the description.
      return {
          "description": description,
          "regionLevel": True,
          "dataType": "Real32",
          "count": 0,
          "required": True,
          "isDefaultInput": False,
          "requireSplitterMap": False,
      }

    return {
        "description": ("Region that computes anomaly scores from temporal "
                        "memory."),
        "singleNodeOnly": True,
        "inputs": {
            "activeColumns": _columnsInput("The currently active columns."),
            "predictedColumns": _columnsInput(
                "The currently predicted columns."),
        },
        "outputs": {
            "rawAnomalyScore": {
                "description": "The raw anomaly score.",
                "dataType": "Real32",
                "count": 1,
                "regionLevel": True,
                "isDefaultOutput": True,
            },
        },
        "parameters": {},
        "commands": {},
    }


  def __init__(self, *args, **kwargs):
    # Predicted columns from the previous timestep; starts empty.
    self.prevPredictedColumns = numpy.zeros([], dtype="float32")


  def __eq__(self, other):
    # Equal when every attribute of self exists on other with a matching
    # value; numpy arrays are compared element-wise with a tolerance.
    for name, mine in self.__dict__.items():
      if name not in other.__dict__:
        return False
      theirs = getattr(other, name)
      if isinstance(mine, numpy.ndarray):
        if mine.dtype != theirs.dtype:
          return False
        if not numpy.isclose(mine, theirs).all():
          return False
      else:
        if type(mine) != type(theirs):
          return False
        if mine != theirs:
          return False
    return True


  def __ne__(self, other):
    return not (self == other)


  @classmethod
  def getSchema(cls):
    return AnomalyRegionProto


  @classmethod
  def read(cls, proto):
    # Bypass __init__ and restore the single piece of state directly.
    instance = object.__new__(cls)
    instance.prevPredictedColumns = numpy.array(proto.prevPredictedColumns,
                                                dtype=numpy.float32)
    return instance


  def write(self, proto):
    proto.prevPredictedColumns = self.prevPredictedColumns.ravel().tolist()


  def initialize(self):
    pass


  def compute(self, inputs, outputs):
    # Score the current activity against the previous step's predictions,
    # then stash this step's predictions for the next call.
    currentlyActive = inputs["activeColumns"].nonzero()[0]
    score = anomaly.computeRawAnomalyScore(currentlyActive,
                                           self.prevPredictedColumns)

    self.prevPredictedColumns = numpy.array(
        inputs["predictedColumns"].nonzero()[0], dtype=numpy.float32)

    outputs["rawAnomalyScore"][0] = score
| 4,105 | Python | .py | 113 | 28.486726 | 80 | 0.603127 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,904 | record_sensor.py | numenta_nupic-legacy/src/nupic/regions/record_sensor.py |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
from nupic.bindings.regions.PyRegion import PyRegion
from nupic.data.field_meta import FieldMetaType
from nupic.encoders.multi import MultiEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.record_sensor_capnp import RecordSensorProto
class RecordSensor(PyRegion):

  """
  A Record Sensor (RS) retrieves an information "record" and encodes
  it to be suitable as input to an HTM.

  An information record is analogous database record -- it is just a
  collection of typed values: date, amount, category, location, etc.

  The RS may obtain information from one of two sources:

  . a file (e.g. csv or tsv)
  . a data generator (for artificial data)

  The RS encodes a record using an encoding scheme that can be specified
  programmatically.

  An RS is essentially a shell containing two objects:

  1. `dataSource` object gets one record at a time. This record is returned
  either as a dictionary or a user-defined object. The fields within a record
  correspond to entries in the dictionary or attributes of the object. For
  example, a `dataSource` might return:

  .. code-block:: python

    dict(date="02-01-2010 23:12:23", amount=4.95, country="US",
         _reset=0, _sequenceId=0)

  or an object with attributes `date`, `amount` and `country`.

  A data source is typically a
  :class:`nupic.data.file_record_stream.FileRecordStream`: or an artificial
  data generator.

  2. An `encoder` object encodes one record into a fixed-sparsity
  distributed representation. Usually
  :class:`nupic.encoders.multi.MultiEncoder`.

  Example usage in NuPIC:

  .. code-block:: python

    from nupic.net import Network
    from nupic.encoders import MultiEncoder
    from nupic.data.file.file_record_stream import FileRecordStream

    n = Network()
    s = n.addRegion("sensor", "py.RecordSensor", "")
    mysource = FileRecordStream("mydata.txt")
    myencoder = MultiEncoder()
    ... set up myencoder ...
    s.getSelf().dataSource = mysource
    s.getSelf().encoder = myencoder
    l1 = n.addRegion("l1", "py.TestRegion", "[create params]")
    n.initialize()
    n.run(100)

  :param verbosity: (int) verbosity, default 0
  :param numCategories: (int) number of categories, default 1
  """

  @classmethod
  def getSpec(cls):
    # Node spec describing this region's inputs, outputs, and parameters
    # to the NuPIC network framework.
    ns = dict(
        singleNodeOnly=True,
        description="Sensor that reads data records and encodes them for an HTM",
        outputs=dict(
          actValueOut=dict(
            description="Actual value of the field to predict. The index of the "
                        "field to predict must be specified via the parameter "
                        "predictedField. If this parameter is not set, then "
                        "actValueOut won't be populated.",
            dataType="Real32",
            count=0,
            regionLevel=True,
            isDefaultOutput=False),
          bucketIdxOut=dict(
            description="Active index of the encoder bucket for the "
                        "actual value of the field to predict. The index of the "
                        "field to predict must be specified via the parameter "
                        "predictedField. If this parameter is not set, then "
                        "actValueOut won't be populated.",
            dataType="UInt64",
            count=0,
            regionLevel=True,
            isDefaultOutput=False),
          dataOut=dict(
            description="Encoded data",
            dataType="Real32",  # inefficient for bits, but that's what we use now
            count=0,
            regionLevel=True,
            isDefaultOutput=True),
          resetOut=dict(
            description="Reset signal",
            dataType="Real32",
            count=1,
            regionLevel=True,
            isDefaultOutput=False),
          sequenceIdOut=dict(
            description="Sequence ID",
            dataType='UInt64',
            count=1,
            regionLevel=True,
            isDefaultOutput=False),
          categoryOut=dict(
            description="Category",
            dataType='Real32',
            count=0,
            regionLevel=True,
            isDefaultOutput=False),
          sourceOut=dict(
            description="Unencoded data from the source, input to the encoder",
            dataType="Real32",
            count=0,
            regionLevel=True,
            isDefaultOutput=False),
          spatialTopDownOut=dict(
            description="The top-down output signal, generated from "
                        "feedback from SP",
            dataType='Real32',
            count=0,
            regionLevel=True,
            isDefaultOutput=False),
          temporalTopDownOut=dict(
            description="The top-down output signal, generated from "
                        "feedback from TM through SP",
            dataType='Real32',
            count=0,
            regionLevel=True,
            isDefaultOutput=False),
        ),
        inputs=dict(
          spatialTopDownIn=dict(
            description="The top-down input signal, generated from "
                        "feedback from SP",
            dataType='Real32',
            count=0,
            required=False,
            regionLevel=True,
            isDefaultInput=False,
            requireSplitterMap=False),
          temporalTopDownIn=dict(
            description="The top-down input signal, generated from "
                        "feedback from TM through SP",
            dataType='Real32',
            count=0,
            required=False,
            regionLevel=True,
            isDefaultInput=False,
            requireSplitterMap=False),
        ),
        parameters=dict(
          verbosity=dict(
            description="Verbosity level",
            dataType="UInt32",
            accessMode="ReadWrite",
            count=1,
            constraints=""),
          numCategories=dict(
            description="Total number of categories to expect from the "
                        "FileRecordStream",
            dataType="UInt32",
            accessMode="ReadWrite",
            count=1,
            constraints=""),
          predictedField=dict(
            description="The name of the field to be predicted. This will result "
                        "in the outputs actValueOut and bucketIdxOut not being "
                        "populated.",
            dataType="Byte",
            accessMode="ReadWrite",
            count=0,
            defaultValue=-1,
            constraints=""),
          topDownMode=dict(
            description="1 if the node should do top down compute on the next "
                        "call to compute into topDownOut (default 0).",
            accessMode="ReadWrite",
            dataType="UInt32",
            count=1,
            constraints="bool"),
        ),
        commands=dict())

    return ns

  def __init__(self, verbosity=0, numCategories=1):
    # encoder/dataSource must be assigned by the caller before initialize().
    self.encoder = None
    self.disabledEncoder = None
    self.dataSource = None
    # Cache of the most recent per-output Python values (see getOutputValues).
    self._outputValues = {}
    # Filters run on raw records (pre) and on the encoded bits (post).
    self.preEncodingFilters = []
    self.postEncodingFilters = []
    self.topDownMode = False
    self.verbosity = verbosity
    self.numCategories = numCategories
    self._iterNum = 0

    # Optional field for which we want to populate bucketIdxOut
    # and actValueOut. If predictedField is None, then bucketIdxOut and
    # actValueOut won't be populated.
    self.predictedField = None

    # lastRecord is the last record returned. Used for debugging only
    self.lastRecord = None

  def __setstate__(self, state):
    # Default value for older versions being deserialized.
    self.disabledEncoder = None
    self.__dict__.update(state)
    # Backfill attribute added after early serialized versions.
    if not hasattr(self, "numCategories"):
      self.numCategories = 1

  def initialize(self):
    # Both collaborators must have been attached by the caller by now.
    if self.encoder is None:
      raise Exception("Unable to initialize RecordSensor "
                      "-- encoder has not been set")
    if self.dataSource is None:
      raise Exception("Unable to initialize RecordSensor "
                      "-- dataSource has not been set")

  def rewind(self):
    """ Reset the sensor to beginning of data.
    """
    self._iterNum = 0
    if self.dataSource is not None:
      self.dataSource.rewind()

  def getNextRecord(self):
    """
    Get the next record to encode. Includes getting a record from the
    `dataSource` and applying filters. If the filters request more data from the
    `dataSource` continue to get data from the `dataSource` until all filters
    are satisfied. This method is separate from :meth:`.RecordSensor.compute` so that we can
    use a standalone :class:`.RecordSensor` to get filtered data.

    :raises StopIteration: when the dataSource is exhausted.
    """
    allFiltersHaveEnoughData = False
    while not allFiltersHaveEnoughData:
      # Get the data from the dataSource
      data = self.dataSource.getNextRecordDict()
      if not data:
        raise StopIteration("Datasource has no more data")

      # temporary check
      if "_reset" not in data:
        data["_reset"] = 0
      if "_sequenceId" not in data:
        data["_sequenceId"] = 0
      if "_category" not in data:
        data["_category"] = [None]

      data, allFiltersHaveEnoughData = self.applyFilters(data)

    self.lastRecord = data

    return data

  def applyFilters(self, data):
    """
    Apply pre-encoding filters. These filters may modify or add data. If a
    filter needs another record (e.g. a delta filter) it will request another
    record by returning False and the current record will be skipped (but will
    still be given to all filters).

    We have to be very careful about resets. A filter may add a reset,
    but other filters should not see the added reset, each filter sees
    the original reset value, and we keep track of whether any filter
    adds a reset.

    :param data: (dict) The data that will be processed by the filter.
    :returns: (tuple) with the data processed by the filter and a boolean to
              know whether or not the filter needs mode data.
    """
    if self.verbosity > 0:
      print "RecordSensor got data: %s" % data

    allFiltersHaveEnoughData = True
    if len(self.preEncodingFilters) > 0:
      originalReset = data['_reset']
      actualReset = originalReset
      for f in self.preEncodingFilters:
        # if filter needs more data, it returns False
        filterHasEnoughData = f.process(data)
        allFiltersHaveEnoughData = (allFiltersHaveEnoughData
                                    and filterHasEnoughData)
        # Accumulate any reset added by this filter, then restore the
        # original reset so the next filter doesn't see it.
        actualReset = actualReset or data['_reset']
        data['_reset'] = originalReset
      data['_reset'] = actualReset

    return data, allFiltersHaveEnoughData

  def populateCategoriesOut(self, categories, output):
    """
    Populate the output array with the category indices.

    .. note:: Non-categories are represented with ``-1``.

    :param categories: (list) of category strings
    :param output: (list) category output, will be overwritten
    """
    if categories[0] is None:
      # The record has no entry in category field.
      output[:] = -1
    else:
      # Populate category output array by looping over the smaller of the
      # output array (size specified by numCategories) and the record's number
      # of categories.
      for i, cat in enumerate(categories[:len(output)]):
        output[i] = cat
      # Fill any remaining slots (record had fewer categories) with -1.
      output[len(categories):] = -1

  def compute(self, inputs, outputs):
    """
    Get a record from the dataSource and encode it.

    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.compute`.
    """
    # In normal (bottom-up) mode, encode the next record; in topDownMode,
    # decode the top-down feedback signals instead.
    if not self.topDownMode:
      data = self.getNextRecord()

      # The private keys in data are standard of RecordStreamIface objects. Any
      # add'l keys are column headers from the data source.
      reset = data["_reset"]
      sequenceId = data["_sequenceId"]
      categories = data["_category"]

      # Encode the processed records; populate outputs["dataOut"] in place
      self.encoder.encodeIntoArray(data, outputs["dataOut"])

      # If there is a field to predict, set bucketIdxOut and actValueOut.
      # There is a special case where a predicted field might be a vector, as in
      # the CoordinateEncoder. Since this encoder does not provide bucket
      # indices for prediction, we will ignore it.
      if self.predictedField is not None and self.predictedField != "vector":
        allEncoders = list(self.encoder.encoders)
        if self.disabledEncoder is not None:
          allEncoders.extend(self.disabledEncoder.encoders)

        encoders = [e for e in allEncoders
                    if e[0] == self.predictedField]
        if len(encoders) == 0:
          raise ValueError("There is no encoder for set for the predicted "
                           "field: %s" % self.predictedField)
        # TODO: Figure out why there are sometimes multiple encoders with the
        # same name.
        #elif len(encoders) > 1:
        #  raise ValueError("There cant' be more than 1 encoder for the "
        #                   "predicted field: %s" % self.predictedField)
        else:
          encoder = encoders[0][1]

        actualValue = data[self.predictedField]
        outputs["bucketIdxOut"][:] = encoder.getBucketIndices(actualValue)
        if isinstance(actualValue, str):
          # A string can't be written to the Real32 output; emit the bucket
          # index in its place.
          outputs["actValueOut"][:] = encoder.getBucketIndices(actualValue)
        else:
          outputs["actValueOut"][:] = actualValue

      # Write out the scalar values obtained from they data source.
      outputs["sourceOut"][:] = self.encoder.getScalars(data)
      self._outputValues["sourceOut"] = self.encoder.getEncodedValues(data)

      # -----------------------------------------------------------------------
      # Get the encoded bit arrays for each field
      encoders = self.encoder.getEncoderList()
      prevOffset = 0
      sourceEncodings = []
      bitData = outputs["dataOut"]
      for encoder in encoders:
        nextOffset = prevOffset + encoder.getWidth()
        sourceEncodings.append(bitData[prevOffset:nextOffset])
        prevOffset = nextOffset
      self._outputValues['sourceEncodings'] = sourceEncodings

      # Execute post-encoding filters, if any
      for filter in self.postEncodingFilters:
        filter.process(encoder=self.encoder, data=outputs['dataOut'])

      # Populate the output numpy arrays; must assign by index.
      outputs['resetOut'][0] = reset
      outputs['sequenceIdOut'][0] = sequenceId
      self.populateCategoriesOut(categories, outputs['categoryOut'])

      # ------------------------------------------------------------------------
      # Verbose print?
      if self.verbosity >= 1:
        if self._iterNum == 0:
          self.encoder.pprintHeader(prefix="sensor:")

        if reset:
          print "RESET - sequenceID:%d" % sequenceId

      if self.verbosity >= 2:
        print

      # If verbosity >=2, print the record fields
      if self.verbosity >= 1:
        self.encoder.pprint(outputs["dataOut"], prefix="%7d:" % (self._iterNum))
        scalarValues = self.encoder.getScalars(data)
        nz = outputs["dataOut"].nonzero()[0]
        print "    nz: (%d)" % (len(nz)), nz
        print "  encIn:", self.encoder.scalarsToStr(scalarValues)
      if self.verbosity >= 2:
        # if hasattr(data, 'header'):
        #  header = data.header()
        # else:
        #  header = '     '.join(self.dataSource.names)
        # print "    data:", str(data)
        print "  data:", str(data)
      if self.verbosity >= 3:
        decoded = self.encoder.decode(outputs["dataOut"])
        print "decoded:", self.encoder.decodedToStr(decoded)

      self._iterNum += 1

    else:
      # ========================================================================
      # Spatial
      # ========================================================================
      # This is the top down compute in sensor
      # We get the spatial pooler's topDownOut as spatialTopDownIn
      spatialTopDownIn = inputs['spatialTopDownIn']
      spatialTopDownOut = self.encoder.topDownCompute(spatialTopDownIn)

      # -----------------------------------------------------------------------
      # Split topDownOutput into seperate outputs
      values = [elem.value for elem in spatialTopDownOut]
      scalars = [elem.scalar for elem in spatialTopDownOut]
      encodings = [elem.encoding for elem in spatialTopDownOut]

      self._outputValues['spatialTopDownOut'] = values
      outputs['spatialTopDownOut'][:] = numpy.array(scalars)
      self._outputValues['spatialTopDownEncodings'] = encodings

      # ========================================================================
      # Temporal
      # ========================================================================
      ## TODO: Add temporal top-down loop
      # We get the temporal memory's topDownOut passed through the spatial
      # pooler as temporalTopDownIn
      temporalTopDownIn = inputs['temporalTopDownIn']
      temporalTopDownOut = self.encoder.topDownCompute(temporalTopDownIn)

      # -----------------------------------------------------------------------
      # Split topDownOutput into separate outputs
      values = [elem.value for elem in temporalTopDownOut]
      scalars = [elem.scalar for elem in temporalTopDownOut]
      encodings = [elem.encoding for elem in temporalTopDownOut]

      self._outputValues['temporalTopDownOut'] = values
      outputs['temporalTopDownOut'][:] = numpy.array(scalars)
      self._outputValues['temporalTopDownEncodings'] = encodings

      assert len(spatialTopDownOut) == len(temporalTopDownOut), (
        "Error: spatialTopDownOut and temporalTopDownOut should be the same "
        "size")

  def _convertNonNumericData(self, spatialOutput, temporalOutput, output):
    """
    Converts all of the non-numeric fields from spatialOutput and temporalOutput
    into their scalar equivalents and records them in the output dictionary.

    :param spatialOutput: The results of topDownCompute() for the spatial input.
    :param temporalOutput: The results of topDownCompute() for the temporal
        input.
    :param output: The main dictionary of outputs passed to compute(). It is
        expected to have keys 'spatialTopDownOut' and 'temporalTopDownOut' that
        are mapped to numpy arrays.
    """
    encoders = self.encoder.getEncoderList()
    types = self.encoder.getDecoderOutputFieldTypes()
    for i, (encoder, type) in enumerate(zip(encoders, types)):
      spatialData = spatialOutput[i]
      temporalData = temporalOutput[i]

      if type != FieldMetaType.integer and type != FieldMetaType.float:
        # TODO: Make sure that this doesn't modify any state
        spatialData = encoder.getScalars(spatialData)[0]
        temporalData = encoder.getScalars(temporalData)[0]

      assert isinstance(spatialData, (float, int))
      assert isinstance(temporalData, (float, int))
      output['spatialTopDownOut'][i] = spatialData
      output['temporalTopDownOut'][i] = temporalData

  def getOutputValues(self, outputName):
    """
    .. note:: These are normal Python lists, rather than numpy arrays. This is
              to support lists with mixed scalars and strings, as in the case of
              records with categorical variables.

    :returns: (dict) output values.
    """
    return self._outputValues[outputName]

  def getOutputElementCount(self, name):
    """
    Computes the width of dataOut.

    Overrides
    :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.
    """
    if name == "resetOut":
      print ("WARNING: getOutputElementCount should not have been called with "
             "resetOut")
      return 1

    elif name == "sequenceIdOut":
      print ("WARNING: getOutputElementCount should not have been called with "
             "sequenceIdOut")
      return 1

    elif name == "dataOut":
      if self.encoder is None:
        raise Exception("NuPIC requested output element count for 'dataOut' "
                        "on a RecordSensor node, but the encoder has not "
                        "been set")
      return self.encoder.getWidth()

    elif name == "sourceOut":
      if self.encoder is None:
        raise Exception("NuPIC requested output element count for 'sourceOut' "
                        "on a RecordSensor node, "
                        "but the encoder has not been set")
      return len(self.encoder.getDescription())

    elif name == "bucketIdxOut":
      return 1

    elif name == "actValueOut":
      return 1

    elif name == "categoryOut":
      return self.numCategories

    elif name == 'spatialTopDownOut' or name == 'temporalTopDownOut':
      if self.encoder is None:
        raise Exception("NuPIC requested output element count for 'sourceOut' "
                        "on a RecordSensor node, "
                        "but the encoder has not been set")
      return len(self.encoder.getDescription())
    else:
      raise Exception("Unknown output %s" % name)

  def setParameter(self, parameterName, index, parameterValue):
    """
    Set the value of a Spec parameter. Most parameters are handled
    automatically by PyRegion's parameter set mechanism. The ones that need
    special treatment are explicitly handled here.
    """
    if parameterName == 'topDownMode':
      self.topDownMode = parameterValue
    elif parameterName == 'predictedField':
      self.predictedField = parameterValue
    else:
      raise Exception('Unknown parameter: ' + parameterName)

  @staticmethod
  def getSchema():
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSchema`.
    """
    return RecordSensorProto

  def writeToProto(self, proto):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.

    Note: dataSource and the filter lists are not serialized here; they must
    be reattached after deserialization.
    """
    self.encoder.write(proto.encoder)
    if self.disabledEncoder is not None:
      self.disabledEncoder.write(proto.disabledEncoder)
    proto.topDownMode = int(self.topDownMode)
    proto.verbosity = self.verbosity
    proto.numCategories = self.numCategories

  @classmethod
  def readFromProto(cls, proto):
    """
    Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.
    """
    instance = cls()

    instance.encoder = MultiEncoder.read(proto.encoder)
    if proto.disabledEncoder is not None:
      instance.disabledEncoder = MultiEncoder.read(proto.disabledEncoder)
    instance.topDownMode = bool(proto.topDownMode)
    instance.verbosity = proto.verbosity
    instance.numCategories = proto.numCategories

    return instance
| 23,440 | Python | .py | 539 | 35.404453 | 93 | 0.641626 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,905 | tm_region.py | numenta_nupic-legacy/src/nupic/regions/tm_region.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import os
try:
import capnp
except ImportError:
capnp = None
from nupic.bindings.regions.PyRegion import PyRegion
from nupic.algorithms import (anomaly, backtracking_tm, backtracking_tm_cpp,
backtracking_tm_shim)
if capnp:
from nupic.regions.tm_region_capnp import TMRegionProto
from nupic.support import getArgumentDescriptions
gDefaultTemporalImp = 'py'
def _getTPClass(temporalImp):
  """
  Map a temporalImp string to the temporal memory implementation class.

  :param temporalImp: one of 'py', 'cpp', 'tm_py', 'tm_cpp',
      'monitored_tm_py'.
  :raises RuntimeError: for any other value.
  """
  # Deliberately a branch chain (not a dict) so that only the selected
  # module attribute is ever looked up.
  if temporalImp == 'py':
    implClass = backtracking_tm.BacktrackingTM
  elif temporalImp == 'cpp':
    implClass = backtracking_tm_cpp.BacktrackingTMCPP
  elif temporalImp == 'tm_py':
    implClass = backtracking_tm_shim.TMShim
  elif temporalImp == 'tm_cpp':
    implClass = backtracking_tm_shim.TMCPPShim
  elif temporalImp == 'monitored_tm_py':
    implClass = backtracking_tm_shim.MonitoredTMShim
  else:
    raise RuntimeError("Invalid temporalImp '%s'. Legal values are: 'py', "
                       "'cpp', 'tm_py', 'monitored_tm_py'" % (temporalImp))
  return implClass
def _buildArgs(f, self=None, kwargs=None):
  """
  Get the default arguments from the function and assign as instance vars.

  Return a list of 3-tuples with (name, description, defaultValue) for each
  argument to the function.

  Assigns all arguments to the function as instance variables of TMRegion.
  If the argument was not provided, uses the default value.

  Pops any values from kwargs that go to the function.

  :param f: function whose argument descriptions are collected.
  :param self: optional object; when given, each argument is also set on it
      as an instance variable.
  :param kwargs: optional dict of caller-supplied values; entries consumed
      by ``f`` are popped from it in place.
  :raises TypeError: if an argument has no default and was not supplied.
  """
  # Fix: use a None sentinel instead of a mutable {} default so the default
  # dict is never shared between calls.
  if kwargs is None:
    kwargs = {}

  # Get the name, description, and default value for each argument
  argTuples = getArgumentDescriptions(f)
  argTuples = argTuples[1:]  # Remove 'self'

  # Get the names of the parameters to our own constructor and remove them
  # Check for _originial_init first, because if LockAttributesMixin is used,
  # __init__'s signature will be just (self, *args, **kw), but
  # _original_init is created with the original signature
  #init = getattr(self, '_original_init', self.__init__)
  init = TMRegion.__init__
  ourArgNames = [t[0] for t in getArgumentDescriptions(init)]
  # Also remove a few other names that aren't in our constructor but are
  # computed automatically (e.g. numberOfCols for the TM)
  ourArgNames += [
    'numberOfCols',    # TM
  ]
  for argTuple in argTuples[:]:
    if argTuple[0] in ourArgNames:
      argTuples.remove(argTuple)

  # Build the dictionary of arguments
  if self:
    for argTuple in argTuples:
      argName = argTuple[0]
      if argName in kwargs:
        # Argument was provided
        argValue = kwargs.pop(argName)
      else:
        # Argument was not provided; use the default value if there is one, and
        # raise an exception otherwise
        if len(argTuple) == 2:
          # No default value
          raise TypeError("Must provide '%s'" % argName)
        argValue = argTuple[2]
      # Set as an instance variable if 'self' was passed in
      setattr(self, argName, argValue)

  return argTuples
def _getAdditionalSpecs(temporalImp, kwargs=None):
  """Build the additional parameter specs in two groups (for the inspector).

  The Spec dataType for each auto-discovered constructor argument is derived
  from the type of its default value, defaulting to 'Byte' for None and
  complex types. The temporal parameters are determined by introspecting the
  constructor of the TM class selected by ``temporalImp``.

  :param temporalImp: (string) temporal memory implementation key understood
      by ``_getTPClass`` (e.g. 'py', 'cpp').
  :param kwargs: unused; retained for backward compatibility with callers
      that pass it. (Previously declared with a mutable ``{}`` default,
      which is a Python anti-pattern; now defaults to None.)
  :returns: (tuple) ``(temporalSpec, otherSpec)`` dicts mapping parameter
      names to Spec entry dicts.
  """
  typeNames = {int: 'UInt32', float: 'Real32', str: 'Byte', bool: 'bool', tuple: 'tuple'}

  def getArgType(arg):
    # Map a default value to a (dataType, count) pair for the Spec.
    # 'Byte' means variable-size (count 0); a tuple reports its element
    # type and its length; booleans are exposed as UInt32 (the 'bool'
    # constraint is added separately by getConstraints).
    t = typeNames.get(type(arg), 'Byte')
    count = 0 if t == 'Byte' else 1
    if t == 'tuple':
      t = typeNames.get(type(arg[0]), 'Byte')
      count = len(arg)
    if t == 'bool':
      t = 'UInt32'
    return (t, count)

  def getConstraints(arg):
    # Derive the Spec constraints string from the default value's type.
    t = typeNames.get(type(arg), 'Byte')
    if t == 'Byte':
      return 'multiple'
    elif t == 'bool':
      return 'bool'
    else:
      return ''

  # Build up parameters from the temporal memory's constructor
  TemporalClass = _getTPClass(temporalImp)
  tArgTuples = _buildArgs(TemporalClass.__init__)
  temporalSpec = {}
  for argTuple in tArgTuples:
    d = dict(
      description=argTuple[1],
      accessMode='ReadWrite',
      dataType=getArgType(argTuple[2])[0],
      count=getArgType(argTuple[2])[1],
      constraints=getConstraints(argTuple[2]))
    temporalSpec[argTuple[0]] = d

  # Add temporal parameters that weren't handled automatically
  temporalSpec.update(dict(
    columnCount=dict(
      description='Total number of columns.',
      accessMode='Read',
      dataType='UInt32',
      count=1,
      constraints=''),
    cellsPerColumn=dict(
      description='Number of cells per column.',
      accessMode='Read',
      dataType='UInt32',
      count=1,
      constraints=''),
    inputWidth=dict(
      description='Number of inputs to the TM.',
      accessMode='Read',
      dataType='UInt32',
      count=1,
      constraints=''),
    predictedSegmentDecrement=dict(
      description='Predicted segment decrement',
      accessMode='Read',
      dataType='Real',
      count=1,
      constraints=''),
    orColumnOutputs=dict(
      description="""OR together the cell outputs from each column to produce
      the temporal memory output. When this mode is enabled, the number of
      cells per column must also be specified and the output size of the region
      should be set the same as columnCount""",
      accessMode='Read',
      dataType='Bool',
      count=1,
      constraints='bool'),
    cellsSavePath=dict(
      description="""Optional path to file in which large temporal memory cells
      data structure is to be saved.""",
      accessMode='ReadWrite',
      dataType='Byte',
      count=0,
      constraints=''),
    temporalImp=dict(
      description="""Which temporal memory implementation to use. Set to either
      'py' or 'cpp'. The 'cpp' implementation is optimized for speed in C++.""",
      accessMode='ReadWrite',
      dataType='Byte',
      count=0,
      constraints='enum: py, cpp'),
  ))

  # The last group is for parameters that aren't strictly spatial or temporal
  otherSpec = dict(
    learningMode=dict(
      description='True if the node is learning (default True).',
      accessMode='ReadWrite',
      dataType='Bool',
      count=1,
      defaultValue=True,
      constraints='bool'),
    inferenceMode=dict(
      description='True if the node is inferring (default False).',
      accessMode='ReadWrite',
      dataType='Bool',
      count=1,
      defaultValue=False,
      constraints='bool'),
    computePredictedActiveCellIndices=dict(
      description='True if active and predicted active indices should be computed',
      accessMode='Create',
      dataType='Bool',
      count=1,
      defaultValue=False,
      constraints='bool'),
    anomalyMode=dict(
      description='True if an anomaly score is being computed',
      accessMode='Create',
      dataType='Bool',
      count=1,
      defaultValue=False,
      constraints='bool'),
    topDownMode=dict(
      description='True if the node should do top down compute on the next call '
                  'to compute into topDownOut (default False).',
      accessMode='ReadWrite',
      dataType='Bool',
      count=1,
      defaultValue=False,
      constraints='bool'),
    activeOutputCount=dict(
      description='Number of active elements in bottomUpOut output.',
      accessMode='Read',
      dataType='UInt32',
      count=1,
      constraints=''),
    storeDenseOutput=dict(
      description="""Whether to keep the dense column output (needed for
      denseOutput parameter).""",
      accessMode='ReadWrite',
      dataType='UInt32',
      count=1,
      constraints='bool'),
    logPathOutput=dict(
      description='Optional name of output log file. If set, every output vector'
                  ' will be logged to this file as a sparse vector.',
      accessMode='ReadWrite',
      dataType='Byte',
      count=0,
      constraints=''),
  )

  return temporalSpec, otherSpec
class TMRegion(PyRegion):
"""
TMRegion is designed to implement the temporal memory compute for a given
HTM level.
Uses a form of Temporal Memory to do most of the work. The specific TM
implementation is specified using the ``temporalImp`` parameter.
"""
  def __init__(self,
               columnCount,    # Number of columns in the SP, a required parameter
               inputWidth,     # Size of inputs to the SP, a required parameter
               cellsPerColumn, # Number of cells per column, required
               # Constructor arguments are picked up automatically. There is no
               # need to add them anywhere in TMRegion, unless you need to do
               # something special with them. See docstring above.
               orColumnOutputs=False,
               cellsSavePath='',
               temporalImp=gDefaultTemporalImp,
               anomalyMode=False,
               computePredictedActiveCellIndices=False,
               **kwargs):
    """Construct a TMRegion.

    :param columnCount: (int) total number of columns; with cellsPerColumn
        this determines the region's output width.
    :param inputWidth: (int) size of the bottom-up input vector.
    :param cellsPerColumn: (int) number of cells per column.
    :param orColumnOutputs: (bool) if True, cell outputs are OR'ed per column
        so the output size equals columnCount.
    :param cellsSavePath: (string) optional path for saving the TM cells
        data structure.
    :param temporalImp: (string) which TM implementation to use
        (e.g. 'py', 'cpp').
    :param anomalyMode: (bool) whether to compute an anomaly score.
    :param computePredictedActiveCellIndices: (bool) whether to compute
        active/predicted-active cell indices each compute.
    :param kwargs: remaining TM constructor arguments; consumed by
        _buildArgs, with any leftovers passed to PyRegion.__init__.
    """
    # Which Temporal implementation?
    TemporalClass = _getTPClass(temporalImp)

    # Make a list of automatic temporal arg names for later use
    # Pull out the temporal arguments automatically
    # These calls whittle down kwargs and create instance variables of TMRegion
    # NOTE: _buildArgs must run before PyRegion.__init__ below, because it
    # pops the TM-specific entries out of kwargs.
    tArgTuples = _buildArgs(TemporalClass.__init__, self, kwargs)
    self._temporalArgNames = [t[0] for t in tArgTuples]

    self.learningMode = True   # Start out with learning enabled
    self.inferenceMode = False
    self.anomalyMode = anomalyMode
    self.computePredictedActiveCellIndices = computePredictedActiveCellIndices
    self.topDownMode = False
    self.columnCount = columnCount
    self.inputWidth = inputWidth
    # One output element per cell.
    self.outputWidth = columnCount * cellsPerColumn
    self.cellsPerColumn = cellsPerColumn

    PyRegion.__init__(self, **kwargs)

    # Initialize all non-persistent base members, as well as give
    # derived class an opportunity to do the same.
    self._loaded = False
    self._initialize()

    # Debugging support, used in _conditionalBreak
    self.breakPdb = False
    self.breakKomodo = False

    # TMRegion only, or special handling
    self.orColumnOutputs = orColumnOutputs
    self.temporalImp = temporalImp

    # Various file names
    self.storeDenseOutput = False
    self.logPathOutput = ''
    self.cellsSavePath = cellsSavePath
    self._fpLogTPOutput = None

    # Variables set up in initInNetwork()
    self._tfdr = None  # FDRTemporal instance; created lazily in initialize()
#############################################################################
#
# Initialization code
#
#############################################################################
  def _initialize(self):
    """
    Initialize all ephemeral data members, and give the derived
    class the opportunity to do the same by invoking the
    virtual member _initEphemerals(), which is intended to be
    overridden.
    """
    # Diagnostic pass: warn (print) about base-class ephemeral members that
    # already exist on a freshly constructed instance. "_loaded" itself is
    # exempt since it is set before this method runs.
    for attrName in self._getEphemeralMembersBase():
      if attrName != "_loaded":
        if hasattr(self, attrName):
          if self._loaded:
            # print self.__class__.__name__, "contains base class member '%s' " \
            #     "after loading." % attrName
            # TODO: Re-enable warning or turn into error in a future release.
            pass
          else:
            print self.__class__.__name__, "contains base class member '%s'" % \
                attrName

    # Sanity checks: on a fresh instance no ephemeral (except _loaded) may
    # exist yet; after deserialization _loaded must be present.
    if not self._loaded:
      for attrName in self._getEphemeralMembersBase():
        if attrName != "_loaded":
          # if hasattr(self, attrName):
          #   import pdb; pdb.set_trace()
          assert not hasattr(self, attrName)
        else:
          assert hasattr(self, attrName)

    # Profiling information
    self._profileObj = None
    self._iterations = 0

    # Let derived class initialize ephemerals
    self._initEphemerals()
    self._checkEphemeralMembers()
def initialize(self):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.initialize`.
"""
# Allocate appropriate temporal memory object
# Retrieve the necessary extra arguments that were handled automatically
autoArgs = dict((name, getattr(self, name))
for name in self._temporalArgNames)
if self._tfdr is None:
tpClass = _getTPClass(self.temporalImp)
if self.temporalImp in ['py', 'cpp', 'r',
'tm_py', 'tm_cpp',
'monitored_tm_py',]:
self._tfdr = tpClass(
numberOfCols=self.columnCount,
cellsPerColumn=self.cellsPerColumn,
**autoArgs)
else:
raise RuntimeError("Invalid temporalImp")
#############################################################################
#
# Core compute methods: learning, inference, and prediction
#
#############################################################################
#############################################################################
  def compute(self, inputs, outputs):
    """
    Run one iteration of :class:`~nupic.regions.tm_region.TMRegion` compute,
    profiling it if requested.

    :param inputs: (dict) mapping region input names to numpy.array values
    :param outputs: (dict) mapping region output names to numpy.arrays that
          should be populated with output values by this method
    """
    # Uncomment this to find out who is generating divide by 0, or other numpy warnings
    # numpy.seterr(divide='raise', invalid='raise', over='raise')

    # Modify this line to turn on profiling for a given node. The results file
    # ('hotshot.stats') will be sensed and printed out by the vision framework's
    # RunInference.py script at the end of inference.
    # Also uncomment the hotshot import at the top of this file.
    # NOTE: the "if False" makes this entire profiling branch dead code; flip
    # it to True (and restore the hotshot import) to enable profiling.
    if False and self.learningMode \
        and self._iterations > 0 and self._iterations <= 10:

      import hotshot

      if self._iterations == 10:
        print "\n  Collecting and sorting internal node profiling stats generated by hotshot..."
        stats = hotshot.stats.load("hotshot.stats")
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats()

      # The guts of the compute are contained in the _compute() call so that
      # we can profile it if requested.
      if self._profileObj is None:
        print "\n  Preparing to capture profile using hotshot..."
        if os.path.exists('hotshot.stats'):
          # There is an old hotshot stats profile left over, remove it.
          os.remove('hotshot.stats')
        self._profileObj = hotshot.Profile("hotshot.stats", 1, 1)
                                          # filename, lineevents, linetimings
      self._profileObj.runcall(self._compute, *[inputs, outputs])
    else:
      # Normal (non-profiled) path.
      self._compute(inputs, outputs)
  def _compute(self, inputs, outputs):
    """
    Run one iteration of TMRegion's compute.

    Reads the 'bottomUpIn' (and optional 'resetIn') inputs, feeds them
    through the temporal memory, and populates the 'bottomUpOut' output
    (plus 'topDownOut', anomaly outputs, and active/predicted-active cell
    outputs when the corresponding modes are enabled).
    """
    #if self.topDownMode and (not 'topDownIn' in inputs):
    #  raise RuntimeError("The input topDownIn must be linked in if "
    #                     "topDownMode is True")

    if self._tfdr is None:
      raise RuntimeError("TM has not been initialized")

    # Conditional compute break
    self._conditionalBreak()

    self._iterations += 1

    # Get our inputs as numpy array
    buInputVector = inputs['bottomUpIn']

    # Handle reset signal: a nonzero scalar in 'resetIn' resets the TM's
    # sequence state before this input is processed.
    resetSignal = False
    if 'resetIn' in inputs:
      assert len(inputs['resetIn']) == 1
      if inputs['resetIn'][0] != 0:
        self._tfdr.reset()
        self._sequencePos = 0  # Position within the current sequence

    # Capture the *previous* step's predictions BEFORE calling compute(),
    # since compute() advances the TM's state. These are compared against
    # this step's activity below.
    if self.computePredictedActiveCellIndices:
      prevPredictedState = self._tfdr.getPredictedState().reshape(-1).astype('float32')
    if self.anomalyMode:
      prevPredictedColumns = self._tfdr.topDownCompute().copy().nonzero()[0]

    # Perform inference and/or learning
    tpOutput = self._tfdr.compute(buInputVector, self.learningMode, self.inferenceMode)
    self._sequencePos += 1

    # OR'ing together the cells in each column?
    if self.orColumnOutputs:
      tpOutput= tpOutput.reshape(self.columnCount,
                                 self.cellsPerColumn).max(axis=1)

    # Direct logging of non-zero TM outputs (sparse format: size then the
    # indices of nonzero elements)
    if self._fpLogTPOutput:
      output = tpOutput.reshape(-1)
      outputNZ = tpOutput.nonzero()[0]
      outStr = " ".join(["%d" % int(token) for token in outputNZ])
      print >>self._fpLogTPOutput, output.size, outStr

    # Write the bottom up out to our node outputs
    outputs['bottomUpOut'][:] = tpOutput.flat

    if self.topDownMode:
      # Top-down compute
      outputs['topDownOut'][:] = self._tfdr.topDownCompute().copy()

    # Set output for use with anomaly classification region if in anomalyMode
    if self.anomalyMode:
      activeLearnCells = self._tfdr.getLearnActiveStateT()
      size = activeLearnCells.shape[0] * activeLearnCells.shape[1]
      outputs['lrnActiveStateT'][:] = activeLearnCells.reshape(size)

      # Raw anomaly score: fraction of active columns not predicted last step.
      activeColumns = buInputVector.nonzero()[0]
      outputs['anomalyScore'][:] = anomaly.computeRawAnomalyScore(
        activeColumns, prevPredictedColumns)

    if self.computePredictedActiveCellIndices:
      # Reshape so we are dealing with 1D arrays
      activeState = self._tfdr._getActiveState().reshape(-1).astype('float32')
      activeIndices = numpy.where(activeState != 0)[0]
      predictedIndices= numpy.where(prevPredictedState != 0)[0]
      # Cells both active now and predicted last step.
      predictedActiveIndices = numpy.intersect1d(activeIndices, predictedIndices)
      outputs["activeCells"].fill(0)
      outputs["activeCells"][activeIndices] = 1
      outputs["predictedActiveCells"].fill(0)
      outputs["predictedActiveCells"][predictedActiveIndices] = 1
#############################################################################
#
# Region API support methods: getSpec, getParameter, and setParameter
#
#############################################################################
#############################################################################
@classmethod
def getBaseSpec(cls):
"""
Doesn't include the spatial, temporal and other parameters
:returns: (dict) the base Spec for TMRegion.
"""
spec = dict(
description=TMRegion.__doc__,
singleNodeOnly=True,
inputs=dict(
bottomUpIn=dict(
description="""The input signal, conceptually organized as an
image pyramid data structure, but internally
organized as a flattened vector.""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
resetIn=dict(
description="""Effectively a boolean flag that indicates whether
or not the input vector received in this compute cycle
represents the first training presentation in a
new temporal sequence.""",
dataType='Real32',
count=1,
required=False,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
sequenceIdIn=dict(
description="Sequence ID",
dataType='UInt64',
count=1,
required=False,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
),
outputs=dict(
bottomUpOut=dict(
description="""The output signal generated from the bottom-up inputs
from lower levels.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=True),
topDownOut=dict(
description="""The top-down inputsignal, generated from
feedback from upper levels""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
activeCells=dict(
description="The cells that are active",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
predictedActiveCells=dict(
description="The cells that are active and predicted",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
anomalyScore = dict(
description="""The score for how 'anomalous' (i.e. rare) the current
sequence is. Higher values are increasingly rare""",
dataType='Real32',
count=1,
regionLevel=True,
isDefaultOutput=False),
lrnActiveStateT = dict(
description="""Active cells during learn phase at time t. This is
used for anomaly classification.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
),
parameters=dict(
breakPdb=dict(
description='Set to 1 to stop in the pdb debugger on the next compute',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
breakKomodo=dict(
description='Set to 1 to stop in the Komodo debugger on the next compute',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
),
commands = {}
)
return spec
@classmethod
def getSpec(cls):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSpec`.
The parameters collection is constructed based on the parameters specified
by the various components (spatialSpec, temporalSpec and otherSpec)
"""
spec = cls.getBaseSpec()
t, o = _getAdditionalSpecs(temporalImp=gDefaultTemporalImp)
spec['parameters'].update(t)
spec['parameters'].update(o)
return spec
def getAlgorithmInstance(self):
"""
:returns: instance of the underlying
:class:`~nupic.algorithms.temporal_memory.TemporalMemory`
algorithm object.
"""
return self._tfdr
def getParameter(self, parameterName, index=-1):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameter`.
Get the value of a parameter. Most parameters are handled automatically by
:class:`~nupic.bindings.regions.PyRegion.PyRegion`'s parameter get mechanism. The
ones that need special treatment are explicitly handled here.
"""
if parameterName in self._temporalArgNames:
return getattr(self._tfdr, parameterName)
else:
return PyRegion.getParameter(self, parameterName, index)
def setParameter(self, parameterName, index, parameterValue):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.
"""
if parameterName in self._temporalArgNames:
setattr(self._tfdr, parameterName, parameterValue)
elif parameterName == "logPathOutput":
self.logPathOutput = parameterValue
# Close any existing log file
if self._fpLogTPOutput is not None:
self._fpLogTPOutput.close()
self._fpLogTPOutput = None
# Open a new log file if requested
if parameterValue:
self._fpLogTPOutput = open(self.logPathOutput, 'w')
elif hasattr(self, parameterName):
setattr(self, parameterName, parameterValue)
else:
raise Exception('Unknown parameter: ' + parameterName)
#############################################################################
#
# Commands
#
#############################################################################
def resetSequenceStates(self):
"""
Resets the region's sequence states.
"""
self._tfdr.reset()
self._sequencePos = 0 # Position within the current sequence
return
def finishLearning(self):
"""
Perform an internal optimization step that speeds up inference if we know
learning will not be performed anymore. This call may, for example, remove
all potential inputs to each column.
"""
if self._tfdr is None:
raise RuntimeError("Temporal memory has not been initialized")
if hasattr(self._tfdr, 'finishLearning'):
self.resetSequenceStates()
self._tfdr.finishLearning()
#############################################################################
#
# Methods to support serialization
#
#############################################################################
  @staticmethod
  def getSchema():
    """
    Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`.

    :returns: the TMRegionProto capnproto schema class used by
        writeToProto/readFromProto.
    """
    return TMRegionProto
def writeToProto(self, proto):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.
Write state to proto object.
:param proto: TMRegionProto capnproto object
"""
proto.temporalImp = self.temporalImp
proto.columnCount = self.columnCount
proto.inputWidth = self.inputWidth
proto.cellsPerColumn = self.cellsPerColumn
proto.learningMode = self.learningMode
proto.inferenceMode = self.inferenceMode
proto.anomalyMode = self.anomalyMode
proto.topDownMode = self.topDownMode
proto.computePredictedActiveCellIndices = (
self.computePredictedActiveCellIndices)
proto.orColumnOutputs = self.orColumnOutputs
if self.temporalImp == "py":
tmProto = proto.init("backtrackingTM")
elif self.temporalImp == "cpp":
tmProto = proto.init("backtrackingTMCpp")
elif self.temporalImp == "tm_py":
tmProto = proto.init("temporalMemory")
elif self.temporalImp == "tm_cpp":
tmProto = proto.init("temporalMemory")
else:
raise TypeError(
"Unsupported temporalImp for capnp serialization: {}".format(
self.temporalImp))
self._tfdr.write(tmProto)
@classmethod
def readFromProto(cls, proto):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.
Read state from proto object.
:param proto: TMRegionProto capnproto object
"""
instance = cls(proto.columnCount, proto.inputWidth, proto.cellsPerColumn)
instance.temporalImp = proto.temporalImp
instance.learningMode = proto.learningMode
instance.inferenceMode = proto.inferenceMode
instance.anomalyMode = proto.anomalyMode
instance.topDownMode = proto.topDownMode
instance.computePredictedActiveCellIndices = (
proto.computePredictedActiveCellIndices)
instance.orColumnOutputs = proto.orColumnOutputs
if instance.temporalImp == "py":
tmProto = proto.backtrackingTM
elif instance.temporalImp == "cpp":
tmProto = proto.backtrackingTMCpp
elif instance.temporalImp == "tm_py":
tmProto = proto.temporalMemory
elif instance.temporalImp == "tm_cpp":
tmProto = proto.temporalMemory
else:
raise TypeError(
"Unsupported temporalImp for capnp serialization: {}".format(
instance.temporalImp))
instance._tfdr = _getTPClass(proto.temporalImp).read(tmProto)
return instance
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
# We only want to serialize a single spatial/temporal FDR if they're cloned
for ephemeralMemberName in self._getEphemeralMembersAll():
state.pop(ephemeralMemberName, None)
return state
def serializeExtraData(self, filePath):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.serializeExtraData`.
"""
if self._tfdr is not None:
self._tfdr.saveToFile(filePath)
def deSerializeExtraData(self, filePath):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.deSerializeExtraData`.
This method is called during network deserialization with an external
filename that can be used to bypass pickle for loading large binary states.
:param filePath: (string) absolute file path
"""
if self._tfdr is not None:
self._tfdr.loadFromFile(filePath)
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
"""
if not hasattr(self, 'storeDenseOutput'):
self.storeDenseOutput = False
if not hasattr(self, 'computePredictedActiveCellIndices'):
self.computePredictedActiveCellIndices = False
self.__dict__.update(state)
self._loaded = True
# Initialize all non-persistent base members, as well as give
# derived class an opportunity to do the same.
self._initialize()
def _initEphemerals(self):
"""
Initialize all ephemerals used by derived classes.
"""
self._sequencePos = 0
self._fpLogTPOutput = None
self.logPathOutput = None
def _getEphemeralMembers(self):
"""
Callback that returns a list of all "ephemeral" members (i.e., data members
that should not and/or cannot be pickled.)
"""
return ['_sequencePos', '_fpLogTPOutput', 'logPathOutput',]
def _getEphemeralMembersBase(self):
"""
Returns list of all ephemeral members.
"""
return [
'_loaded',
'_profileObj',
'_iterations',
]
def _getEphemeralMembersAll(self):
"""
Returns a concatenated list of both the standard base class
ephemeral members, as well as any additional ephemeral members
(e.g., file handles, etc.).
"""
return self._getEphemeralMembersBase() + self._getEphemeralMembers()
def _checkEphemeralMembers(self):
for attrName in self._getEphemeralMembersBase():
if not hasattr(self, attrName):
print "Missing base class member:", attrName
for attrName in self._getEphemeralMembers():
if not hasattr(self, attrName):
print "Missing derived class member:", attrName
for attrName in self._getEphemeralMembersBase():
assert hasattr(self, attrName)
for attrName in self._getEphemeralMembers():
assert hasattr(self, attrName), "Node missing attr '%s'." % attrName
#############################################################################
#
# Misc. code
#
#############################################################################
  def _conditionalBreak(self):
    # Drop into a debugger when the corresponding debug flag has been set
    # via the breakKomodo/breakPdb parameters (see getBaseSpec). The imports
    # are deliberately local so the debuggers are only loaded on demand.
    if self.breakKomodo:
      import dbgp.client; dbgp.client.brk()
    if self.breakPdb:
      import pdb; pdb.set_trace()
#############################################################################
#
# NuPIC 2 Support
#
#############################################################################
def getOutputElementCount(self, name):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.
"""
if name == 'bottomUpOut':
return self.outputWidth
elif name == 'topDownOut':
return self.columnCount
elif name == 'lrnActiveStateT':
return self.outputWidth
elif name == "activeCells":
return self.outputWidth
elif name == "predictedActiveCells":
return self.outputWidth
else:
raise Exception("Invalid output name specified")
# TODO: as a temporary hack, getParameterArrayCount checks to see if there's a variable, private or
# not, with that name. If so, it attempts to return the length of that variable.
def getParameterArrayCount(self, name, index):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArrayCount`.
"""
p = self.getParameter(name)
if (not hasattr(p, '__len__')):
raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name)
return len(p)
# TODO: as a temporary hack, getParameterArray checks to see if there's a variable, private or not,
# with that name. If so, it returns the value of the variable.
  def getParameterArray(self, name, index, a):
    """
    Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.

    Copies the named array parameter's contents into the caller-supplied
    array ``a`` (which is assumed to be pre-sized by the caller — TODO
    confirm against getParameterArrayCount usage).
    """
    p = self.getParameter(name)
    if (not hasattr(p, '__len__')):
      raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name)

    # Copy in place; empty parameters leave `a` untouched.
    if len(p) > 0:
      a[:] = p[:]
| 33,436 | Python | .py | 835 | 33.267066 | 101 | 0.646364 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,906 | __init__.py | numenta_nupic-legacy/src/nupic/regions/extra/__init__.py |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 977 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,907 | __init__.py | numenta_nupic-legacy/src/nupic/engine/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import nupic.bindings.engine_internal as engine_internal
from nupic.support.lock_attributes import LockAttributesMixin
import functools
basicTypes = ['Byte',
'Int16', 'UInt16',
'Int32', 'UInt32',
'Int64', 'UInt64',
'Real32', 'Real64',
'Handle',
'Bool']
arrayTypes = ['ByteArray',
'Int16Array', 'UInt16Array',
'Int32Array', 'UInt32Array',
'Int64Array', 'UInt64Array',
'Real32Array', 'Real64Array',
# No 'HandleArray'
'BoolArray']
pyRegions = (
("nupic.bindings.regions.TestNode", "TestNode"),
("nupic.regions.anomaly_likelihood_region", "AnomalyLikelihoodRegion"),
("nupic.regions.anomaly_region", "AnomalyRegion"),
("nupic.regions.knn_anomaly_classifier_region", "KNNAnomalyClassifierRegion"),
("nupic.regions.knn_classifier_region", "KNNClassifierRegion"),
("nupic.regions.pluggable_encoder_sensor", "PluggableEncoderSensor"),
("nupic.regions.PyRegion", "PyRegion"),
("nupic.regions.record_sensor", "RecordSensor"),
("nupic.regions.sdr_classifier_region", "SDRClassifierRegion"),
("nupic.regions.sp_region", "SPRegion"),
("nupic.regions.test_region", "TestRegion"),
("nupic.regions.tm_region", "TMRegion"),
("nupic.regions.unimportable_node", "UnimportableNode"),
)
registeredRegions = False
def registerBuiltInRegions():
global registeredRegions
# Initialize nupic regions
if not registeredRegions:
for module, className in pyRegions:
engine_internal.Network.registerPyRegion(module, className)
registeredRegions = True
registerBuiltInRegions()
for a in arrayTypes:
exec('from %s import %s as %s' % (engine_internal.__name__, a, a))
# Intercept the default exception handling for the purposes of stripping
# parts of the stack trace that can confuse users. If you want the original
# stack trace define this environment variable
if not 'NTA_STANDARD_PYTHON_UNHANDLED_EXCEPTIONS' in os.environ:
import traceback
import cStringIO
def customExceptionHandler(type, value, tb):
"""Catch unhandled Python exception
The handler prints the original exception info including into a buffer.
It then extracts the original error message (when the exception is raised
inside a Py node additional stacktrace info will be appended in the end)
and saves the original exception to a file called error.txt. It prints
just the error message to the screen and tells the user about the error.txt
file.
"""
# Print the exception info to a string IO buffer for manipulation
buff = cStringIO.StringIO()
traceback.print_exception(type, value, tb, file=buff)
text = buff.getvalue()
# get the lines skip the first one: "Traceback (most recent call last)"
lines = text.split('\n')[1:]
#
# Extract the error message
begin = 0
end = len(lines)
for i, line in enumerate(lines):
if line.startswith('RuntimeError:'):
begin = i
#
# elif line.startswith('Traceback (most recent call last):'):
# end = i
# break
#
message = '\n'.join(lines[begin:end])
message = message[len('Runtime Error:'):]
#stacktrace = lines[end:]
# Get the stack trace if available (default to empty string)
stacktrace = getattr(value, 'stackTrace', '')
# Remove engine from stack trace
lines = [x for x in lines if 'engine' not in x]
failMessage = 'The program failed with the following error message:'
dashes = '-' * len(failMessage)
print
print dashes
print 'Traceback (most recent call last):'
print '\n'.join(lines[:begin-2])
if stacktrace:
print stacktrace
print dashes
print 'The program failed with the following error message:'
print dashes
print message
print
#sys.excepthook = customExceptionHandler
# Expose the timer class directly
Timer = engine_internal.Timer
# Expose the os class directly
# The only wrapped method is getProcessMemoryUsage()
OS = engine_internal.OS
class Dimensions(engine_internal.Dimensions):
"""Represent the topology of an N-dimensional region
Basically, it is a list of integers such as: [4, 8, 6]
In this example the topology is a 3 dimensional region with
4 x 8 x 6 nodes.
You can initialize it with a list of dimensions or with no arguments
and then append dimensions.
"""
def __init__(self, *args):
"""Construct a Dimensions object
The constructor can be called with no arguments or with a list
of integers
"""
# Init the base class
engine_internal.Dimensions.__init__(self, *args)
def __str__(self):
return self.toString()
def Array(dtype, size=None, ref=False):
"""Factory function that creates typed Array or ArrayRef objects
dtype - the data type of the array (as string).
Supported types are: Byte, Int16, UInt16, Int32, UInt32, Int64, UInt64, Real32, Real64
size - the size of the array. Must be positive integer.
"""
def getArrayType(self):
"""A little function to replace the getType() method of arrays
It returns a string representation of the array element type instead of the
integer value (NTA_BasicType enum) returned by the origianl array
"""
return self._dtype
# ArrayRef can't be allocated
if ref:
assert size is None
index = basicTypes.index(dtype)
if index == -1:
raise Exception('Invalid data type: ' + dtype)
if size and size <= 0:
raise Exception('Array size must be positive')
suffix = 'ArrayRef' if ref else 'Array'
arrayFactory = getattr(engine_internal, dtype + suffix)
arrayFactory.getType = getArrayType
if size:
a = arrayFactory(size)
else:
a = arrayFactory()
a._dtype = basicTypes[index]
return a
def ArrayRef(dtype):
  """Create a non-owning, unsized ArrayRef of the given element type."""
  return Array(dtype, size=None, ref=True)
class CollectionWrapper(object):
  """Wrap an nupic::Collection with a dict-like interface.

  collection   - the original collection
  valueWrapper - an optional callable invoked as valueWrapper(key, value) on
                 every value read out of the collection, used to adapt engine
                 objects to their high-level wrappers. The default returns
                 the value unchanged.
  """

  def IdentityWrapper(key, value):
    # BUG FIX: the wrapper is always invoked with (key, value); the previous
    # one-argument identity function raised TypeError whenever the default
    # wrapper was actually used (e.g. for a Spec's commands collection).
    return value

  def __init__(self, collection, valueWrapper=IdentityWrapper):
    self.collection = collection
    self.valueWrapper = valueWrapper
    # NOTE: a no-op statement comparing (not assigning) __class__.__doc__
    # used to live here; it had no effect and has been removed.

  def __iter__(self):
    return engine_internal.IterableCollection(self.collection)

  def __str__(self):
    return str(self.collection)

  def __repr__(self):
    return repr(self.collection)

  def __len__(self):
    return self.collection.getCount()

  def __getitem__(self, key):
    if not self.collection.contains(key):
      # str(key) so a non-string key still produces a KeyError, not TypeError
      raise KeyError('Key ' + str(key) + ' not found')
    value = self.collection.getByName(key)
    value = self.valueWrapper(key, value)
    return value

  def get(self, key, default=None):
    """Return self[key] if the key exists, otherwise default."""
    try:
      return self.__getitem__(key)
    except KeyError:
      return default

  def __contains__(self, key):
    return self.collection.contains(key)

  def keys(self):
    """Return the collection's keys, in collection index order."""
    keys = list()
    for i in range(self.collection.getCount()):
      keys.append(self.collection.getByIndex(i)[0])
    return keys

  def values(self):
    """Return the (wrapped) values, in collection index order."""
    values = list()
    for i in range(self.collection.getCount()):
      p = self.collection.getByIndex(i)
      values.append(self.valueWrapper(p[0], p[1]))
    return values

  def items(self):
    """Return (key, wrapped value) pairs, in collection index order."""
    return zip(self.keys(), self.values())

  def __eq__(self, other):
    return self.collection == other.collection

  def __ne__(self, other):
    return not self.__eq__(other)

  def __hash__(self):
    return hash(self.collection)
class SpecItem(object):
  """Wrapper that translates the data type and access code to a string.

  The original values are an enumerated type in C++ that become just
  integers in Python. This class wraps the original ParameterSpec and
  translates the integer values to the meaningful strings that correspond
  to the C++ enum labels. It is used to wrap ParameterSpec, InputSpec and
  OutputSpec.
  """
  # Index position in this list corresponds to the C++ access-mode enum.
  accessModes = ['Create', 'ReadOnly', 'ReadWrite']

  def __init__(self, name, item):
    self.name = name
    self.item = item
    # Translate the NTA_BasicType enum value to its string name.
    self.dataType = basicTypes[item.dataType]
    # Only ParameterSpec items carry an access mode.
    if hasattr(item, 'accessMode'):
      self.accessMode = SpecItem.accessModes[item.accessMode]

  def __getattr__(self, name):
    # Forward any other attribute lookup to the wrapped spec item.
    return getattr(self.item, name)

  def __str__(self):
    d = dict(name=self.name,
             description=self.description,
             dataType=self.dataType,
             count=self.count)
    # BUG FIX: the old code checked hasattr('accessMode') twice and
    # redundantly re-assigned self.accessMode as a side effect of __str__;
    # now the string form is computed without mutating the instance.
    if hasattr(self.item, 'accessMode'):  # ParameterSpec only
      d['accessMode'] = SpecItem.accessModes[self.item.accessMode]
    if hasattr(self.item, 'constraints'):  # ParameterSpec only
      d['constraints'] = self.constraints
    if hasattr(self.item, 'defaultValue'):  # ParameterSpec only
      d['defaultValue'] = self.defaultValue
    return str(d)
class Spec(object):
  """High-level wrapper around an engine Spec.

  Exposes description and singleNodeOnly directly, and wraps the inputs,
  outputs and parameters collections so their items come back as SpecItem
  objects. Commands need no item translation, so they use the default
  CollectionWrapper.
  """

  def __init__(self, spec):
    self.spec = spec
    # NOTE: a no-op statement comparing (not assigning) __class__.__doc__
    # used to live here; it had no effect and has been removed.
    self.description = spec.description
    self.singleNodeOnly = spec.singleNodeOnly
    self.inputs = CollectionWrapper(spec.inputs, SpecItem)
    self.outputs = CollectionWrapper(spec.outputs, SpecItem)
    self.parameters = CollectionWrapper(spec.parameters, SpecItem)
    self.commands = CollectionWrapper(spec.commands)

  def __str__(self):
    return self.spec.toString()

  def __repr__(self):
    return self.spec.toString()
class _ArrayParameterHelper:
  """Helper used by Region._getParameterMethods for array-valued parameters.

  Binds a region and an element type so getParameterArray can be handed out
  as the 'getter' half of a (setter, getter) pair.
  """

  def __init__(self, region, datatype):
    self._region = region
    # Keep the element type as its string name (e.g. 'Real32'), which is
    # what the Array factory expects.
    self.datatype = basicTypes[datatype]

  def getParameterArray(self, paramName):
    """Fetch an array parameter as a typed PyArray (not a plain array)."""
    # Allocate an unsized typed array and let the region size and fill it.
    result = Array(self.datatype)
    self._region.getParameterArray(paramName, result)
    return result
class Region(LockAttributesMixin):
  """
  @doc:place_holder(Region.description)
  """
  # Wrapper for a network region:
  # - Maintains original documentation
  # - Implements syntactic sugar properties:
  #     name = property(getName)
  #     type = property(getType)
  #     spec = property(getSpec)
  #     dimensions = property(getDimensions, setDimensions)
  #     network = property(getNetwork)
  # - Makes sure that returned objects are high-level wrapper objects
  # - Forwards everything else to the internal region

  def __init__(self, region, network):
    """Store the wrapped region and hosting network.

    The network is the high-level Network and not the internal
    Network. This is important in case the user requests the network
    from the region (never leak an engine object, remember).
    """
    self._network = network
    self._region = region
    # A cache for typed get/setParameter() calls
    self._paramTypeCache = {}

  def __getattr__(self, name):
    # Forward unknown attributes to the wrapped internal region. The guard
    # avoids infinite recursion when '_region' itself is not yet set
    # (e.g. during unpickling).
    if '_region' not in self.__dict__:
      raise AttributeError
    return getattr(self._region, name)

  def __setattr__(self, name, value):
    # Our own bookkeeping attributes live on the wrapper; 'dimensions' is
    # routed through setDimensions; everything else goes to the internal
    # region.
    if name in ('_region', '__class__', '_network'):
      self.__dict__[name] = value
    elif name == 'dimensions':
      self.setDimensions(value)
    else:
      setattr(self._region, name, value)

  @staticmethod
  def getSpecFromType(nodeType):
    """
    @doc:place_holder(Region.getSpecFromType)
    """
    return Spec(engine_internal.Region.getSpecFromType(nodeType))

  def compute(self):
    """
    @doc:place_holder(Region.compute)

    ** This line comes from the original docstring (not generated by Documentor)
    """
    return self._region.compute()

  def getInputData(self, inputName):
    """
    @doc:place_holder(Region.getInputData)
    """
    return self._region.getInputArray(inputName)

  def getOutputData(self, outputName):
    """
    @doc:place_holder(Region.getOutputData)
    """
    return self._region.getOutputArray(outputName)

  def getInputNames(self):
    """
    Returns list of input names in spec.
    """
    inputs = self.getSpec().inputs
    return [inputs.getByIndex(i)[0] for i in xrange(inputs.getCount())]

  def getOutputNames(self):
    """
    Returns list of output names in spec.
    """
    outputs = self.getSpec().outputs
    return [outputs.getByIndex(i)[0] for i in xrange(outputs.getCount())]

  def executeCommand(self, args):
    """
    @doc:place_holder(Region.executeCommand)
    """
    return self._region.executeCommand(args)

  def _getSpec(self):
    """Spec of the region"""
    return Spec(self._region.getSpec())

  def _getDimensions(self):
    """Dimensions of the region"""
    return Dimensions(tuple(self._region.getDimensions()))

  def _getNetwork(self):
    """Network for the region"""
    return self._network

  def __hash__(self):
    """Hash a region"""
    return self._region.__hash__()

  def __eq__(self, other):
    """Compare regions"""
    return self._region == other._region

  def __ne__(self, other):
    """Compare regions"""
    return self._region != other._region

  def _getParameterMethods(self, paramName):
    """Returns functions to set/get the parameter. These are
    the strongly typed functions get/setParameterUInt32, etc.
    The return value is a pair:
        setfunc, getfunc
    If the parameter is not available on this region, setfunc/getfunc
    are None. """
    if paramName in self._paramTypeCache:
      return self._paramTypeCache[paramName]
    try:
      # Catch the error here. We will re-throw in getParameter or
      # setParameter with a better error message than we could generate here.
      # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
      # and SystemExit.
      paramSpec = self.getSpec().parameters.getByName(paramName)
    except Exception:
      return (None, None)
    dataType = paramSpec.dataType
    dataTypeName = basicTypes[dataType]
    count = paramSpec.count
    if count == 1:
      # Dynamically generate the proper typed get/setParameter<dataType>
      x = 'etParameter' + dataTypeName
      try:
        g = getattr(self, 'g' + x)  # get the typed getParameter method
        s = getattr(self, 's' + x)  # get the typed setParameter method
      except AttributeError:
        raise Exception("Internal error: unknown parameter type %s" %
                        dataTypeName)
      info = (s, g)
    else:
      if dataTypeName == "Byte":
        info = (self.setParameterString, self.getParameterString)
      else:
        helper = _ArrayParameterHelper(self, dataType)
        info = (self.setParameterArray, helper.getParameterArray)
    self._paramTypeCache[paramName] = info
    return info

  def getParameter(self, paramName):
    """Get parameter value"""
    (setter, getter) = self._getParameterMethods(paramName)
    if getter is None:
      # `exceptions.Exception` is just `Exception` in Python 2, so the
      # py2-only `import exceptions` was dropped.
      raise Exception(
          "getParameter -- parameter name '%s' does not exist in region %s of type %s"
          % (paramName, self.name, self.type))
    return getter(paramName)

  def setParameter(self, paramName, value):
    """Set parameter value"""
    (setter, getter) = self._getParameterMethods(paramName)
    if setter is None:
      raise Exception(
          "setParameter -- parameter name '%s' does not exist in region %s of type %s"
          % (paramName, self.name, self.type))
    setter(paramName, value)

  def _get(self, method):
    """Auto forwarding of properties to get methods of internal region"""
    return getattr(self._region, method)()

  network = property(_getNetwork,
                     doc='@property:place_holder(Region.getNetwork)')

  name = property(functools.partial(_get, method='getName'),
                  doc="@property:place_holder(Region.getName)")

  type = property(functools.partial(_get, method='getType'),
                  doc='@property:place_holder(Region.getType)')

  spec = property(_getSpec, doc='@property:place_holder(Region.getSpec)')

  # NOTE(review): assignment to `dimensions` is intercepted by __setattr__
  # before this property's setter can run, so the setter below appears to be
  # dead code -- kept as-is for compatibility; confirm before removing.
  dimensions = property(_getDimensions,
                        engine_internal.Region.setDimensions,
                        doc='@property:place_holder(Region.getDimensions)')

  computeTimer = property(functools.partial(_get, method='getComputeTimer'),
                          doc='@property:place_holder(Region.getComputeTimer)')

  executeTimer = property(functools.partial(_get, method='getExecuteTimer'),
                          doc='@property:place_holder(Region.getExecuteTimer)')
class Network(engine_internal.Network):
  """
  @doc:place_holder(Network.description)
  """

  def __init__(self, *args):
    """Constructor.

    - Initialize the internal engine_internal.Network class generated by Swig
    - Attach docstrings to selected methods
    """
    # Init engine_internal.Network class
    engine_internal.Network.__init__(self, *args)

    # Prepare documentation table.
    # Each item is a pair of method/property and docstring.
    # The docstring is attached later to the method or property.
    # The key for method items is the method object of the
    # engine_internal.Network class. The key for properties is the property
    # name.
    docTable = (
        (engine_internal.Network.getRegions, 'Get the collection of regions in a network'),
    )

    # Attach documentation to methods and properties
    for obj, docString in docTable:
      if isinstance(obj, str):
        prop = getattr(Network, obj)
        assert isinstance(prop, property)
        setattr(Network, obj, property(prop.fget, prop.fset, prop.fdel,
                                       docString))
      else:
        # Python 2: attach the doc to the function object behind the
        # unbound method.
        obj.im_func.__doc__ = docString

  def _getRegions(self):
    """Get the collection of regions in a network.

    This is a tricky one. The collection of regions returned from
    the internal network is a collection of internal regions. The
    desired collection is a collection of net.Region objects that also
    point to this network (net.network) and not to the internal network.
    To achieve that, a CollectionWrapper class is used with a custom
    makeRegion() function (see below) as a value wrapper. The
    CollectionWrapper class wraps each value in the original collection
    with the result of the valueWrapper.
    """
    def makeRegion(name, r):
      """Wrap an engine region with a high-level Region.

      Also passes the containing high-level Network in _network. This
      function is passed as a value wrapper to the CollectionWrapper.
      """
      r = Region(r, self)
      return r

    regions = CollectionWrapper(engine_internal.Network.getRegions(self),
                                makeRegion)
    return regions

  def addRegion(self, name, nodeType, nodeParams):
    """
    @doc:place_holder(Network.addRegion)
    """
    engine_internal.Network.addRegion(self, name, nodeType, nodeParams)
    return self._getRegions()[name]

  def addRegionFromBundle(self, name, nodeType, dimensions, bundlePath, label):
    """
    @doc:place_holder(Network.addRegionFromBundle)
    """
    engine_internal.Network.addRegionFromBundle(self, name, nodeType, dimensions,
                                                bundlePath, label)
    return self._getRegions()[name]

  def setPhases(self, name, phases):
    """
    @doc:place_holder(Network.setPhases)
    """
    phases = engine_internal.UInt32Set(phases)
    engine_internal.Network.setPhases(self, name, phases)

  def run(self, n):
    """
    @doc:place_holder(Network.run)
    """
    # Just forward to the internal network.
    # This is needed for inspectors to work properly because they wrap some
    # key methods such as 'run'.
    engine_internal.Network.run(self, n)

  def disableProfiling(self, *args, **kwargs):
    """
    @doc:place_holder(Network.disableProfiling)
    """
    engine_internal.Network.disableProfiling(self, *args, **kwargs)

  def enableProfiling(self, *args, **kwargs):
    """
    @doc:place_holder(Network.enableProfiling)
    """
    engine_internal.Network.enableProfiling(self, *args, **kwargs)

  def getCallbacks(self, *args, **kwargs):
    """
    @doc:place_holder(Network.getCallbacks)
    """
    # BUG FIX: the result was previously computed and discarded; a getter
    # must return its value.
    return engine_internal.Network.getCallbacks(self, *args, **kwargs)

  def initialize(self, *args, **kwargs):
    """
    @doc:place_holder(Network.initialize)
    """
    engine_internal.Network.initialize(self, *args, **kwargs)

  def link(self, *args, **kwargs):
    """
    @doc:place_holder(Network.link)
    """
    engine_internal.Network.link(self, *args, **kwargs)

  def removeLink(self, *args, **kwargs):
    """
    @doc:place_holder(Network.removeLink)
    """
    engine_internal.Network.removeLink(self, *args, **kwargs)

  def removeRegion(self, *args, **kwargs):
    """
    @doc:place_holder(Network.removeRegion)
    """
    engine_internal.Network.removeRegion(self, *args, **kwargs)

  def resetProfiling(self, *args, **kwargs):
    """
    @doc:place_holder(Network.resetProfiling)
    """
    engine_internal.Network.resetProfiling(self, *args, **kwargs)

  def save(self, *args, **kwargs):
    """
    @doc:place_holder(Network.save)
    """
    if len(args) > 0 and not isinstance(args[0], str):
      raise TypeError("Save path must be of type {}.".format(str))
    engine_internal.Network.save(self, *args, **kwargs)

  def getRegionsByType(self, regionClass):
    """
    Gets all region instances of a given class
    (for example, nupic.regions.sp_region.SPRegion).
    """
    regions = []
    for region in self.regions.values():
      if type(region.getSelf()) is regionClass:
        regions.append(region)
    return regions

  @staticmethod
  def registerRegion(regionClass):
    """
    Adds the module and class name for the region to the list of classes the
    network can use.

    regionClass: a pointer to a subclass of PyRegion
    """
    engine_internal.Network.registerPyRegion(regionClass.__module__,
                                             regionClass.__name__)

  @staticmethod
  def unregisterRegion(regionName):
    """
    Unregisters a region from the internal list of regions.

    :param str regionName: The name of the region to unregister
                           (ex: regionName=regionClass.__name__)
    """
    engine_internal.Network.unregisterPyRegion(regionName)

  # Syntactic sugar properties
  regions = property(_getRegions,
                     doc='@property:place_holder(Network.getRegions)')

  minPhase = property(engine_internal.Network.getMinPhase,
                      doc='@property:place_holder(Network.getMinPhase)')

  maxPhase = property(engine_internal.Network.getMaxPhase,
                      doc='@property:place_holder(Network.getMaxPhase)')

  minEnabledPhase = property(
      engine_internal.Network.getMinEnabledPhase,
      engine_internal.Network.setMinEnabledPhase,
      doc='@property:place_holder(Network.getMinEnabledPhase)')

  maxEnabledPhase = property(
      engine_internal.Network.getMaxEnabledPhase,
      engine_internal.Network.setMaxEnabledPhase,
      doc='@property:place_holder(Network.getMaxEnabledPhase)')
if __name__ == '__main__':
  # Ad-hoc smoke test: exercises the Network, Dimensions, Array and Region
  # wrappers when this module is run directly (Python 2 print statements).
  n = Network()
  print n.regions
  print len(n.regions)
  print Network.regions.__doc__
  d = Dimensions([3, 4, 5])
  print len(d)
  print d
  a = Array('Byte', 5)
  print len(a)
  # Fill the byte array with 'A', 'B', 'C', ... and echo it back.
  for i in range(len(a)):
    a[i] = ord('A') + i
  for i in range(len(a)):
    print a[i]
  # Add a test region and show its sugar properties.
  r = n.addRegion('r', 'TestNode', '')
  print 'name:', r.name
  print 'node type:', r.type
  print 'node spec:', r.spec
| 25,146 | Python | .py | 639 | 33.658842 | 106 | 0.684247 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,908 | backtracking_tm.py | numenta_nupic-legacy/src/nupic/algorithms/backtracking_tm.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal memory implementation.
This is the Python implementation and is used as the base class for the C++
implementation in :class:`~nupic.algorithms.backtracking_tm.BacktrackingTMCPP`.
"""
import copy
import cPickle as pickle
import itertools
try:
import capnp
except ImportError:
capnp = None
import numpy
if capnp:
from nupic.algorithms.backtracking_tm_capnp import (
SegmentProto, SegmentUpdateProto, BacktrackingTMProto)
from nupic.bindings.math import Random
from nupic.bindings.algorithms import getSegmentActivityLevel, isSegmentActive
from nupic.math import GetNTAReal
from nupic.serializable import Serializable
from nupic.support.console_printer import ConsolePrinterMixin
# Default verbosity while running unit tests
VERBOSITY = 0
# The current TM version used to track the checkpoint state.
TM_VERSION = 1
# The numpy equivalent to the floating point type used by NTA
dtype = GetNTAReal()
class BacktrackingTM(ConsolePrinterMixin, Serializable):
"""
Class implementing the temporal memory algorithm as described in
`BAMI <https://numenta.com/biological-and-machine-intelligence/>`_. The
implementation here attempts to closely match the pseudocode in the
documentation. This implementation does contain several additional bells and
whistles such as a column confidence measure.
:param numberOfCols: (int) Number of mini-columns in the region. This values
needs to be the same as the number of columns in the SP, if one is
used.
:param cellsPerColumn: (int) The number of cells per mini-column.
:param initialPerm: (float) Initial permanence for newly created synapses.
:param connectedPerm: TODO: document
:param minThreshold: (int) Minimum number of active synapses for a segment to
be considered during search for the best-matching segments.
:param newSynapseCount: (int) The max number of synapses added to a segment
during learning.
:param permanenceInc: (float) Active synapses get their permanence counts
incremented by this value.
:param permanenceDec: (float) All other synapses get their permanence counts
decremented by this value.
:param permanenceMax: TODO: document
:param maxAge: (int) Number of iterations before global decay takes effect.
Also the global decay execution interval. After global decay starts, it
will will run again every ``maxAge`` iterations. If ``maxAge==1``,
global decay is applied to every iteration to every segment.
.. note:: Using ``maxAge > 1`` can significantly speed up the TM when
global decay is used.
:param globalDecay: (float) Value to decrease permanences when the global
decay process runs. Global decay will remove synapses if their
permanence value reaches 0. It will also remove segments when they no
longer have synapses.
.. note:: Global decay is applied after ``maxAge`` iterations, after
which it will run every ``maxAge`` iterations.
:param activationThreshold: (int) Number of synapses that must be active to
activate a segment.
:param doPooling: (bool) If True, pooling is enabled. False is the default.
:param segUpdateValidDuration: TODO: document
:param burnIn: (int) Used for evaluating the prediction score. Default is 2.
:param collectStats: (bool) If True, collect training / inference stats.
Default is False.
:param seed: (int) Random number generator seed. The seed affects the random
aspects of initialization like the initial permanence values. A fixed
value ensures a reproducible result.
:param verbosity: (int) Controls the verbosity of the TM diagnostic output:
- verbosity == 0: silent
- verbosity in [1..6]: increasing levels of verbosity
:param pamLength: (int) Number of time steps to remain in "Pay Attention Mode"
after we detect we've reached the end of a learned sequence. Setting
this to 0 disables PAM mode. When we are in PAM mode, we do not burst
unpredicted columns during learning, which in turn prevents us from
falling into a previously learned sequence for a while (until we run
through another 'pamLength' steps).
The advantage of PAM mode is that it requires fewer presentations to
learn a set of sequences which share elements. The disadvantage of PAM
mode is that if a learned sequence is immediately followed by set set
of elements that should be learned as a 2nd sequence, the first
``pamLength`` elements of that sequence will not be learned as part of
that 2nd sequence.
:param maxInfBacktrack: (int) How many previous inputs to keep in a buffer for
inference backtracking.
:param maxLrnBacktrack: (int) How many previous inputs to keep in a buffer for
learning backtracking.
:param maxSeqLength: (int) If not 0, we will never learn more than
``maxSeqLength`` inputs in a row without starting over at start cells.
This sets an upper bound on the length of learned sequences and thus is
another means (besides ``maxAge`` and ``globalDecay``) by which to
limit how much the TM tries to learn.
:param maxSegmentsPerCell: (int) The maximum number of segments allowed on a
cell. This is used to turn on "fixed size CLA" mode. When in effect,
``globalDecay`` is not applicable and must be set to 0 and ``maxAge``
must be set to 0. When this is used (> 0), ``maxSynapsesPerSegment``
must also be > 0.
:param maxSynapsesPerSegment: (int) The maximum number of synapses allowed in
a segment. This is used to turn on "fixed size CLA" mode. When in
effect, ``globalDecay`` is not applicable and must be set to 0, and
``maxAge`` must be set to 0. When this is used (> 0),
``maxSegmentsPerCell`` must also be > 0.
:param outputType: (string) Can be one of the following (default ``normal``):
- ``normal``: output the OR of the active and predicted state.
- ``activeState``: output only the active state.
- ``activeState1CellPerCol``: output only the active state, and at most
1 cell/column. If more than 1 cell is active in a column, the one
with the highest confidence is sent up.
"""
  def __init__(self,
               numberOfCols=500,
               cellsPerColumn=10,
               initialPerm=0.11,
               connectedPerm=0.50,
               minThreshold=8,
               newSynapseCount=15,
               permanenceInc=0.10,
               permanenceDec=0.10,
               permanenceMax=1.0,
               globalDecay=0.10,
               activationThreshold=12,
               doPooling=False,
               segUpdateValidDuration=5,
               burnIn=2,
               collectStats=False,
               seed=42,
               verbosity=VERBOSITY,
               checkSynapseConsistency=False,  # for cpp only -- ignored
               pamLength=1,
               maxInfBacktrack=10,
               maxLrnBacktrack=5,
               maxAge=100000,
               maxSeqLength=32,
               maxSegmentsPerCell=-1,
               maxSynapsesPerSegment=-1,
               outputType='normal',
              ):
    # See the class docstring for the meaning of each parameter.
    ConsolePrinterMixin.__init__(self, verbosity)

    # Check arguments
    assert pamLength > 0, "This implementation must have pamLength > 0"

    # Fixed size CLA mode?
    # Either of the two limits being set implies fixed-size mode, which is
    # incompatible with global decay and aging (both must be disabled).
    if maxSegmentsPerCell != -1 or maxSynapsesPerSegment != -1:
      assert (maxSegmentsPerCell > 0 and maxSynapsesPerSegment > 0)
      assert (globalDecay == 0.0)
      assert (maxAge == 0)
      assert maxSynapsesPerSegment >= newSynapseCount, ("TM requires that "
          "maxSynapsesPerSegment >= newSynapseCount. (Currently %s >= %s)" % (
          maxSynapsesPerSegment, newSynapseCount))

    # Seed random number generator
    # A negative seed means "seed from the system RNG" (non-reproducible).
    if seed >= 0:
      self._random = Random(seed)
    else:
      self._random = Random(numpy.random.randint(256))

    # Store creation parameters
    self.numberOfCols = numberOfCols
    self.cellsPerColumn = cellsPerColumn
    self._numberOfCells = numberOfCols * cellsPerColumn
    # Permanence-related values are kept as float32 to match the NTA real
    # type used by the C++ implementation.
    self.initialPerm = numpy.float32(initialPerm)
    self.connectedPerm = numpy.float32(connectedPerm)
    self.minThreshold = minThreshold
    self.newSynapseCount = newSynapseCount
    self.permanenceInc = numpy.float32(permanenceInc)
    self.permanenceDec = numpy.float32(permanenceDec)
    self.permanenceMax = numpy.float32(permanenceMax)
    self.globalDecay = numpy.float32(globalDecay)
    self.activationThreshold = activationThreshold
    ## Allows to turn off pooling
    self.doPooling = doPooling
    self.segUpdateValidDuration = segUpdateValidDuration
    ## Used for evaluating the prediction score
    self.burnIn = burnIn
    ## If true, collect training/inference stats
    self.collectStats = collectStats
    self.verbosity = verbosity
    self.pamLength = pamLength
    self.maxAge = maxAge
    self.maxInfBacktrack = maxInfBacktrack
    self.maxLrnBacktrack = maxLrnBacktrack
    self.maxSeqLength = maxSeqLength
    self.maxSegmentsPerCell = maxSegmentsPerCell
    self.maxSynapsesPerSegment = maxSynapsesPerSegment
    assert outputType in ('normal', 'activeState', 'activeState1CellPerCol')
    self.outputType = outputType

    # No point having larger expiration if we are not doing pooling
    if not doPooling:
      self.segUpdateValidDuration = 1

    # Create data structures
    self.activeColumns = []  # list of indices of active columns

    ## Cells are indexed by column and index in the column
    # Every self.cells[column][index] contains a list of segments
    # Each segment is a structure of class Segment
    self.cells = []
    for c in xrange(self.numberOfCols):
      self.cells.append([])
      for _ in xrange(self.cellsPerColumn):
        self.cells[c].append([])

    self.lrnIterationIdx = 0
    self.iterationIdx = 0
    ## unique segment id, so we can put segments in hashes
    self.segID = 0
    # Most recent output vector; set by compute(), used for checkPrediction.
    self.currentOutput = None  # for checkPrediction

    ## pamCounter gets reset to pamLength whenever we detect that the learning
    # state is making good predictions (at least half the columns predicted).
    # Whenever we do not make a good prediction, we decrement pamCounter.
    # When pamCounter reaches 0, we start the learn state over again at start
    # cells.
    self.pamCounter = self.pamLength

    ## If True, the TM will compute a signature for each sequence
    self.collectSequenceStats = False

    ## This gets set when we receive a reset and cleared on the first compute
    # following a reset.
    self.resetCalled = False

    ## We keep track of the average input density here
    self.avgInputDensity = None

    ## Keeps track of the length of the sequence currently being learned.
    self.learnedSeqLength = 0
    ## Keeps track of the moving average of all learned sequence length.
    self.avgLearnedSeqLength = 0.0

    # Set attributes intialized later on.
    self._prevLrnPatterns = None
    self._prevInfPatterns = None
    self.segmentUpdates = None

    # Set attributes that are initialized in _initEphemerals.
    self._stats = None
    self.cellConfidence = None
    self.colConfidence = None
    self.lrnActiveState = None
    self.infActiveState = None
    self.lrnPredictedState = None
    self.infPredictedState = None
    self._internalStats = None

    # All other members are ephemeral - don't need to be saved when we save
    # state. So they get separated out into _initEphemerals, which also
    # gets called when we are being restored from a saved state (via
    # __setstate__)
    self._initEphemerals()
def _getEphemeralMembers(self):
"""
List of our member variables that we don't need to be saved.
"""
return []
def _initEphemerals(self):
"""
Initialize all ephemeral members after being restored to a pickled state.
"""
## We store the lists of segments updates, per cell, so that they can be
# applied later during learning, when the cell gets bottom-up activation.
# We store one list per cell. The lists are identified with a hash key which
# is a tuple (column index, cell index).
self.segmentUpdates = {}
# Allocate and reset all stats
self.resetStats()
# NOTE: We don't use the same backtrack buffer for inference and learning
# because learning has a different metric for determining if an input from
# the past is potentially useful again for backtracking.
#
# Our inference backtrack buffer. This keeps track of up to
# maxInfBacktrack of previous input. Each entry is a list of active column
# inputs.
self._prevInfPatterns = []
# Our learning backtrack buffer. This keeps track of up to maxLrnBacktrack
# of previous input. Each entry is a list of active column inputs
self._prevLrnPatterns = []
# Keep integers rather than bools. Float?
stateShape = (self.numberOfCols, self.cellsPerColumn)
self.lrnActiveState = {}
self.lrnActiveState["t"] = numpy.zeros(stateShape, dtype="int8")
self.lrnActiveState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.lrnPredictedState = {}
self.lrnPredictedState["t"] = numpy.zeros(stateShape, dtype="int8")
self.lrnPredictedState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState = {}
self.infActiveState["t"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["backup"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["candidate"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState = {}
self.infPredictedState["t"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["backup"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["candidate"] = numpy.zeros(stateShape, dtype="int8")
self.cellConfidence = {}
self.cellConfidence["t"] = numpy.zeros(stateShape, dtype="float32")
self.cellConfidence["t-1"] = numpy.zeros(stateShape, dtype="float32")
self.cellConfidence["candidate"] = numpy.zeros(stateShape, dtype="float32")
self.colConfidence = {}
self.colConfidence["t"] = numpy.zeros(self.numberOfCols, dtype="float32")
self.colConfidence["t-1"] = numpy.zeros(self.numberOfCols, dtype="float32")
self.colConfidence["candidate"] = numpy.zeros(self.numberOfCols,
dtype="float32")
def __getstate__(self):
""" @internal
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
for ephemeralMemberName in self._getEphemeralMembers():
state.pop(ephemeralMemberName, None)
state['_random'] = self._getRandomState()
state['version'] = TM_VERSION
return state
def __setstate__(self, state):
""" @internal
Set the state of ourself from a serialized state.
"""
self._setRandomState(state['_random'])
del state['_random']
version = state.pop('version')
assert version == TM_VERSION
self.__dict__.update(state)
@staticmethod
def getSchema():
return BacktrackingTMProto
  def write(self, proto):
    """Populate serialization proto instance.

    :param proto: (BacktrackingTMProto) the proto instance to populate
    """
    proto.version = TM_VERSION
    self._random.write(proto.random)
    # Scalar construction parameters. Permanence-related values are cast to
    # float because they may be held internally as numpy scalars.
    proto.numberOfCols = self.numberOfCols
    proto.cellsPerColumn = self.cellsPerColumn
    proto.initialPerm = float(self.initialPerm)
    proto.connectedPerm = float(self.connectedPerm)
    proto.minThreshold = self.minThreshold
    proto.newSynapseCount = self.newSynapseCount
    proto.permanenceInc = float(self.permanenceInc)
    proto.permanenceDec = float(self.permanenceDec)
    proto.permanenceMax = float(self.permanenceMax)
    proto.globalDecay = float(self.globalDecay)
    proto.activationThreshold = self.activationThreshold
    proto.doPooling = self.doPooling
    proto.segUpdateValidDuration = self.segUpdateValidDuration
    proto.burnIn = self.burnIn
    proto.collectStats = self.collectStats
    proto.verbosity = self.verbosity
    proto.pamLength = self.pamLength
    proto.maxAge = self.maxAge
    proto.maxInfBacktrack = self.maxInfBacktrack
    proto.maxLrnBacktrack = self.maxLrnBacktrack
    proto.maxSeqLength = self.maxSeqLength
    proto.maxSegmentsPerCell = self.maxSegmentsPerCell
    proto.maxSynapsesPerSegment = self.maxSynapsesPerSegment
    proto.outputType = self.outputType
    proto.activeColumns = self.activeColumns
    # Segments: a three-level nesting of column -> cell -> segment list.
    cellListProto = proto.init("cells", len(self.cells))
    for i, columnSegments in enumerate(self.cells):
      columnSegmentsProto = cellListProto.init(i, len(columnSegments))
      for j, cellSegments in enumerate(columnSegments):
        cellSegmentsProto = columnSegmentsProto.init(j, len(cellSegments))
        for k, segment in enumerate(cellSegments):
          segment.write(cellSegmentsProto[k])
    proto.lrnIterationIdx = self.lrnIterationIdx
    proto.iterationIdx = self.iterationIdx
    proto.segID = self.segID
    # currentOutput is optional; serialize it as a none/list union.
    if self.currentOutput is None:
      proto.currentOutput.none = None
    else:
      proto.currentOutput.list = self.currentOutput.tolist()
    proto.pamCounter = self.pamCounter
    proto.collectSequenceStats = self.collectSequenceStats
    proto.resetCalled = self.resetCalled
    # In case of None, use negative value as placeholder for serialization
    proto.avgInputDensity = self.avgInputDensity or -1.0
    proto.learnedSeqLength = self.learnedSeqLength
    proto.avgLearnedSeqLength = self.avgLearnedSeqLength
    proto.prevLrnPatterns = self._prevLrnPatterns
    proto.prevInfPatterns = self._prevInfPatterns
    # Pending segment updates: keyed by (columnIdx, cellIdx); each value is a
    # list of (lrnIterationIdx, SegmentUpdate) pairs.
    segmentUpdatesListProto = proto.init("segmentUpdates",
                                         len(self.segmentUpdates))
    for i, (key, updates) in enumerate(self.segmentUpdates.iteritems()):
      cellSegmentUpdatesProto = segmentUpdatesListProto[i]
      cellSegmentUpdatesProto.columnIdx = key[0]
      cellSegmentUpdatesProto.cellIdx = key[1]
      segmentUpdatesProto = cellSegmentUpdatesProto.init("segmentUpdates",
                                                         len(updates))
      for j, (lrnIterationIdx, segmentUpdate) in enumerate(updates):
        segmentUpdateWrapperProto = segmentUpdatesProto[j]
        segmentUpdateWrapperProto.lrnIterationIdx = lrnIterationIdx
        segmentUpdate.write(segmentUpdateWrapperProto.segmentUpdate)
    # The numpy state buffers are flattened to plain lists for the proto.
    # self.cellConfidence
    proto.cellConfidenceT = self.cellConfidence["t"].tolist()
    proto.cellConfidenceT1 = self.cellConfidence["t-1"].tolist()
    proto.cellConfidenceCandidate = self.cellConfidence["candidate"].tolist()
    # self.colConfidence
    proto.colConfidenceT = self.colConfidence["t"].tolist()
    proto.colConfidenceT1 = self.colConfidence["t-1"].tolist()
    proto.colConfidenceCandidate = self.colConfidence["candidate"].tolist()
    # self.lrnActiveState
    proto.lrnActiveStateT = self.lrnActiveState["t"].tolist()
    proto.lrnActiveStateT1 = self.lrnActiveState["t-1"].tolist()
    # self.infActiveState
    proto.infActiveStateT = self.infActiveState["t"].tolist()
    proto.infActiveStateT1 = self.infActiveState["t-1"].tolist()
    proto.infActiveStateBackup = self.infActiveState["backup"].tolist()
    proto.infActiveStateCandidate = self.infActiveState["candidate"].tolist()
    # self.lrnPredictedState
    proto.lrnPredictedStateT = self.lrnPredictedState["t"].tolist()
    proto.lrnPredictedStateT1 = self.lrnPredictedState["t-1"].tolist()
    # self.infPredictedState
    proto.infPredictedStateT = self.infPredictedState["t"].tolist()
    proto.infPredictedStateT1 = self.infPredictedState["t-1"].tolist()
    proto.infPredictedStateBackup = self.infPredictedState["backup"].tolist()
    proto.infPredictedStateCandidate = self.infPredictedState["candidate"].tolist()
    proto.consolePrinterVerbosity = self.consolePrinterVerbosity
  @classmethod
  def read(cls, proto):
    """Deserialize from proto instance.

    :param proto: (BacktrackingTMProto) the proto instance to read from
    :returns: (BacktrackingTM) a freshly constructed instance; __init__ is
              bypassed via object.__new__
    """
    assert proto.version == TM_VERSION
    obj = object.__new__(cls)
    obj._random = Random()
    obj._random.read(proto.random)
    # Scalar construction parameters, coerced back to the native types the
    # rest of the code expects (int / numpy.float32).
    obj.numberOfCols = int(proto.numberOfCols)
    obj.cellsPerColumn = int(proto.cellsPerColumn)
    obj._numberOfCells = obj.numberOfCols * obj.cellsPerColumn
    obj.initialPerm = numpy.float32(proto.initialPerm)
    obj.connectedPerm = numpy.float32(proto.connectedPerm)
    obj.minThreshold = int(proto.minThreshold)
    obj.newSynapseCount = int(proto.newSynapseCount)
    obj.permanenceInc = numpy.float32(proto.permanenceInc)
    obj.permanenceDec = numpy.float32(proto.permanenceDec)
    obj.permanenceMax = numpy.float32(proto.permanenceMax)
    obj.globalDecay = numpy.float32(proto.globalDecay)
    obj.activationThreshold = int(proto.activationThreshold)
    obj.doPooling = proto.doPooling
    obj.segUpdateValidDuration = int(proto.segUpdateValidDuration)
    obj.burnIn = int(proto.burnIn)
    obj.collectStats = proto.collectStats
    obj.verbosity = int(proto.verbosity)
    obj.pamLength = int(proto.pamLength)
    obj.maxAge = int(proto.maxAge)
    obj.maxInfBacktrack = int(proto.maxInfBacktrack)
    obj.maxLrnBacktrack = int(proto.maxLrnBacktrack)
    obj.maxSeqLength = int(proto.maxSeqLength)
    obj.maxSegmentsPerCell = proto.maxSegmentsPerCell
    obj.maxSynapsesPerSegment = proto.maxSynapsesPerSegment
    obj.outputType = proto.outputType
    obj.activeColumns = [int(col) for col in proto.activeColumns]
    # Rebuild the column -> cell -> segment nesting.
    obj.cells = [[] for _ in xrange(len(proto.cells))]
    for columnSegments, columnSegmentsProto in zip(obj.cells, proto.cells):
      columnSegments.extend([[] for _ in xrange(len(columnSegmentsProto))])
      for cellSegments, cellSegmentsProto in zip(columnSegments,
                                                 columnSegmentsProto):
        for segmentProto in cellSegmentsProto:
          segment = Segment.read(segmentProto, obj)
          cellSegments.append(segment)
    obj.lrnIterationIdx = int(proto.lrnIterationIdx)
    obj.iterationIdx = int(proto.iterationIdx)
    obj.segID = int(proto.segID)
    obj.pamCounter = int(proto.pamCounter)
    obj.collectSequenceStats = proto.collectSequenceStats
    obj.resetCalled = proto.resetCalled
    avgInputDensity = proto.avgInputDensity
    if avgInputDensity < 0.0:
      # Negative value placeholder indicates None
      obj.avgInputDensity = None
    else:
      obj.avgInputDensity = avgInputDensity
    obj.learnedSeqLength = int(proto.learnedSeqLength)
    obj.avgLearnedSeqLength = proto.avgLearnedSeqLength
    # Initialize various structures (allocates the numpy state buffers that
    # the copyto() calls below fill in).
    obj._initEphemerals()
    # currentOutput was serialized as a none/list union.
    if proto.currentOutput.which() == "none":
      obj.currentOutput = None
    else:
      obj.currentOutput = numpy.array(proto.currentOutput.list,
                                      dtype='float32')
    # NOTE(review): write() serializes self._prevLrnPatterns/_prevInfPatterns,
    # but this appends to obj.prevLrnPatterns/prevInfPatterns (no leading
    # underscore) — verify both names reach the same lists, otherwise the
    # deserialized history is dropped.
    for pattern in proto.prevLrnPatterns:
      obj.prevLrnPatterns.append([v for v in pattern])
    for pattern in proto.prevInfPatterns:
      obj.prevInfPatterns.append([v for v in pattern])
    # Rebuild the pending segment-update queue keyed by (columnIdx, cellIdx).
    for cellWrapperProto in proto.segmentUpdates:
      key = (cellWrapperProto.columnIdx, cellWrapperProto.cellIdx)
      value = []
      for updateWrapperProto in cellWrapperProto.segmentUpdates:
        segmentUpdate = SegmentUpdate.read(updateWrapperProto.segmentUpdate, obj)
        value.append((int(updateWrapperProto.lrnIterationIdx), segmentUpdate))
      obj.segmentUpdates[key] = value
    # Copy the serialized lists back into the pre-allocated numpy buffers.
    # cellConfidence
    numpy.copyto(obj.cellConfidence["t"], proto.cellConfidenceT)
    numpy.copyto(obj.cellConfidence["t-1"], proto.cellConfidenceT1)
    numpy.copyto(obj.cellConfidence["candidate"],
                 proto.cellConfidenceCandidate)
    # colConfidence
    numpy.copyto(obj.colConfidence["t"], proto.colConfidenceT)
    numpy.copyto(obj.colConfidence["t-1"], proto.colConfidenceT1)
    numpy.copyto(obj.colConfidence["candidate"], proto.colConfidenceCandidate)
    # lrnActiveState
    numpy.copyto(obj.lrnActiveState["t"], proto.lrnActiveStateT)
    numpy.copyto(obj.lrnActiveState["t-1"], proto.lrnActiveStateT1)
    # infActiveState
    numpy.copyto(obj.infActiveState["t"], proto.infActiveStateT)
    numpy.copyto(obj.infActiveState["t-1"], proto.infActiveStateT1)
    numpy.copyto(obj.infActiveState["backup"], proto.infActiveStateBackup)
    numpy.copyto(obj.infActiveState["candidate"],
                 proto.infActiveStateCandidate)
    # lrnPredictedState
    numpy.copyto(obj.lrnPredictedState["t"], proto.lrnPredictedStateT)
    numpy.copyto(obj.lrnPredictedState["t-1"], proto.lrnPredictedStateT1)
    # infPredictedState
    numpy.copyto(obj.infPredictedState["t"], proto.infPredictedStateT)
    numpy.copyto(obj.infPredictedState["t-1"], proto.infPredictedStateT1)
    numpy.copyto(obj.infPredictedState["backup"],
                 proto.infPredictedStateBackup)
    numpy.copyto(obj.infPredictedState["candidate"],
                 proto.infPredictedStateCandidate)
    obj.consolePrinterVerbosity = int(proto.consolePrinterVerbosity)
    return obj
  def __getattr__(self, name):
    """ @internal
    Patch __getattr__ so that we can catch the first access to 'cells' and load.
    This function is only called when we try to access an attribute that doesn't
    exist. We purposely make sure that "self.cells" doesn't exist after
    unpickling so that we'll hit this, then we can load it on the first access.
    If this is called at any other time, it will raise an AttributeError.
    That's because:
    - If 'name' is "cells", after the first call, self._realCells won't exist
      so we'll get an implicit AttributeError.
    - If 'name' isn't "cells", I'd expect our super wouldn't have __getattr__,
      so we'll raise our own Attribute error. If the super did get __getattr__,
      we'll just return what it gives us.
    """
    try:
      return super(BacktrackingTM, self).__getattr__(name)
    except AttributeError:
      # Re-raise with a message that explicitly names the missing attribute.
      raise AttributeError("'TM' object has no attribute '%s'" % name)
  def __del__(self):
    # Intentionally a no-op destructor: this instance holds no resources
    # that need explicit release.
    pass
  def __ne__(self, tm):
    """Inverse of __eq__: True when the two TM instances differ."""
    return not self == tm
  def __eq__(self, tm):
    """Two TM instances compare equal when diff() reports no differences."""
    return not self.diff(tm)
  def diff(self, tm):
    """
    Compute the differences between this TM and ``tm``.

    Both instances are reduced to their __getstate__() dictionaries and walked
    recursively. Each difference is reported as a tuple
    ``(keyPath, selfValue, otherValue)`` where ``keyPath`` is the tuple of
    dict keys / sequence indices leading to the differing value. An empty
    list means the instances are equivalent (this is what __eq__ relies on).

    :param tm: the other TM instance to compare against
    :returns: (list) of (keyPath, valueA, valueB) tuples; empty when equal
    """
    diff = []
    toCheck = [((), self.__getstate__(), tm.__getstate__())]
    while toCheck:
      keys, a, b = toCheck.pop()
      if type(a) != type(b):
        diff.append((keys, a, b))
      elif isinstance(a, dict):
        keys1 = set(a.keys())
        keys2 = set(b.keys())
        # If there are missing keys, add them to the diff.
        if keys1 != keys2:
          for k in keys1 - keys2:
            diff.append((keys + (k,), a[k], None))
          for k in keys2 - keys1:
            diff.append((keys + (k,), None, b[k]))
        # For matching keys, add the values to the list of things to check
        for k in keys1.intersection(keys2):
          toCheck.append((keys + (k,), a[k], b[k]))
      elif isinstance(a, list) or isinstance(a, tuple):
        if len(a) != len(b):
          diff.append((keys + ('len',), len(a), len(b)))
        else:
          for i in xrange(len(a)):
            toCheck.append((keys + (i,), a[i], b[i]))
      elif isinstance(a, numpy.ndarray):
        # Arrays are compared wholesale rather than element-recursively.
        if len(a) != len(b):
          diff.append((keys + ('len',), len(a), len(b)))
        elif not numpy.array_equal(a, b):
          diff.append((keys, a, b))
      elif isinstance(a, Random):
        if a.getState() != b.getState():
          diff.append((keys, a.getState(), b.getState()))
      elif (a.__class__.__name__ == 'Cells4' and
            b.__class__.__name__ == 'Cells4'):
        # Native Cells4 instances are skipped rather than compared here.
        continue
      else:
        # Leaf value: anything comparable with !=. The probe below surfaces
        # types whose != does not yield a plain boolean.
        try:
          _ = a != b
        except ValueError:
          raise ValueError(type(a))
        if a != b:
          diff.append((keys, a, b))
    return diff
  def getLearnActiveStateT(self):
    """:returns: the learn-phase active state array at time step 't'."""
    return self.lrnActiveState['t']
  def saveToFile(self, filePath):
    """
    Save the TM state to a file. No-op in this base class; implemented in
    :meth:`nupic.algorithms.backtracking_tm_cpp.BacktrackingTMCPP.saveToFile`.

    :param filePath: destination file path (unused here)
    """
    pass
  def loadFromFile(self, filePath):
    """
    Load the TM state from a file. No-op in this base class; implemented in
    :meth:`nupic.algorithms.backtracking_tm_cpp.BacktrackingTMCPP.loadFromFile`.

    :param filePath: source file path (unused here)
    """
    pass
  def _getRandomState(self):
    """ @internal
    Return the random number state.
    This is used during unit testing to generate repeatable results.

    :returns: (str) the pickled state of ``self._random``
    """
    return pickle.dumps(self._random)
  def _setRandomState(self, state):
    """ @internal Set the random number state.
    This is used during unit testing to generate repeatable results.

    :param state: (str) pickled RNG state, as produced by _getRandomState()
    """
    self._random = pickle.loads(state)
  def reset(self,):
    """
    Reset the state of all cells.
    This is normally used between sequences while training. All internal states
    are reset to 0.
    """
    if self.verbosity >= 3:
      print "\n==== RESET ====="
    # Zero the learn and inference state buffers at both time steps.
    self.lrnActiveState['t-1'].fill(0)
    self.lrnActiveState['t'].fill(0)
    self.lrnPredictedState['t-1'].fill(0)
    self.lrnPredictedState['t'].fill(0)
    self.infActiveState['t-1'].fill(0)
    self.infActiveState['t'].fill(0)
    self.infPredictedState['t-1'].fill(0)
    self.infPredictedState['t'].fill(0)
    self.cellConfidence['t-1'].fill(0)
    self.cellConfidence['t'].fill(0)
    # Flush the segment update queue
    self.segmentUpdates = {}
    self._internalStats['nInfersSinceReset'] = 0
    #To be removed
    self._internalStats['curPredictionScore'] = 0
    #New prediction score
    self._internalStats['curPredictionScore2'] = 0
    self._internalStats['curFalseNegativeScore'] = 0
    self._internalStats['curFalsePositiveScore'] = 0
    self._internalStats['curMissing'] = 0
    self._internalStats['curExtra'] = 0
    # When a reset occurs, set prevSequenceSignature to the signature of the
    # just-completed sequence and start accumulating histogram for the next
    # sequence.
    self._internalStats['prevSequenceSignature'] = None
    if self.collectSequenceStats:
      if self._internalStats['confHistogram'].sum() > 0:
        sig = self._internalStats['confHistogram'].copy()
        # NOTE(review): ndarray.reshape() returns a new array and this call
        # discards its result, so ``sig`` keeps its 2-D shape — confirm
        # whether an in-place flatten was intended here.
        sig.reshape(self.numberOfCols * self.cellsPerColumn)
        self._internalStats['prevSequenceSignature'] = sig
      self._internalStats['confHistogram'].fill(0)
    self.resetCalled = True
    # Clear out input history
    self._prevInfPatterns = []
    self._prevLrnPatterns = []
def resetStats(self):
"""
Reset the learning and inference stats. This will usually be called by
user code at the start of each inference run (for a particular data set).
"""
self._stats = dict()
self._internalStats = dict()
self._internalStats['nInfersSinceReset'] = 0
self._internalStats['nPredictions'] = 0
#New prediction score
self._internalStats['curPredictionScore'] = 0
self._internalStats['curPredictionScore2'] = 0
self._internalStats['predictionScoreTotal2'] = 0
self._internalStats['curFalseNegativeScore'] = 0
self._internalStats['falseNegativeScoreTotal'] = 0
self._internalStats['curFalsePositiveScore'] = 0
self._internalStats['falsePositiveScoreTotal'] = 0
self._internalStats['pctExtraTotal'] = 0
self._internalStats['pctMissingTotal'] = 0
self._internalStats['curMissing'] = 0
self._internalStats['curExtra'] = 0
self._internalStats['totalMissing'] = 0
self._internalStats['totalExtra'] = 0
# Sequence signature statistics. Note that we don't reset the sequence
# signature list itself.
self._internalStats['prevSequenceSignature'] = None
if self.collectSequenceStats:
self._internalStats['confHistogram'] = (
numpy.zeros((self.numberOfCols, self.cellsPerColumn),
dtype="float32"))
  def getStats(self):
    """
    Return the current learning and inference stats. This returns a dict
    containing all the learning and inference stats we have collected since the
    last :meth:`resetStats` call. If :class:`BacktrackingTM` ``collectStats``
    parameter is False, then None is returned.
    :returns: (dict) The following keys are returned in the dict when
      ``collectStats`` is True:
      - ``nPredictions``: the number of predictions. This is the total
        number of inferences excluding burn-in and the last inference.
      - ``curPredictionScore``: the score for predicting the current input
        (predicted during the previous inference)
      - ``curMissing``: the number of bits in the current input that were
        not predicted to be on.
      - ``curExtra``: the number of bits in the predicted output that are
        not in the next input
      - ``predictionScoreTotal``: the sum of every prediction score to date
      - ``predictionScoreAvg``: ``predictionScoreTotal / nPredictions``
      - ``pctMissingTotal``: the total number of bits that were missed over
        all predictions
      - ``pctMissingAvg``: ``pctMissingTotal / nPredictions``
      - ``prevSequenceSignature``: signature for the sequence immediately
        preceding the last reset. 'None' if ``collectSequenceStats`` is
        False.
    """
    if not self.collectStats:
      return None
    # Copy current counters out of the internal stats dict.
    self._stats['nPredictions'] = self._internalStats['nPredictions']
    self._stats['curMissing'] = self._internalStats['curMissing']
    self._stats['curExtra'] = self._internalStats['curExtra']
    self._stats['totalMissing'] = self._internalStats['totalMissing']
    self._stats['totalExtra'] = self._internalStats['totalExtra']
    # Guard against division by zero before any predictions have been made.
    nPredictions = max(1, self._stats['nPredictions'])
    # New prediction score
    self._stats['curPredictionScore2'] = (
      self._internalStats['curPredictionScore2'])
    self._stats['predictionScoreAvg2'] = (
      self._internalStats['predictionScoreTotal2'] / nPredictions)
    self._stats['curFalseNegativeScore'] = (
      self._internalStats['curFalseNegativeScore'])
    self._stats['falseNegativeAvg'] = (
      self._internalStats['falseNegativeScoreTotal'] / nPredictions)
    self._stats['curFalsePositiveScore'] = (
      self._internalStats['curFalsePositiveScore'])
    self._stats['falsePositiveAvg'] = (
      self._internalStats['falsePositiveScoreTotal'] / nPredictions)
    self._stats['pctExtraAvg'] = (self._internalStats['pctExtraTotal'] /
                                  nPredictions)
    self._stats['pctMissingAvg'] = (self._internalStats['pctMissingTotal'] /
                                    nPredictions)
    # This will be None if collectSequenceStats is False
    self._stats['prevSequenceSignature'] = (
      self._internalStats['prevSequenceSignature'])
    return self._stats
  def _updateStatsInferEnd(self, stats, bottomUpNZ, predictedState,
                           colConfidence):
    """
    Called at the end of learning and inference, this routine will update
    a number of stats in our _internalStats dictionary, including our computed
    prediction score.
    :param stats internal stats dictionary
    :param bottomUpNZ list of the active bottom-up inputs
    :param predictedState The columns we predicted on the last time step (should
           match the current bottomUpNZ in the best case)
    :param colConfidence Column confidences we determined on the last time step
    """
    # Return if not collecting stats
    if not self.collectStats:
      return
    stats['nInfersSinceReset'] += 1
    # Compute the prediction score, how well the prediction from the last
    # time step predicted the current bottom-up input
    (numExtra2, numMissing2, confidences2) = self._checkPrediction(
        patternNZs=[bottomUpNZ], output=predictedState,
        colConfidence=colConfidence)
    predictionScore, positivePredictionScore, negativePredictionScore = (
        confidences2[0])
    # Store the stats that don't depend on burn-in
    stats['curPredictionScore2'] = float(predictionScore)
    stats['curFalseNegativeScore'] = 1.0 - float(positivePredictionScore)
    stats['curFalsePositiveScore'] = float(negativePredictionScore)
    stats['curMissing'] = numMissing2
    stats['curExtra'] = numExtra2
    # If we are passed the burn-in period, update the accumulated stats
    # Here's what various burn-in values mean:
    #   0: try to predict the first element of each sequence and all subsequent
    #   1: try to predict the second element of each sequence and all subsequent
    #   etc.
    if stats['nInfersSinceReset'] <= self.burnIn:
      return
    # Burn-in related stats
    stats['nPredictions'] += 1
    # Percentages below are relative to the number of active input bits.
    numExpected = max(1.0, float(len(bottomUpNZ)))
    stats['totalMissing'] += numMissing2
    stats['totalExtra'] += numExtra2
    stats['pctExtraTotal'] += 100.0 * numExtra2 / numExpected
    stats['pctMissingTotal'] += 100.0 * numMissing2 / numExpected
    stats['predictionScoreTotal2'] += float(predictionScore)
    stats['falseNegativeScoreTotal'] += 1.0 - float(positivePredictionScore)
    stats['falsePositiveScoreTotal'] += float(negativePredictionScore)
    if self.collectSequenceStats:
      # Collect cell confidences for every cell that correctly predicted current
      # bottom up input. Normalize confidence across each column
      cc = self.cellConfidence['t-1'] * self.infActiveState['t']
      sconf = cc.sum(axis=1)
      for c in range(self.numberOfCols):
        if sconf[c] > 0:
          cc[c, :] /= sconf[c]
      # Update cell confidence histogram: add column-normalized confidence
      # scores to the histogram
      self._internalStats['confHistogram'] += cc
  def printState(self, aState):
    """
    Print an integer array that is the same shape as activeState.

    :param aState: 2-D array indexed as aState[column, cellIndex]; one text
           row is printed per cell index, with an extra gap after every 10
           columns
    """
    def formatRow(var, i):
      s = ''
      for c in range(self.numberOfCols):
        if c > 0 and c % 10 == 0:
          s += ' '
        s += str(var[c, i])
      s += ' '
      return s
    for i in xrange(self.cellsPerColumn):
      print formatRow(aState, i)
  def printConfidence(self, aState, maxCols = 20):
    """
    Print a floating point array that is the same shape as activeState.

    :param aState: 2-D array indexed as aState[column, cellIndex]; values are
           printed with '%5.3f' formatting, one text row per cell index
    :param maxCols: (int) print at most this many columns
    """
    def formatFPRow(var, i):
      s = ''
      for c in range(min(maxCols, self.numberOfCols)):
        if c > 0 and c % 10 == 0:
          s += '   '
        s += ' %5.3f' % var[c, i]
      s += ' '
      return s
    for i in xrange(self.cellsPerColumn):
      print formatFPRow(aState, i)
  def printColConfidence(self, aState, maxCols = 20):
    """
    Print up to maxCols number from a flat floating point array.

    :param aState: 1-D array of per-column values, printed with '%5.3f'
           formatting on a single line
    :param maxCols: (int) print at most this many columns
    """
    def formatFPRow(var):
      s = ''
      for c in range(min(maxCols, self.numberOfCols)):
        if c > 0 and c % 10 == 0:
          s += '   '
        s += ' %5.3f' % var[c]
      s += ' '
      return s
    print formatFPRow(aState)
  def printStates(self, printPrevious = True, printLearnState = True):
    """
    Print the inference active/predicted state arrays and, optionally, the
    learn-phase equivalents, one text row per cell index.

    :param printPrevious: (bool) when True, print the t-1 array on the same
           line, to the left of the t array
    :param printLearnState: (bool) when True, also print the learn-phase
           active and predicted states
    """
    def formatRow(var, i):
      s = ''
      for c in range(self.numberOfCols):
        if c > 0 and c % 10 == 0:
          s += ' '
        s += str(var[c, i])
      s += ' '
      return s
    print "\nInference Active state"
    for i in xrange(self.cellsPerColumn):
      if printPrevious:
        print formatRow(self.infActiveState['t-1'], i),
      print formatRow(self.infActiveState['t'], i)
    print "Inference Predicted state"
    for i in xrange(self.cellsPerColumn):
      if printPrevious:
        print formatRow(self.infPredictedState['t-1'], i),
      print formatRow(self.infPredictedState['t'], i)
    if printLearnState:
      print "\nLearn Active state"
      for i in xrange(self.cellsPerColumn):
        if printPrevious:
          print formatRow(self.lrnActiveState['t-1'], i),
        print formatRow(self.lrnActiveState['t'], i)
      print "Learn Predicted state"
      for i in xrange(self.cellsPerColumn):
        if printPrevious:
          print formatRow(self.lrnPredictedState['t-1'], i),
        print formatRow(self.lrnPredictedState['t'], i)
def printOutput(self, y):
"""
TODO: document
:param y:
:return:
"""
print "Output"
for i in xrange(self.cellsPerColumn):
for c in xrange(self.numberOfCols):
print int(y[c, i]),
print
def printInput(self, x):
"""
TODO: document
:param x:
:return:
"""
print "Input"
for c in xrange(self.numberOfCols):
print int(x[c]),
print
  def printParameters(self):
    """
    Print the parameter settings for the TM.
    """
    # Geometry and activation thresholds.
    print "numberOfCols=", self.numberOfCols
    print "cellsPerColumn=", self.cellsPerColumn
    print "minThreshold=", self.minThreshold
    print "newSynapseCount=", self.newSynapseCount
    print "activationThreshold=", self.activationThreshold
    print
    # Permanence / learning-rate parameters.
    print "initialPerm=", self.initialPerm
    print "connectedPerm=", self.connectedPerm
    print "permanenceInc=", self.permanenceInc
    print "permanenceDec=", self.permanenceDec
    print "permanenceMax=", self.permanenceMax
    print "globalDecay=", self.globalDecay
    print
    # Pooling / sequence-learning parameters.
    print "doPooling=", self.doPooling
    print "segUpdateValidDuration=", self.segUpdateValidDuration
    print "pamLength=", self.pamLength
  def printActiveIndices(self, state, andValues=False):
    """
    Print the list of ``[column, cellIdx]`` indices for each of the active cells
    in state.

    :param state: 1-D (per-column) or 2-D (column x cell) array; nonzero
           entries are printed grouped by column
    :param andValues: (bool) when True, also print each entry's value
    """
    if len(state.shape) == 2:
      (cols, cellIdxs) = state.nonzero()
    else:
      # 1-D input: treat every nonzero column as cell index 0.
      cols = state.nonzero()[0]
      cellIdxs = numpy.zeros(len(cols))
    if len(cols) == 0:
      print "NONE"
      return
    prevCol = -1
    for (col, cellIdx) in zip(cols, cellIdxs):
      if col != prevCol:
        # Close the previous column's bracket before opening a new one.
        if prevCol != -1:
          print "] ",
        print "Col %d: [" % (col),
        prevCol = col
      if andValues:
        if len(state.shape) == 2:
          value = state[col, cellIdx]
        else:
          value = state[col]
        print "%d: %s," % (cellIdx, value),
      else:
        print "%d," % (cellIdx),
    print "]"
  def printComputeEnd(self, output, learn=False):
    """
    Called at the end of inference to print out various diagnostic
    information based on the current verbosity level.

    :param output: (array) flat output vector; reshaped to
           (numberOfCols, cellsPerColumn) for the verbosity>=1 summary
    :param learn: (bool) whether this compute pass included learning
    """
    if self.verbosity >= 3:
      # Full dump: summary scores, then every state array, then (at the
      # highest levels) per-cell segment details.
      print "----- computeEnd summary: "
      print "learn:", learn
      print "numBurstingCols: %s, " % (
          self.infActiveState['t'].min(axis=1).sum()),
      print "curPredScore2: %s, " % (
          self._internalStats['curPredictionScore2']),
      print "curFalsePosScore: %s, " % (
          self._internalStats['curFalsePositiveScore']),
      print "1-curFalseNegScore: %s, " % (
          1 - self._internalStats['curFalseNegativeScore'])
      print "numSegments: ", self.getNumSegments(),
      print "avgLearnedSeqLength: ", self.avgLearnedSeqLength
      print "----- infActiveState (%d on) ------" % (
          self.infActiveState['t'].sum())
      self.printActiveIndices(self.infActiveState['t'])
      if self.verbosity >= 6:
        self.printState(self.infActiveState['t'])
      print "----- infPredictedState (%d on)-----" % (
          self.infPredictedState['t'].sum())
      self.printActiveIndices(self.infPredictedState['t'])
      if self.verbosity >= 6:
        self.printState(self.infPredictedState['t'])
      print "----- lrnActiveState (%d on) ------" % (
          self.lrnActiveState['t'].sum())
      self.printActiveIndices(self.lrnActiveState['t'])
      if self.verbosity >= 6:
        self.printState(self.lrnActiveState['t'])
      print "----- lrnPredictedState (%d on)-----" % (
          self.lrnPredictedState['t'].sum())
      self.printActiveIndices(self.lrnPredictedState['t'])
      if self.verbosity >= 6:
        self.printState(self.lrnPredictedState['t'])
      print "----- cellConfidence -----"
      self.printActiveIndices(self.cellConfidence['t'], andValues=True)
      if self.verbosity >= 6:
        self.printConfidence(self.cellConfidence['t'])
      print "----- colConfidence -----"
      self.printActiveIndices(self.colConfidence['t'], andValues=True)
      print "----- cellConfidence[t-1] for currently active cells -----"
      cc = self.cellConfidence['t-1'] * self.infActiveState['t']
      self.printActiveIndices(cc, andValues=True)
      if self.verbosity == 4:
        print "Cells, predicted segments only:"
        self.printCells(predictedOnly=True)
      elif self.verbosity >= 5:
        print "Cells, all segments:"
        self.printCells(predictedOnly=False)
      print
    elif self.verbosity >= 1:
      # Terse summary: just the active outputs.
      print "TM: learn:", learn
      print "TM: active outputs(%d):" % len(output.nonzero()[0]),
      self.printActiveIndices(output.reshape(self.numberOfCols,
                                             self.cellsPerColumn))
  def printSegmentUpdates(self):
    """
    Print every queued segment update, grouped by its (column, cell) key.
    """
    print "=== SEGMENT UPDATES ===, Num = ", len(self.segmentUpdates)
    for key, updateList in self.segmentUpdates.iteritems():
      c, i = key[0], key[1]
      print c, i, updateList
  def printCell(self, c, i, onlyActiveSegments=False):
    """
    Print details of every segment on cell (c, i); prints nothing for a cell
    with no segments.

    :param c: (int) column index
    :param i: (int) cell index within column
    :param onlyActiveSegments: (bool) when True, print only segments that are
           currently active against infActiveState['t']
    """
    if len(self.cells[c][i]) > 0:
      print "Column", c, "Cell", i, ":",
      print len(self.cells[c][i]), "segment(s)"
      for j, s in enumerate(self.cells[c][i]):
        isActive = self._isSegmentActive(s, self.infActiveState['t'])
        if not onlyActiveSegments or isActive:
          # Active segments are flagged with a leading '*'.
          isActiveStr = "*" if isActive else " "
          print "  %sSeg #%-3d" % (isActiveStr, j),
          s.debugPrint()
  def printCells(self, predictedOnly=False):
    """
    Print the segments of every cell, or only of currently predicted cells.

    :param predictedOnly: (bool) when True, restrict output to cells whose
           inference predicted state at 't' is on, and to their active
           segments
    """
    if predictedOnly:
      print "--- PREDICTED CELLS ---"
    else:
      print "--- ALL CELLS ---"
    print "Activation threshold=", self.activationThreshold,
    print "min threshold=", self.minThreshold,
    print "connected perm=", self.connectedPerm
    for c in xrange(self.numberOfCols):
      for i in xrange(self.cellsPerColumn):
        if not predictedOnly or self.infPredictedState['t'][c, i]:
          self.printCell(c, i, predictedOnly)
  def getNumSegmentsInCell(self, c, i):
    """
    :param c: (int) column index
    :param i: (int) cell index within column
    :returns: (int) the total number of segments on cell (c, i)
    """
    return len(self.cells[c][i])
def getNumSynapses(self):
"""
:returns: (int) the total number of synapses
"""
nSyns = self.getSegmentInfo()[1]
return nSyns
def getNumSynapsesPerSegmentAvg(self):
"""
:returns: (int) the average number of synapses per segment
"""
return float(self.getNumSynapses()) / max(1, self.getNumSegments())
def getNumSegments(self):
"""
:returns: (int) the total number of segments
"""
nSegs = self.getSegmentInfo()[0]
return nSegs
def getNumCells(self):
"""
:returns: (int) the total number of cells
"""
return self.numberOfCols * self.cellsPerColumn
def getSegmentOnCell(self, c, i, segIdx):
"""
:param c: (int) column index
:param i: (int) cell index in column
:param segIdx: (int) segment index to match
:returns: (list) representing the the segment on cell (c, i) with index
``segIdx``.
::
[ [segmentID, sequenceSegmentFlag, positiveActivations,
totalActivations, lastActiveIteration,
lastPosDutyCycle, lastPosDutyCycleIteration],
[col1, idx1, perm1],
[col2, idx2, perm2], ...
]
"""
seg = self.cells[c][i][segIdx]
retlist = [[seg.segID, seg.isSequenceSeg, seg.positiveActivations,
seg.totalActivations, seg.lastActiveIteration,
seg._lastPosDutyCycle, seg._lastPosDutyCycleIteration]]
retlist += seg.syns
return retlist
class _SegmentUpdate(object):
"""
Class used to carry instructions for updating a segment.
"""
def __init__(self, c, i, seg=None, activeSynapses=[]):
self.columnIdx = c
self.cellIdx = i
self.segment = seg # The segment object itself, not an index (can be None)
self.activeSynapses = activeSynapses
self.sequenceSegment = False
self.phase1Flag = False
# Set true if segment only reaches activationThreshold when including
# not fully connected synapses.
self.weaklyPredicting = False
def write(self, proto):
proto.columnIdx = self.columnIdx
proto.cellIdx = self.cellIdx
self.segment.write(proto.segment)
activeSynapsesProto = proto.init("activeSynapses", len(self.activeSynapses))
for i, idx in enumerate(self.activeSynapses):
activeSynapsesProto[i] = idx
proto.sequenceSegment = self.sequenceSegment
proto.phase1Flag = self.phase1Flag
proto.weaklyPredicting = self.weaklyPredicting
@classmethod
def read(cls, proto, tm):
obj = object.__new__(cls)
obj.columnIdx = proto.columnIdx
obj.cellIdx = proto.cellIdx
obj.segment.read(proto.segment, tm)
obj.activeSynapses = [syn for syn in proto.activeSynapses]
obj.sequenceSegment = proto.sequenceSegment
obj.phase1Flag = proto.phase1Flag
obj.weaklyPredicting = proto.weaklyPredicting
return obj
def __eq__(self, other):
if set(self.__dict__.keys()) != set(other.__dict__.keys()):
return False
for k in self.__dict__:
if self.__dict__[k] != other.__dict__[k]:
return False
return True
def __ne__(self, other):
return not self == other
# Just for debugging
def __str__(self):
return ("Seg update: cell=[%d,%d]" % (self.columnIdx, self.cellIdx) +
", seq seg=" + str(self.sequenceSegment) +
", seg=" + str(self.segment) +
", synapses=" + str(self.activeSynapses))
def _addToSegmentUpdates(self, c, i, segUpdate):
"""
Store a dated potential segment update. The "date" (iteration index) is used
later to determine whether the update is too old and should be forgotten.
This is controlled by parameter ``segUpdateValidDuration``.
:param c: TODO: document
:param i: TODO: document
:param segUpdate: TODO: document
"""
# Sometimes we might be passed an empty update
if segUpdate is None or len(segUpdate.activeSynapses) == 0:
return
key = (c, i) # key = (column index, cell index in column)
# TODO: scan list of updates for that cell and consolidate?
# But watch out for dates!
if self.segmentUpdates.has_key(key):
self.segmentUpdates[key] += [(self.lrnIterationIdx, segUpdate)]
else:
self.segmentUpdates[key] = [(self.lrnIterationIdx, segUpdate)]
def _removeSegmentUpdate(self, updateInfo):
"""
Remove a segment update (called when seg update expires or is processed)
:param updateInfo: (tuple) (creationDate, SegmentUpdate)
"""
# An updateInfo contains (creationDate, SegmentUpdate)
(creationDate, segUpdate) = updateInfo
# Key is stored in segUpdate itself...
key = (segUpdate.columnIdx, segUpdate.cellIdx)
self.segmentUpdates[key].remove(updateInfo)
def _computeOutput(self):
"""
Computes output for both learning and inference. In both cases, the
output is the boolean OR of ``activeState`` and ``predictedState`` at ``t``.
Stores ``currentOutput`` for ``checkPrediction``.
:returns: TODO: document
"""
# TODO: This operation can be sped up by:
# 1.) Pre-allocating space for the currentOutput
# 2.) Making predictedState and activeState of type 'float32' up front
# 3.) Using logical_or(self.predictedState['t'], self.activeState['t'],
# self.currentOutput)
if self.outputType == 'activeState1CellPerCol':
# Fire only the most confident cell in columns that have 2 or more
# active cells
mostActiveCellPerCol = self.cellConfidence['t'].argmax(axis=1)
self.currentOutput = numpy.zeros(self.infActiveState['t'].shape,
dtype='float32')
# Turn on the most confident cell in each column. Note here that
# Columns refers to TM columns, even though each TM column is a row
# in the numpy array.
numCols = self.currentOutput.shape[0]
self.currentOutput[(xrange(numCols), mostActiveCellPerCol)] = 1
# Don't turn on anything in columns which are not active at all
activeCols = self.infActiveState['t'].max(axis=1)
inactiveCols = numpy.where(activeCols==0)[0]
self.currentOutput[inactiveCols, :] = 0
elif self.outputType == 'activeState':
self.currentOutput = self.infActiveState['t']
elif self.outputType == 'normal':
self.currentOutput = numpy.logical_or(self.infPredictedState['t'],
self.infActiveState['t'])
else:
raise RuntimeError("Unimplemented outputType")
return self.currentOutput.reshape(-1).astype('float32')
def _getActiveState(self):
"""
Return the current active state. This is called by the node to
obtain the sequence output of the TM.
:returns: TODO: document
"""
# TODO: This operation can be sped up by making activeState of
# type 'float32' up front.
return self.infActiveState['t'].reshape(-1).astype('float32')
def getPredictedState(self):
"""
:returns: numpy array of predicted cells, representing the current predicted
state. ``predictedCells[c][i]`` represents the state of the i'th cell in
the c'th column.
"""
return self.infPredictedState['t']
  def predict(self, nSteps):
    """
    This function gives the future predictions for <nSteps> timesteps starting
    from the current TM state. The TM is returned to its original state at the
    end before returning.

    1. We save the TM state.
    2. Loop for nSteps

       a. Turn-on with lateral support from the current active cells
       b. Set the predicted cells as the next step's active cells. This step
          in learn and infer methods use input here to correct the predictions.
          We don't use any input here.

    3. Revert back the TM state to the time before prediction

    :param nSteps: (int) The number of future time steps to be predicted
    :returns: all the future predictions - a numpy array of type "float32" and
          shape (nSteps, numberOfCols). The ith row gives the tm prediction for
          each column at a future timestep (t+i+1).
    """
    # Save the TM dynamic state, we will use to revert back in the end
    pristineTPDynamicState = self._getTPDynamicState()
    assert (nSteps>0)
    # multiStepColumnPredictions holds all the future prediction.
    multiStepColumnPredictions = numpy.zeros((nSteps, self.numberOfCols),
                                             dtype="float32")
    # This is a (nSteps-1)+half loop. Phase 2 in both learn and infer methods
    # already predicts for timestep (t+1). We use that prediction for free and
    # save the half-a-loop of work.
    step = 0
    while True:
      # We get the prediction for the columns in the next time step from
      # the topDownCompute method. It internally uses confidences.
      multiStepColumnPredictions[step, :] = self.topDownCompute()
      # Cleanest way in python to handle one and half loops
      if step == nSteps-1:
        break
      step += 1
      # Advance the inference state one step without any bottom-up input:
      # Copy t-1 into t
      self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
      self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
      self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]
      # Predicted state at "t-1" becomes the active state at "t"
      self.infActiveState['t'][:, :] = self.infPredictedState['t-1'][:, :]
      # Predicted state and confidence are set in phase2.
      self.infPredictedState['t'].fill(0)
      self.cellConfidence['t'].fill(0.0)
      self._inferPhase2()
    # Revert the dynamic state to the saved state so prediction has no
    # lasting side effects on the TM.
    self._setTPDynamicState(pristineTPDynamicState)
    return multiStepColumnPredictions
def _getTPDynamicStateVariableNames(self):
"""
Any newly added dynamic states in the TM should be added to this list.
Parameters:
--------------------------------------------
retval: The list of names of TM dynamic state variables.
"""
return ["infActiveState",
"infPredictedState",
"lrnActiveState",
"lrnPredictedState",
"cellConfidence",
"colConfidence",
]
def _getTPDynamicState(self,):
"""
Parameters:
--------------------------------------------
retval: A dict with all the dynamic state variable names as keys and
their values at this instant as values.
"""
tpDynamicState = dict()
for variableName in self._getTPDynamicStateVariableNames():
tpDynamicState[variableName] = copy.deepcopy(self.__dict__[variableName])
return tpDynamicState
def _setTPDynamicState(self, tpDynamicState):
"""
Set all the dynamic state variables from the <tpDynamicState> dict.
<tpDynamicState> dict has all the dynamic state variable names as keys and
their values at this instant as values.
We set the dynamic state variables in the tm object with these items.
"""
for variableName in self._getTPDynamicStateVariableNames():
self.__dict__[variableName] = tpDynamicState.pop(variableName)
def _updateAvgLearnedSeqLength(self, prevSeqLength):
"""Update our moving average of learned sequence length."""
if self.lrnIterationIdx < 100:
alpha = 0.5
else:
alpha = 0.1
self.avgLearnedSeqLength = ((1.0 - alpha) * self.avgLearnedSeqLength +
(alpha * prevSeqLength))
def getAvgLearnedSeqLength(self):
"""
:returns: Moving average of learned sequence length
"""
return self.avgLearnedSeqLength
  def _inferBacktrack(self, activeColumns):
    """
    This "backtracks" our inference state, trying to see if we can lock onto
    the current set of inputs by assuming the sequence started up to N steps
    ago on start cells.

    This will adjust @ref infActiveState['t'] if it does manage to lock on to a
    sequence that started earlier. It will also compute infPredictedState['t']
    based on the possibly updated @ref infActiveState['t'], so there is no need to
    call inferPhase2() after calling inferBacktrack().

    This looks at:
        - ``infActiveState['t']``

    This updates/modifies:
        - ``infActiveState['t']``
        - ``infPredictedState['t']``
        - ``colConfidence['t']``
        - ``cellConfidence['t']``

    How it works:

    This method gets called from :meth:`updateInferenceState` when we detect
    either of the following two conditions:

    #. The current bottom-up input had too many un-expected columns
    #. We fail to generate a sufficient number of predicted columns for the
       next time step.

    Either of these two conditions indicate that we have fallen out of a
    learned sequence.

    Rather than simply "giving up" and bursting on the unexpected input
    columns, a better approach is to see if perhaps we are in a sequence that
    started a few steps ago. The real world analogy is that you are driving
    along and suddenly hit a dead-end, you will typically go back a few turns
    ago and pick up again from a familiar intersection.

    This back-tracking goes hand in hand with our learning methodology, which
    always tries to learn again from start cells after it loses context. This
    results in a network that has learned multiple, overlapping paths through
    the input data, each starting at different points. The lower the global
    decay and the more repeatability in the data, the longer each of these
    paths will end up being.

    The goal of this function is to find out which starting point in the past
    leads to the current input with the most context as possible. This gives us
    the best chance of predicting accurately going forward. Consider the
    following example, where you have learned the following sub-sequences which
    have the given frequencies:

    ::

                      ? - Q - C - D - E      10X      seq 0
                      ? - B - C - D - F      1X       seq 1
                      ? - B - C - H - I      2X       seq 2
                      ? - B - C - D - F      3X       seq 3
              ? - Z - A - B - C - D - J      2X       seq 4
              ? - Z - A - B - C - H - I      1X       seq 5
              ? - Y - A - B - C - D - F      3X       seq 6

        ----------------------------------------
      W - X - Z - A - B - C - D          <= input history
                              ^
                              current time step

    Suppose, in the current time step, the input pattern is D and you have not
    predicted D, so you need to backtrack. Suppose we can backtrack up to 6
    steps in the past, which path should we choose? From the table above, we can
    see that the correct answer is to assume we are in seq 4. How do we
    implement the backtrack to give us this right answer? The current
    implementation takes the following approach:

    #. Start from the farthest point in the past.
    #. For each starting point S, calculate the confidence of the current
       input, conf(startingPoint=S), assuming we followed that sequence.
       Note that we must have learned at least one sequence that starts at
       point S.
    #. If conf(startingPoint=S) is significantly different from
       conf(startingPoint=S-1), then choose S-1 as the starting point.

    The assumption here is that starting point S-1 is the starting point of
    a learned sub-sequence that includes the current input in it's path and
    that started the longest ago. It thus has the most context and will be
    the best predictor going forward.

    From the statistics in the above table, we can compute what the confidences
    will be for each possible starting point:

    ::

      startingPoint           confidence of D
      -----------------------------------------
      B (t-2)               4/6  = 0.667   (seq 1,3)/(seq 1,2,3)
      Z (t-4)               2/3  = 0.667   (seq 4)/(seq 4,5)

    First of all, we do not compute any confidences at starting points t-1, t-3,
    t-5, t-6 because there are no learned sequences that start at those points.

    Notice here that Z is the starting point of the longest sub-sequence leading
    up to the current input. Event though starting at t-2 and starting at t-4
    give the same confidence value, we choose the sequence starting at t-4
    because it gives the most context, and it mirrors the way that learning
    extends sequences.

    :param activeColumns: (list) of active column indices
    """
    # How much input history have we accumulated?
    # The current input is always at the end of self._prevInfPatterns (at
    # index -1), but it is also evaluated as a potential starting point by
    # turning on it's start cells and seeing if it generates sufficient
    # predictions going forward.
    numPrevPatterns = len(self._prevInfPatterns)
    if numPrevPatterns <= 0:
      return
    # This is an easy to use label for the current time step
    currentTimeStepsOffset = numPrevPatterns - 1
    # Save our current active state in case we fail to find a place to restart
    # todo: save infActiveState['t-1'], infPredictedState['t-1']?
    self.infActiveState['backup'][:, :] = self.infActiveState['t'][:, :]
    # Save our t-1 predicted state because we will write over it as as evaluate
    # each potential starting point.
    self.infPredictedState['backup'][:, :] = self.infPredictedState['t-1'][:, :]
    # We will record which previous input patterns did not generate predictions
    # up to the current time step and remove all the ones at the head of the
    # input history queue so that we don't waste time evaluating them again at
    # a later time step.
    badPatterns = []
    # Let's go back in time and replay the recent inputs from start cells and
    # see if we can lock onto this current set of inputs that way.
    #
    # Start the farthest back and work our way forward. For each starting point,
    # See if firing on start cells at that point would predict the current
    # input as well as generate sufficient predictions for the next time step.
    #
    # We want to pick the point closest to the current time step that gives us
    # the relevant confidence. Think of this example, where we are at D and need
    # to
    #   A - B - C - D
    # decide if we should backtrack to C, B, or A. Suppose B-C-D is a high order
    # sequence and A is unrelated to it. If we backtrock to B would we get a
    # certain confidence of D, but if went went farther back, to A, the
    # confidence wouldn't change, since A has no impact on the B-C-D series.
    #
    # So, our strategy will be to pick the "B" point, since choosing the A point
    # does not impact our confidences going forward at all.
    inSequence = False
    candConfidence = None
    candStartOffset = None
    for startOffset in range(0, numPrevPatterns):
      # If we have a candidate already in the past, don't bother falling back
      # to start cells on the current input.
      if startOffset == currentTimeStepsOffset and candConfidence is not None:
        break
      if self.verbosity >= 3:
        print (
            "Trying to lock-on using startCell state from %d steps ago:" % (
                numPrevPatterns - 1 - startOffset),
            self._prevInfPatterns[startOffset])
      # Play through starting from starting point 'startOffset'
      inSequence = False
      for offset in range(startOffset, numPrevPatterns):
        # If we are about to set the active columns for the current time step
        # based on what we predicted, capture and save the total confidence of
        # predicting the current input
        if offset == currentTimeStepsOffset:
          totalConfidence = self.colConfidence['t'][activeColumns].sum()
        # Compute activeState[t] given bottom-up and predictedState[t-1]
        self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
        inSequence = self._inferPhase1(self._prevInfPatterns[offset],
                                       useStartCells = (offset == startOffset))
        if not inSequence:
          break
        # Compute predictedState['t'] given activeState['t']
        if self.verbosity >= 3:
          print ("  backtrack: computing predictions from ",
                 self._prevInfPatterns[offset])
        inSequence = self._inferPhase2()
        if not inSequence:
          break
      # If starting from startOffset got lost along the way, mark it as an
      # invalid start point.
      if not inSequence:
        badPatterns.append(startOffset)
        continue
      # If we got to here, startOffset is a candidate starting point.
      # Save this state as a candidate state. It will become the chosen state if
      # we detect a change in confidences starting at a later startOffset
      candConfidence = totalConfidence
      candStartOffset = startOffset
      if self.verbosity >= 3 and startOffset != currentTimeStepsOffset:
        print ("  # Prediction confidence of current input after starting %d "
               "steps ago:" % (numPrevPatterns - 1 - startOffset),
               totalConfidence)
      if candStartOffset == currentTimeStepsOffset:  # no more to try
        break
      # Cache the replayed state; the outer loop ends after the first
      # successful (farthest-back) candidate.
      self.infActiveState['candidate'][:, :] = self.infActiveState['t'][:, :]
      self.infPredictedState['candidate'][:, :] = (
          self.infPredictedState['t'][:, :])
      self.cellConfidence['candidate'][:, :] = self.cellConfidence['t'][:, :]
      self.colConfidence['candidate'][:] = self.colConfidence['t'][:]
      break
    # If we failed to lock on at any starting point, fall back to the original
    # active state that we had on entry
    if candStartOffset is None:
      if self.verbosity >= 3:
        print "Failed to lock on. Falling back to bursting all unpredicted."
      self.infActiveState['t'][:, :] = self.infActiveState['backup'][:, :]
      self._inferPhase2()
    else:
      if self.verbosity >= 3:
        print ("Locked on to current input by using start cells from %d "
               " steps ago:" % (numPrevPatterns - 1 - candStartOffset),
               self._prevInfPatterns[candStartOffset])
      # Install the candidate state, if it wasn't the last one we evaluated.
      if candStartOffset != currentTimeStepsOffset:
        self.infActiveState['t'][:, :] = self.infActiveState['candidate'][:, :]
        self.infPredictedState['t'][:, :] = (
            self.infPredictedState['candidate'][:, :])
        self.cellConfidence['t'][:, :] = self.cellConfidence['candidate'][:, :]
        self.colConfidence['t'][:] = self.colConfidence['candidate'][:]
    # Remove any useless patterns at the head of the previous input pattern
    # queue.
    for i in range(numPrevPatterns):
      if (i in badPatterns or
          (candStartOffset is not None and i <= candStartOffset)):
        if self.verbosity >= 3:
          print ("Removing useless pattern from history:",
                 self._prevInfPatterns[0])
        self._prevInfPatterns.pop(0)
      else:
        break
    # Restore the original predicted state.
    self.infPredictedState['t-1'][:, :] = self.infPredictedState['backup'][:, :]
def _inferPhase1(self, activeColumns, useStartCells):
"""
Update the inference active state from the last set of predictions
and the current bottom-up.
This looks at:
- ``infPredictedState['t-1']``
This modifies:
- ``infActiveState['t']``
:param activeColumns: (list) active bottom-ups
:param useStartCells: (bool) If true, ignore previous predictions and simply
turn on the start cells in the active columns
:returns: (bool) True if the current input was sufficiently predicted, OR if
we started over on startCells. False indicates that the current input
was NOT predicted, and we are now bursting on most columns.
"""
# Init to zeros to start
self.infActiveState['t'].fill(0)
# Phase 1 - turn on predicted cells in each column receiving bottom-up
# If we are following a reset, activate only the start cell in each
# column that has bottom-up
numPredictedColumns = 0
if useStartCells:
for c in activeColumns:
self.infActiveState['t'][c, 0] = 1
# else, turn on any predicted cells in each column. If there are none, then
# turn on all cells (burst the column)
else:
for c in activeColumns:
predictingCells = numpy.where(self.infPredictedState['t-1'][c] == 1)[0]
numPredictingCells = len(predictingCells)
if numPredictingCells > 0:
self.infActiveState['t'][c, predictingCells] = 1
numPredictedColumns += 1
else:
self.infActiveState['t'][c, :] = 1 # whole column bursts
# Did we predict this input well enough?
if useStartCells or numPredictedColumns >= 0.50 * len(activeColumns):
return True
else:
return False
  def _inferPhase2(self):
    """
    Phase 2 for the inference state. The computes the predicted state, then
    checks to insure that the predicted state is not over-saturated, i.e.
    look too close like a burst. This indicates that there were so many
    separate paths learned from the current input columns to the predicted
    input columns that bursting on the current input columns is most likely
    generated mix and match errors on cells in the predicted columns. If
    we detect this situation, we instead turn on only the start cells in the
    current active columns and re-generate the predicted state from those.

    This looks at:
        - ``infActiveState['t']``

    This modifies:
        - ``infPredictedState['t']``
        - ``colConfidence['t']``
        - ``cellConfidence['t']``

    :returns: (bool) True if we have a decent guess as to the next input.
           Returning False from here indicates to the caller that we have
           reached the end of a learned sequence.
    """
    # Init to zeros to start
    self.infPredictedState['t'].fill(0)
    self.cellConfidence['t'].fill(0)
    self.colConfidence['t'].fill(0)
    # Phase 2 - Compute new predicted state and update cell and column
    # confidences
    for c in xrange(self.numberOfCols):
      # For each cell in the column
      for i in xrange(self.cellsPerColumn):
        # For each segment in the cell
        for s in self.cells[c][i]:
          # See if it has the min number of active synapses
          numActiveSyns = self._getSegmentActivityLevel(
              s, self.infActiveState['t'], connectedSynapsesOnly=False)
          if numActiveSyns < self.activationThreshold:
            continue
          # Incorporate the confidence into the owner cell and column
          if self.verbosity >= 6:
            print "incorporating DC from cell[%d,%d]:   " % (c, i),
            s.debugPrint()
          # Confidence contribution is the segment's duty cycle.
          dc = s.dutyCycle()
          self.cellConfidence['t'][c, i] += dc
          self.colConfidence['t'][c] += dc
          # If we reach threshold on the connected synapses, predict it
          # If not active, skip over it
          if self._isSegmentActive(s, self.infActiveState['t']):
            self.infPredictedState['t'][c, i] = 1
    # Normalize column and cell confidences (guard against divide-by-zero
    # when nothing contributed any duty cycle)
    sumConfidences = self.colConfidence['t'].sum()
    if sumConfidences > 0:
      self.colConfidence['t'] /= sumConfidences
      self.cellConfidence['t'] /= sumConfidences
    # Are we predicting the required minimum number of columns?
    numPredictedCols = self.infPredictedState['t'].max(axis=1).sum()
    if numPredictedCols >= 0.5 * self.avgInputDensity:
      return True
    else:
      return False
  def _updateInferenceState(self, activeColumns):
    """
    Update the inference state. Called from :meth:`compute` on every iteration.

    :param activeColumns: (list) active column indices.
    """
    # Copy t to t-1 (this must happen before any phase overwrites 't')
    self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
    self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
    self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]
    self.colConfidence['t-1'][:] = self.colConfidence['t'][:]
    # Each phase will zero/initilize the 't' states that it affects
    # Update our inference input history (bounded FIFO of recent patterns)
    if self.maxInfBacktrack > 0:
      if len(self._prevInfPatterns) > self.maxInfBacktrack:
        self._prevInfPatterns.pop(0)
      self._prevInfPatterns.append(activeColumns)
    # Compute the active state given the predictions from last time step and
    # the current bottom-up
    inSequence = self._inferPhase1(activeColumns, self.resetCalled)
    # If this input was considered unpredicted, let's go back in time and
    # replay the recent inputs from start cells and see if we can lock onto
    # this current set of inputs that way.
    if not inSequence:
      if self.verbosity >= 3:
        print ("Too much unpredicted input, re-tracing back to try and lock on "
               "at an earlier timestep.")
      # inferBacktrack() will call inferPhase2() for us.
      self._inferBacktrack(activeColumns)
      return
    # Compute the predicted cells and the cell and column confidences
    inSequence = self._inferPhase2()
    if not inSequence:
      if self.verbosity >= 3:
        print ("Not enough predictions going forward, "
               "re-tracing back to try and lock on at an earlier timestep.")
      # inferBacktrack() will call inferPhase2() for us.
      self._inferBacktrack(activeColumns)
  def _learnBacktrackFrom(self, startOffset, readOnly=True):
    """
    A utility method called from learnBacktrack. This will backtrack
    starting from the given startOffset in our prevLrnPatterns queue.

    It returns True if the backtrack was successful and we managed to get
    predictions all the way up to the current time step.

    If readOnly, then no segments are updated or modified, otherwise, all
    segment updates that belong to the given path are applied.

    This updates/modifies:

        - lrnActiveState['t']

    This trashes:

        - lrnPredictedState['t']
        - lrnPredictedState['t-1']
        - lrnActiveState['t-1']

    :param startOffset: (int) Start offset within the prevLrnPatterns input
        history
    :param readOnly: (bool) when True, replay without applying any segment
        updates or incrementing duty cycles
    :return: True if we managed to lock on to a sequence that started
             earlier.
             If False, we lost predictions somewhere along the way
             leading up to the current time.
    """
    # How much input history have we accumulated?
    # The current input is always at the end of self._prevInfPatterns (at
    # index -1), but it is also evaluated as a potential starting point by
    # turning on it's start cells and seeing if it generates sufficient
    # predictions going forward.
    numPrevPatterns = len(self._prevLrnPatterns)
    # This is an easy to use label for the current time step
    currentTimeStepsOffset = numPrevPatterns - 1
    # Clear out any old segment updates. learnPhase2() adds to the segment
    # updates if we're not readOnly
    if not readOnly:
      self.segmentUpdates = {}
    # Status message
    if self.verbosity >= 3:
      if readOnly:
        print (
            "Trying to lock-on using startCell state from %d steps ago:" % (
                numPrevPatterns - 1 - startOffset),
            self._prevLrnPatterns[startOffset])
      else:
        print (
            "Locking on using startCell state from %d steps ago:" % (
                numPrevPatterns - 1 - startOffset),
            self._prevLrnPatterns[startOffset])
    # Play through up to the current time step
    inSequence = True
    for offset in range(startOffset, numPrevPatterns):
      # Copy predicted and active states into t-1
      self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :]
      self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :]
      # Get the input pattern
      inputColumns = self._prevLrnPatterns[offset]
      # Apply segment updates from the last set of predictions
      if not readOnly:
        self._processSegmentUpdates(inputColumns)
      # Phase 1:
      # Compute activeState[t] given bottom-up and predictedState[t-1]
      if offset == startOffset:
        # At the starting point itself, fire start cells unconditionally.
        self.lrnActiveState['t'].fill(0)
        for c in inputColumns:
          self.lrnActiveState['t'][c, 0] = 1
        inSequence = True
      else:
        # Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']
        # computes lrnActiveState['t']
        inSequence = self._learnPhase1(inputColumns, readOnly=readOnly)
      # Break out immediately if we fell out of sequence or reached the current
      # time step
      if not inSequence or offset == currentTimeStepsOffset:
        break
      # Phase 2:
      # Computes predictedState['t'] given activeState['t'] and also queues
      # up active segments into self.segmentUpdates, unless this is readOnly
      if self.verbosity >= 3:
        print "  backtrack: computing predictions from ", inputColumns
      self._learnPhase2(readOnly=readOnly)
    # Return whether or not this starting point was valid
    return inSequence
  def _learnBacktrack(self):
    """
    This "backtracks" our learning state, trying to see if we can lock onto
    the current set of inputs by assuming the sequence started up to N steps
    ago on start cells.

    This will adjust @ref lrnActiveState['t'] if it does manage to lock on to a
    sequence that started earlier.

    :returns: >0 if we managed to lock on to a sequence that started
              earlier. The value returned is how many steps in the
              past we locked on.
              If 0 is returned, the caller needs to change active
              state to start on start cells.

    How it works:
    -------------------------------------------------------------------
    This method gets called from updateLearningState when we detect either of
    the following two conditions:

    #. Our PAM counter (@ref pamCounter) expired
    #. We reached the max allowed learned sequence length

    Either of these two conditions indicate that we want to start over on start
    cells.

    Rather than start over on start cells on the current input, we can
    accelerate learning by backtracking a few steps ago and seeing if perhaps
    a sequence we already at least partially know already started.

    This updates/modifies:
        - @ref lrnActiveState['t']

    This trashes:
        - @ref lrnActiveState['t-1']
        - @ref lrnPredictedState['t']
        - @ref lrnPredictedState['t-1']
    """
    # How much input history have we accumulated?
    # The current input is always at the end of self._prevInfPatterns (at
    # index -1), and is not a valid startingOffset to evaluate.
    numPrevPatterns = len(self._prevLrnPatterns) - 1
    if numPrevPatterns <= 0:
      if self.verbosity >= 3:
        print "lrnBacktrack: No available history to backtrack from"
      # NOTE: returns False (== 0) here, matching the ">0 locked on" contract.
      return False
    # We will record which previous input patterns did not generate predictions
    # up to the current time step and remove all the ones at the head of the
    # input history queue so that we don't waste time evaluating them again at
    # a later time step.
    badPatterns = []
    # Let's go back in time and replay the recent inputs from start cells and
    # see if we can lock onto this current set of inputs that way.
    #
    # Start the farthest back and work our way forward. For each starting point,
    # See if firing on start cells at that point would predict the current
    # input.
    #
    # We want to pick the point farthest in the past that has continuity
    # up to the current time step
    inSequence = False
    for startOffset in range(0, numPrevPatterns):
      # Can we backtrack from startOffset?
      inSequence = self._learnBacktrackFrom(startOffset, readOnly=True)
      # Done playing through the sequence from starting point startOffset
      # Break out as soon as we find a good path
      if inSequence:
        break
      # Take this bad starting point out of our input history so we don't
      # try it again later.
      badPatterns.append(startOffset)
    # If we failed to lock on at any starting point, return failure. The caller
    # will start over again on start cells
    if not inSequence:
      if self.verbosity >= 3:
        print ("Failed to lock on. Falling back to start cells on current "
               "time step.")
      # Nothing in our input history was a valid starting point, so get rid
      # of it so we don't try any of them again at a later iteration
      self._prevLrnPatterns = []
      return False
    # We did find a valid starting point in the past. Now, we need to
    # re-enforce all segments that became active when following this path.
    if self.verbosity >= 3:
      print ("Discovered path to current input by using start cells from %d "
             "steps ago:" % (numPrevPatterns - startOffset),
             self._prevLrnPatterns[startOffset])
    # Replay again, this time applying segment updates (readOnly=False).
    self._learnBacktrackFrom(startOffset, readOnly=False)
    # Remove any useless patterns at the head of the input pattern history
    # queue.
    for i in range(numPrevPatterns):
      if i in badPatterns or i <= startOffset:
        if self.verbosity >= 3:
          print ("Removing useless pattern from history:",
                 self._prevLrnPatterns[0])
        self._prevLrnPatterns.pop(0)
      else:
        break
    return numPrevPatterns - startOffset
  def _learnPhase1(self, activeColumns, readOnly=False):
    """
    Compute the learning active state given the predicted state and
    the bottom-up input.

    :param activeColumns: (list) active bottom-up column indices
    :param readOnly: (bool) True if being called from backtracking logic.
        This tells us not to increment any segment
        duty cycles or queue up any updates.
    :returns: True if the current input was sufficiently predicted, OR
        if we started over on startCells. False indicates that the current
        input was NOT predicted, well enough to consider it as "inSequence"

    This looks at:
        - @ref lrnActiveState['t-1']
        - @ref lrnPredictedState['t-1']

    This modifies:
        - @ref lrnActiveState['t']
        - @ref lrnActiveState['t-1']
    """
    # Save previous active state and start out on a clean slate
    self.lrnActiveState['t'].fill(0)
    # For each column, turn on the predicted cell. There will always be at most
    # one predicted cell per column
    numUnpredictedColumns = 0
    for c in activeColumns:
      predictingCells = numpy.where(self.lrnPredictedState['t-1'][c] == 1)[0]
      numPredictedCells = len(predictingCells)
      assert numPredictedCells <= 1
      # If we have a predicted cell, turn it on. The segment's posActivation
      # count will have already been incremented by processSegmentUpdates
      if numPredictedCells == 1:
        i = predictingCells[0]
        self.lrnActiveState['t'][c, i] = 1
        continue
      numUnpredictedColumns += 1
      if readOnly:
        continue
      # If no predicted cell, pick the closest matching one to reinforce, or
      # if none exists, create a new segment on a cell in that column
      i, s, numActive = self._getBestMatchingCell(
          c, self.lrnActiveState['t-1'], self.minThreshold)
      if s is not None and s.isSequenceSegment():
        if self.verbosity >= 4:
          print "Learn branch 0, found segment match. Learning on col=", c
        self.lrnActiveState['t'][c, i] = 1
        segUpdate = self._getSegmentActiveSynapses(
            c, i, s, self.lrnActiveState['t-1'], newSynapses = True)
        s.totalActivations += 1
        # This will update the permanences, posActivationsCount, and the
        # lastActiveIteration (age).
        trimSegment = self._adaptSegment(segUpdate)
        if trimSegment:
          self._trimSegmentsInCell(c, i, [s], minPermanence = 0.00001,
                                   minNumSyns = 0)
      # If no close match exists, create a new one
      else:
        # Choose a cell in this column to add a new segment to
        i = self._getCellForNewSegment(c)
        if (self.verbosity >= 4):
          print "Learn branch 1, no match. Learning on col=", c,
          print ", newCellIdxInCol=", i
        self.lrnActiveState['t'][c, i] = 1
        segUpdate = self._getSegmentActiveSynapses(
            c, i, None, self.lrnActiveState['t-1'], newSynapses=True)
        segUpdate.sequenceSegment = True # Make it a sequence segment
        self._adaptSegment(segUpdate)  # No need to check whether perm reached 0
    # Determine if we are out of sequence or not and reset our PAM counter
    # if we are in sequence
    numBottomUpColumns = len(activeColumns)
    # "In sequence" when more than half the bottom-up columns were predicted.
    if numUnpredictedColumns < numBottomUpColumns / 2:
      return True   # in sequence
    else:
      return False  # out of sequence
  def _learnPhase2(self, readOnly=False):
    """
    Compute the predicted segments given the current set of active cells.

    This computes lrnPredictedState['t'] and queues up any segments that
    became active (along with the list of active synapses for each segment)
    into the segmentUpdates queue.

    This looks at:
    - @ref lrnActiveState['t']

    This modifies:
    - @ref lrnPredictedState['t']
    - @ref segmentUpdates

    :param readOnly: True if being called from backtracking logic.
        This tells us not to increment any segment duty cycles or queue up
        any updates.
    """
    # Clear out predicted state to start with
    self.lrnPredictedState['t'].fill(0)

    # Compute new predicted state. When computing predictions for phase 2, we
    # predict at most one cell per column (the one with the best matching
    # segment).
    for c in xrange(self.numberOfCols):

      # Is there a cell predicted to turn on in this column?
      i, s, numActive = self._getBestMatchingCell(
          c, self.lrnActiveState['t'], minThreshold = self.activationThreshold)
      if i is None:
        continue

      # Turn on the predicted state for the best matching cell and queue
      # the pertinent segment up for an update, which will get processed if
      # the cell receives bottom up in the future.
      self.lrnPredictedState['t'][c, i] = 1
      if readOnly:
        continue

      # Queue up this segment for updating. Only grow new synapses if the
      # segment fired with fewer than newSynapseCount active synapses.
      segUpdate = self._getSegmentActiveSynapses(
          c, i, s, activeState=self.lrnActiveState['t'],
          newSynapses=(numActive < self.newSynapseCount))

      s.totalActivations += 1    # increment totalActivations
      self._addToSegmentUpdates(c, i, segUpdate)

      if self.doPooling:
        # When pooling, also reinforce a segment linking this predicted cell
        # back to the previous time step's learn-active cells. A "weak"
        # activation (sum over all synapses >= minThreshold) is enough; if no
        # matching segment exists, predSegment is None and a new pooling
        # segment will be described by _getSegmentActiveSynapses.
        predSegment = self._getBestMatchingSegment(c, i,
                                                   self.lrnActiveState['t-1'])
        segUpdate = self._getSegmentActiveSynapses(c, i, predSegment,
            self.lrnActiveState['t-1'], newSynapses=True)
        self._addToSegmentUpdates(c, i, segUpdate)
  def _updateLearningState(self, activeColumns):
    """
    Update the learning state. Called from compute() on every iteration.

    This shifts the learn predicted/active states into 't-1', processes any
    queued segment updates, runs learn phase 1 (choosing cells to learn on),
    handles start-over/backtracking when we fall out of sequence, and finally
    runs learn phase 2 (computing the new predicted state).

    :param activeColumns: List of active column indices
    """
    # Copy predicted and active states into t-1
    self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :]
    self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :]

    # Update our learning input history, used by _learnBacktrack()
    if self.maxLrnBacktrack > 0:
      if len(self._prevLrnPatterns) > self.maxLrnBacktrack:
        self._prevLrnPatterns.pop(0)
      self._prevLrnPatterns.append(activeColumns)
      if self.verbosity >= 4:
        print "Previous learn patterns: \n"
        print self._prevLrnPatterns

    # Process queued up segment updates, now that we have bottom-up, we
    # can update the permanences on the cells that we predicted to turn on
    # and did receive bottom-up
    self._processSegmentUpdates(activeColumns)

    # Decrement the PAM counter if it is running and increment our learned
    # sequence length
    if self.pamCounter > 0:
      self.pamCounter -= 1
    self.learnedSeqLength += 1

    # Phase 1 - turn on the predicted cell in each column that received
    # bottom-up. If there was no predicted cell, pick one to learn to.
    if not self.resetCalled:
      # Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']
      # computes lrnActiveState['t']
      inSequence = self._learnPhase1(activeColumns)

      # Reset our PAM counter if we are in sequence
      if inSequence:
        self.pamCounter = self.pamLength

    # Print status of PAM counter, learned sequence length
    if self.verbosity >= 3:
      print "pamCounter = ", self.pamCounter, "seqLength = ", \
            self.learnedSeqLength

    # Start over on start cells if any of the following occur:
    #  1.) A reset was just called
    #  2.) We have been too long out of sequence (the pamCounter has expired)
    #  3.) We have reached maximum allowed sequence length.
    #
    # Note that, unless we are following a reset, we also just learned or
    # re-enforced connections to the current set of active columns because
    # this input is still a valid prediction to learn.
    #
    # It is especially helpful to learn the connections to this input when
    # you have a maxSeqLength constraint in place. Otherwise, you will have
    # no continuity at all between sub-sequences of length maxSeqLength.
    if (self.resetCalled or self.pamCounter == 0 or
        (self.maxSeqLength != 0 and
         self.learnedSeqLength >= self.maxSeqLength)):
      if self.verbosity >= 3:
        if self.resetCalled:
          print "Starting over:", activeColumns, "(reset was called)"
        elif self.pamCounter == 0:
          print "Starting over:", activeColumns, "(PAM counter expired)"
        else:
          print "Starting over:", activeColumns, "(reached maxSeqLength)"

      # Update average learned sequence length - this is a diagnostic statistic
      # (subtract pamLength when the PAM counter expired: those steps were
      # spent out of sequence, not learning it)
      if self.pamCounter == 0:
        seqLength = self.learnedSeqLength - self.pamLength
      else:
        seqLength = self.learnedSeqLength
      if self.verbosity >= 3:
        print " learned sequence length was:", seqLength
      self._updateAvgLearnedSeqLength(seqLength)

      # Backtrack to an earlier starting point, if we find one
      backSteps = 0
      if not self.resetCalled:
        backSteps = self._learnBacktrack()

      # Start over in the current time step if reset was called, or we couldn't
      # backtrack.
      if self.resetCalled or backSteps is None or backSteps == 0:
        backSteps = 0
        self.lrnActiveState['t'].fill(0)
        for c in activeColumns:
          self.lrnActiveState['t'][c, 0] = 1

        # Remove any old input history patterns
        self._prevLrnPatterns = []

      # Reset PAM counter
      self.pamCounter = self.pamLength
      self.learnedSeqLength = backSteps

      # Clear out any old segment updates from prior sequences
      self.segmentUpdates = {}

    # Phase 2 - Compute new predicted state. When computing predictions for
    # phase 2, we predict at most one cell per column (the one with the best
    # matching segment).
    self._learnPhase2()
  def compute(self, bottomUpInput, enableLearn, enableInference=None):
    """
    Handle one compute, possibly learning.

    .. note:: It is an error to have both ``enableLearn`` and
       ``enableInference`` set to False

    .. note:: By default, we don't compute the inference output when learning
       because it slows things down, but you can override this by passing
       in True for ``enableInference``.

    :param bottomUpInput: The bottom-up input as numpy list, typically from a
      spatial pooler.
    :param enableLearn: (bool) If true, perform learning
    :param enableInference: (bool) If None, default behavior is to disable the
      inference output when ``enableLearn`` is on. If true, compute the
      inference output. If false, do not compute the inference output.
    :returns: the output of self._computeOutput() (the TM output vector)
    """
    # As a speed optimization for now (until we need online learning), skip
    # computing the inference output while learning
    if enableInference is None:
      if enableLearn:
        enableInference = False
      else:
        enableInference = True

    assert (enableLearn or enableInference)

    # Get the list of columns that have bottom-up
    activeColumns = bottomUpInput.nonzero()[0]
    if enableLearn:
      self.lrnIterationIdx += 1
    self.iterationIdx += 1

    if self.verbosity >= 3:
      print "\n==== PY Iteration: %d =====" % (self.iterationIdx)
      print "Active cols:", activeColumns

    # Update segment duty cycles if we are crossing a "tier"
    # We determine if it's time to update the segment duty cycles. Since the
    # duty cycle calculation is a moving average based on a tiered alpha, it is
    # important that we update all segments on each tier boundary
    if enableLearn:
      if self.lrnIterationIdx in Segment.dutyCycleTiers:
        for c, i in itertools.product(xrange(self.numberOfCols),
                                      xrange(self.cellsPerColumn)):
          for segment in self.cells[c][i]:
            segment.dutyCycle()

      # Update the average input density (exponential moving average with
      # alpha = 0.01, seeded with the first input's density)
      if self.avgInputDensity is None:
        self.avgInputDensity = len(activeColumns)
      else:
        self.avgInputDensity = (0.99 * self.avgInputDensity +
                                0.01 * len(activeColumns))

    # First, update the inference state
    # As a speed optimization for now (until we need online learning), skip
    # computing the inference output while learning
    if enableInference:
      self._updateInferenceState(activeColumns)

    # Next, update the learning state
    if enableLearn:
      self._updateLearningState(activeColumns)

      # Apply global decay, and remove synapses and/or segments.
      # Synapses are removed if their permanence value is <= 0.
      # Segments are removed when they don't have synapses anymore.
      # Removal of synapses can trigger removal of whole segments!
      # todo: isolate the synapse/segment retraction logic so that
      # it can be called in adaptSegments, in the case where we
      # do global decay only episodically.
      if self.globalDecay > 0.0 and ((self.lrnIterationIdx % self.maxAge) == 0):
        for c, i in itertools.product(xrange(self.numberOfCols),
                                      xrange(self.cellsPerColumn)):

          segsToDel = [] # collect and remove outside the loop
          for segment in self.cells[c][i]:
            age = self.lrnIterationIdx - segment.lastActiveIteration
            if age <= self.maxAge:
              continue

            synsToDel = [] # collect and remove outside the loop
            for synapse in segment.syns:
              synapse[2] = synapse[2] - self.globalDecay # decrease permanence
              if synapse[2] <= 0:
                synsToDel.append(synapse) # add to list to delete

            # If ALL synapses hit 0, delete the whole segment; otherwise just
            # delete the dead synapses.
            if len(synsToDel) == segment.getNumSynapses():
              segsToDel.append(segment) # will remove the whole segment
            elif len(synsToDel) > 0:
              for syn in synsToDel: # remove some synapses on segment
                segment.syns.remove(syn)

          for seg in segsToDel: # remove some segments of this cell
            self._cleanUpdatesList(c, i, seg)
            self.cells[c][i].remove(seg)

    # Update the prediction score stats
    # Learning always includes inference
    if self.collectStats:
      if enableInference:
        predictedState = self.infPredictedState['t-1']
      else:
        predictedState = self.lrnPredictedState['t-1']
      self._updateStatsInferEnd(self._internalStats,
                                activeColumns,
                                predictedState,
                                self.colConfidence['t-1'])

    # Finally return the TM output
    output = self._computeOutput()

    # Print diagnostic information based on the current verbosity level
    self.printComputeEnd(output, learn=enableLearn)

    self.resetCalled = False
    return output
def infer(self, bottomUpInput):
"""
TODO: document
:param bottomUpInput:
:return:
"""
return self.compute(bottomUpInput, enableLearn=False)
def learn(self, bottomUpInput, enableInference=None):
"""
TODO: document
:param bottomUpInput:
:param enableInference:
:return:
"""
return self.compute(bottomUpInput, enableLearn=True,
enableInference=enableInference)
def _columnConfidences(self):
"""
Returns the stored cell confidences from the last compute.
:returns: Column confidence scores
"""
return self.colConfidence['t']
def topDownCompute(self):
"""
For now, we will assume there is no one above us and that bottomUpOut is
simply the output that corresponds to our currently stored column
confidences.
:returns: the same thing as :meth:`columnConfidences`
"""
# Simply return the column confidences
return self._columnConfidences()
def _trimSegmentsInCell(self, colIdx, cellIdx, segList, minPermanence,
minNumSyns):
"""
This method goes through a list of segments for a given cell and
deletes all synapses whose permanence is less than minPermanence and deletes
any segments that have less than minNumSyns synapses remaining.
:param colIdx Column index
:param cellIdx Cell index within the column
:param segList List of segment references
:param minPermanence Any syn whose permamence is 0 or < minPermanence will
be deleted.
:param minNumSyns Any segment with less than minNumSyns synapses remaining
in it will be deleted.
:returns: tuple (numSegsRemoved, numSynsRemoved)
"""
# Fill in defaults
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
# Loop through all segments
nSegsRemoved, nSynsRemoved = 0, 0
segsToDel = [] # collect and remove segments outside the loop
for segment in segList:
# List if synapses to delete
synsToDel = [syn for syn in segment.syns if syn[2] < minPermanence]
if len(synsToDel) == len(segment.syns):
segsToDel.append(segment) # will remove the whole segment
else:
if len(synsToDel) > 0:
for syn in synsToDel: # remove some synapses on segment
segment.syns.remove(syn)
nSynsRemoved += 1
if len(segment.syns) < minNumSyns:
segsToDel.append(segment)
# Remove segments that don't have enough synapses and also take them
# out of the segment update list, if they are in there
nSegsRemoved += len(segsToDel)
for seg in segsToDel: # remove some segments of this cell
self._cleanUpdatesList(colIdx, cellIdx, seg)
self.cells[colIdx][cellIdx].remove(seg)
nSynsRemoved += len(seg.syns)
return nSegsRemoved, nSynsRemoved
def trimSegments(self, minPermanence=None, minNumSyns=None):
"""
This method deletes all synapses whose permanence is less than
minPermanence and deletes any segments that have less than
minNumSyns synapses remaining.
:param minPermanence: (float) Any syn whose permanence is 0 or <
``minPermanence`` will be deleted. If None is passed in, then
``self.connectedPerm`` is used.
:param minNumSyns: (int) Any segment with less than ``minNumSyns`` synapses
remaining in it will be deleted. If None is passed in, then
``self.activationThreshold`` is used.
:returns: (tuple) ``numSegsRemoved``, ``numSynsRemoved``
"""
# Fill in defaults
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
# Loop through all cells
totalSegsRemoved, totalSynsRemoved = 0, 0
for c, i in itertools.product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
(segsRemoved, synsRemoved) = self._trimSegmentsInCell(
colIdx=c, cellIdx=i, segList=self.cells[c][i],
minPermanence=minPermanence, minNumSyns=minNumSyns)
totalSegsRemoved += segsRemoved
totalSynsRemoved += synsRemoved
# Print all cells if verbosity says to
if self.verbosity >= 5:
print "Cells, all segments:"
self.printCells(predictedOnly=False)
return totalSegsRemoved, totalSynsRemoved
def _cleanUpdatesList(self, col, cellIdx, seg):
"""
Removes any update that would be for the given col, cellIdx, segIdx.
NOTE: logically, we need to do this when we delete segments, so that if
an update refers to a segment that was just deleted, we also remove
that update from the update list. However, I haven't seen it trigger
in any of the unit tests yet, so it might mean that it's not needed
and that situation doesn't occur, by construction.
"""
# TODO: check if the situation described in the docstring above actually
# occurs.
for key, updateList in self.segmentUpdates.iteritems():
c, i = key[0], key[1]
if c == col and i == cellIdx:
for update in updateList:
if update[1].segment == seg:
self._removeSegmentUpdate(update)
def finishLearning(self):
"""
Called when learning has been completed. This method just calls
:meth:`trimSegments` and then clears out caches.
"""
# Keep weakly formed synapses around because they contain confidence scores
# for paths out of learned sequenced and produce a better prediction than
# chance.
self.trimSegments(minPermanence=0.0001)
# Update all cached duty cycles for better performance right after loading
# in the trained network.
for c, i in itertools.product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
for segment in self.cells[c][i]:
segment.dutyCycle()
# For error checking purposes, make sure no start cell has incoming
# connections
if self.cellsPerColumn > 1:
for c in xrange(self.numberOfCols):
assert self.getNumSegmentsInCell(c, 0) == 0
  def _checkPrediction(self, patternNZs, output=None, colConfidence=None,
                       details=False):
    """
    Produce goodness-of-match scores for a set of input patterns, by
    checking for their presence in the current and predicted output of the
    TM. Returns a global count of the number of extra and missing bits, the
    confidence scores for each input pattern, and (if requested) the
    bits in each input pattern that were not present in the TM's prediction.

    :param patternNZs: a list of input patterns to check for. Each element
           is a list of the non-zero column indices in that pattern.
    :param output: the output of the TM. If not specified, uses the TM's
           current output (useful for checking a past output).
    :param colConfidence: the column confidences. If not specified, uses the
           TM's current self.colConfidence['t'].
    :param details: if True, also include the bits of each pattern that were
           missing from the output.
    :returns: list containing:

              [
                totalExtras,
                totalMissing,
                [conf_1, conf_2, ...],
                [missing1, missing2, ...]   # only when details=True
              ]

    @retval totalExtras a global count of the number of 'extras', i.e. bits
                        that are on in the current output but not in the OR
                        of all the passed in patterns
    @retval totalMissing a global count of all the missing bits, i.e. the
                        bits that are on in the OR of the patterns, but not
                        in the current output
    @retval conf_i the confidence score for the i'th pattern in patternNZs.
                        This is a 3-tuple:
                        (predictionScore, posPredictionScore,
                         negPredictionScore)
    @retval missing_i the bits in the i'th pattern that were missing in the
                        output. This list is only returned if details is
                        True.
    """
    # TODO: Add option to check predictedState only.

    # Get the non-zeros in each pattern
    numPatterns = len(patternNZs)

    # Compute the union of all the expected patterns
    orAll = set()
    orAll = orAll.union(*patternNZs)

    # Get the list of active columns in the output (a column is active if any
    # of its cells is on)
    if output is None:
      assert self.currentOutput is not None
      output = self.currentOutput
    output = set(output.sum(axis=1).nonzero()[0])

    # Compute the total extra and missing in the output
    totalExtras = len(output.difference(orAll))
    totalMissing = len(orAll.difference(output))

    # Get the percent confidence level per column by summing the confidence
    # levels of the cells in the column. During training, each segment's
    # confidence number is computed as a running average of how often it
    # correctly predicted bottom-up activity on that column. A cell's
    # confidence number is taken from the first active segment found in the
    # cell. Note that confidence will only be non-zero for predicted columns.
    if colConfidence is None:
      colConfidence = self.colConfidence['t']

    # Assign confidences to each pattern
    confidences = []
    for i in xrange(numPatterns):
      # Sum of the column confidences for this pattern
      positivePredictionSum = colConfidence[patternNZs[i]].sum()
      # How many columns in this pattern
      positiveColumnCount = len(patternNZs[i])

      # Sum of all the column confidences
      totalPredictionSum = colConfidence.sum()
      # Total number of columns
      totalColumnCount = len(colConfidence)

      negativePredictionSum = totalPredictionSum - positivePredictionSum
      negativeColumnCount = totalColumnCount - positiveColumnCount

      # NOTE: the scores below are the raw confidence *sums* for the pattern
      # (they are not divided by the column counts); the counts are only used
      # to zero the score when a side has no columns at all.
      if positiveColumnCount != 0:
        positivePredictionScore = positivePredictionSum
      else:
        positivePredictionScore = 0.0

      # Same for the columns outside this pattern
      if negativeColumnCount != 0:
        negativePredictionScore = negativePredictionSum
      else:
        negativePredictionScore = 0.0

      # Scale the positive and negative prediction scores so that they sum to
      # 1.0
      currentSum = negativePredictionScore + positivePredictionScore
      if currentSum > 0:
        positivePredictionScore *= 1.0/currentSum
        negativePredictionScore *= 1.0/currentSum

      predictionScore = positivePredictionScore - negativePredictionScore

      confidences.append((predictionScore,
                          positivePredictionScore,
                          negativePredictionScore))

    # Include detail? (bits in each pattern that were missing from the output)
    if details:
      missingPatternBits = [set(pattern).difference(output)
                            for pattern in patternNZs]
      return (totalExtras, totalMissing, confidences, missingPatternBits)
    else:
      return (totalExtras, totalMissing, confidences)
def _isSegmentActive(self, seg, activeState):
"""
A segment is active if it has >= activationThreshold connected
synapses that are active due to activeState.
Notes: studied various cutoffs, none of which seem to be worthwhile
list comprehension didn't help either
:param seg TODO: document
:param activeState TODO: document
"""
# Computing in C - *much* faster
return isSegmentActive(seg.syns, activeState,
self.connectedPerm, self.activationThreshold)
def _getSegmentActivityLevel(self, seg, activeState,
connectedSynapsesOnly=False):
"""
This routine computes the activity level of a segment given activeState.
It can tally up only connected synapses (permanence >= connectedPerm), or
all the synapses of the segment, at either t or t-1.
:param seg TODO: document
:param activeState TODO: document
:param connectedSynapsesOnly TODO: document
"""
# Computing in C - *much* faster
return getSegmentActivityLevel(seg.syns, activeState, connectedSynapsesOnly,
self.connectedPerm)
def _getBestMatchingCell(self, c, activeState, minThreshold):
"""
Find weakly activated cell in column with at least minThreshold active
synapses.
:param c which column to look at
:param activeState the active cells
:param minThreshold minimum number of synapses required
:returns: tuple (cellIdx, segment, numActiveSynapses)
"""
# Collect all cells in column c that have at least minThreshold in the most
# activated segment
bestActivityInCol = minThreshold
bestSegIdxInCol = -1
bestCellInCol = -1
for i in xrange(self.cellsPerColumn):
maxSegActivity = 0
maxSegIdx = 0
for j, s in enumerate(self.cells[c][i]):
activity = self._getSegmentActivityLevel(s, activeState)
if activity > maxSegActivity:
maxSegActivity = activity
maxSegIdx = j
if maxSegActivity >= bestActivityInCol:
bestActivityInCol = maxSegActivity
bestSegIdxInCol = maxSegIdx
bestCellInCol = i
if bestCellInCol == -1:
return (None, None, None)
else:
return (bestCellInCol, self.cells[c][bestCellInCol][bestSegIdxInCol],
bestActivityInCol)
def _getBestMatchingSegment(self, c, i, activeState):
"""
For the given cell, find the segment with the largest number of active
synapses. This routine is aggressive in finding the best match. The
permanence value of synapses is allowed to be below connectedPerm. The number
of active synapses is allowed to be below activationThreshold, but must be
above minThreshold. The routine returns the segment index. If no segments are
found, then an index of -1 is returned.
:param c TODO: document
:param i TODO: document
:param activeState TODO: document
"""
maxActivity, which = self.minThreshold, -1
for j, s in enumerate(self.cells[c][i]):
activity = self._getSegmentActivityLevel(s, activeState,
connectedSynapsesOnly=False)
if activity >= maxActivity:
maxActivity, which = activity, j
if which == -1:
return None
else:
return self.cells[c][i][which]
  def _getCellForNewSegment(self, colIdx):
    """
    Return the index of a cell in this column which is a good candidate
    for adding a new segment.

    When we have fixed size resources in effect, we insure that we pick a
    cell which does not already have the max number of allowed segments. If
    none exists, we choose the least used segment in the column to
    re-allocate. Cell 0 (the start cell) is never chosen when
    cellsPerColumn > 1.

    :param colIdx: which column to look at
    :returns: cell index
    """
    # Not fixed size CLA, just choose a cell randomly
    if self.maxSegmentsPerCell < 0:
      if self.cellsPerColumn > 1:
        # Don't ever choose the start cell (cell # 0) in each column
        i = self._random.getUInt32(self.cellsPerColumn-1) + 1
      else:
        i = 0
      return i

    # Fixed size CLA, choose from among the cells that are below the maximum
    # number of segments.
    # NOTE: It is important NOT to always pick the cell with the fewest number
    # of segments. The reason is that if we always do that, we are more likely
    # to run into situations where we choose the same set of cell indices to
    # represent an 'A' in both context 1 and context 2. This is because the
    # cell indices we choose in each column of a pattern will advance in
    # lockstep (i.e. we pick cell indices of 1, then cell indices of 2, etc.).
    candidateCellIdxs = []
    if self.cellsPerColumn == 1:
      minIdx = 0
      maxIdx = 0
    else:
      # Don't include startCell in the mix
      minIdx = 1
      maxIdx = self.cellsPerColumn-1
    for i in xrange(minIdx, maxIdx+1):
      numSegs = len(self.cells[colIdx][i])
      if numSegs < self.maxSegmentsPerCell:
        candidateCellIdxs.append(i)

    # If we found one, return with it. Note we need to use _random to maintain
    # correspondence with CPP code (don't use random.choice here).
    if len(candidateCellIdxs) > 0:
      candidateCellIdx = (
          candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))])
      if self.verbosity >= 5:
        print "Cell [%d,%d] chosen for new segment, # of segs is %d" % (
            colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx]))
      return candidateCellIdx

    # All cells in the column are full: find the least used segment (lowest
    # duty cycle) in the whole column so it can be freed up.
    candidateSegment = None
    candidateSegmentDC = 1.0
    # For each cell in this column
    for i in xrange(minIdx, maxIdx+1):
      # For each segment in this cell
      for s in self.cells[colIdx][i]:
        dc = s.dutyCycle()
        if dc < candidateSegmentDC:
          candidateCellIdx = i
          candidateSegmentDC = dc
          candidateSegment = s

    # Free up the least used segment
    if self.verbosity >= 5:
      print ("Deleting segment #%d for cell[%d,%d] to make room for new "
             "segment" % (candidateSegment.segID, colIdx, candidateCellIdx))
      candidateSegment.debugPrint()
    self._cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment)
    self.cells[colIdx][candidateCellIdx].remove(candidateSegment)
    return candidateCellIdx
def _getSegmentActiveSynapses(self, c, i, s, activeState, newSynapses=False):
"""
Return a segmentUpdate data structure containing a list of proposed
changes to segment s. Let activeSynapses be the list of active synapses
where the originating cells have their activeState output = 1 at time step
t. (This list is empty if s is None since the segment doesn't exist.)
newSynapses is an optional argument that defaults to false. If newSynapses
is true, then newSynapseCount - len(activeSynapses) synapses are added to
activeSynapses. These synapses are randomly chosen from the set of cells
that have learnState = 1 at timeStep.
:param c TODO: document
:param i TODO: document
:param s TODO: document
:param activeState TODO: document
:param newSynapses TODO: document
"""
activeSynapses = []
if s is not None: # s can be None, if adding a new segment
# Here we add *integers* to activeSynapses
activeSynapses = [idx for idx, syn in enumerate(s.syns) \
if activeState[syn[0], syn[1]]]
if newSynapses: # add a few more synapses
nSynapsesToAdd = self.newSynapseCount - len(activeSynapses)
# Here we add *pairs* (colIdx, cellIdx) to activeSynapses
activeSynapses += self._chooseCellsToLearnFrom(c, i, s, nSynapsesToAdd,
activeState)
# It's still possible that activeSynapses is empty, and this will
# be handled in addToSegmentUpdates
# NOTE: activeSynapses contains a mixture of integers and pairs of integers
# - integers are indices of synapses already existing on the segment,
# that we will need to update.
# - pairs represent source (colIdx, cellIdx) of new synapses to create on
# the segment
update = BacktrackingTM._SegmentUpdate(c, i, s, activeSynapses)
return update
def _chooseCellsToLearnFrom(self, c, i, s, n, activeState):
"""
Choose n random cells to learn from.
This function is called several times while learning with timeStep = t-1, so
we cache the set of candidates for that case. It's also called once with
timeStep = t, and we cache that set of candidates.
:returns: tuple (column index, cell index).
"""
if n <= 0:
return []
tmpCandidates = numpy.where(activeState == 1)
# Candidates can be empty at this point, in which case we return
# an empty segment list. adaptSegments will do nothing when getting
# that list.
if len(tmpCandidates[0]) == 0:
return []
if s is None: # new segment
cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])]
else:
# We exclude any synapse that is already in this segment.
synapsesAlreadyInSegment = set((syn[0], syn[1]) for syn in s.syns)
cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])
if (syn[0], syn[1]) not in synapsesAlreadyInSegment]
# If we have no more candidates than requested, return all of them,
# no shuffle necessary.
if len(cands) <= n:
return cands
if n == 1: # so that we don't shuffle if only one is needed
idx = self._random.getUInt32(len(cands))
return [cands[idx]] # col and cell idx in col
# If we need more than one candidate
indices = numpy.array([j for j in range(len(cands))], dtype='uint32')
tmp = numpy.zeros(min(n, len(indices)), dtype='uint32')
self._random.sample(indices, tmp)
return sorted([cands[j] for j in tmp])
  def _processSegmentUpdates(self, activeColumns):
    """
    Go through the list of accumulated segment updates and process them
    as follows:

    - if the segment update is too old, remove the update
    - else if the cell received bottom-up, update its permanences
    - else if it's still being predicted (and we are pooling), leave it in
      the queue
    - else remove it.

    :param activeColumns: list/array of column indices that received
           bottom-up input this iteration
    """
    # The segmentUpdates dict has keys which are the column,cellIdx of the
    # owner cell. The values are lists of segment updates for that cell
    removeKeys = []
    trimSegments = []
    for key, updateList in self.segmentUpdates.iteritems():

      # Get the column number and cell index of the owner cell
      c, i = key[0], key[1]

      # If the cell received bottom-up, update its segments
      if c in activeColumns:
        action = 'update'

      # If not, either keep it around if it's still predicted, or remove it
      else:
        # If it is still predicted, and we are pooling, keep it around
        if self.doPooling and self.lrnPredictedState['t'][c, i] == 1:
          action = 'keep'
        else:
          action = 'remove'

      # Process each segment for this cell. Each entry in updateList is a
      # tuple (creationDate, segUpdate).
      updateListKeep = []
      if action != 'remove':
        for (createDate, segUpdate) in updateList:

          if self.verbosity >= 4:
            print "_nLrnIterations =", self.lrnIterationIdx,
            print segUpdate

          # If this segment has expired, ignore this update (and hence remove
          # it from the list)
          if self.lrnIterationIdx - createDate > self.segUpdateValidDuration:
            continue

          if action == 'update':
            # _adaptSegment returns True when some synapse hit permanence 0,
            # making the segment a candidate for trimming below.
            trimSegment = self._adaptSegment(segUpdate)
            if trimSegment:
              trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,
                                   segUpdate.segment))
          else:
            # action == 'keep': retain updates that haven't expired yet (the
            # cell is still being predicted)
            updateListKeep.append((createDate, segUpdate))

      self.segmentUpdates[key] = updateListKeep
      if len(updateListKeep) == 0:
        removeKeys.append(key)

    # Clean out empty segment updates
    for key in removeKeys:
      self.segmentUpdates.pop(key)

    # Trim segments that had synapses go to 0
    for (c, i, segment) in trimSegments:
      self._trimSegmentsInCell(c, i, [segment], minPermanence = 0.00001,
                               minNumSyns = 0)
  def _adaptSegment(self, segUpdate):
    """
    Apply segment update information to a segment in a cell.

    Synapses on the active list get their permanence counts incremented by
    permanenceInc. All other synapses get their permanence counts decremented
    by permanenceDec. The segment's positiveActivations count is also
    incremented. When segUpdate.segment is None, a brand-new segment is
    created instead.

    :param segUpdate: SegmentUpdate instance
    :returns: True if some synapses were decremented to 0 and the segment is
        a candidate for trimming
    """
    # This will be set to True if we detect that any synapses were decremented
    # to 0
    trimSegment = False

    # segUpdate.segment is None when creating a new segment
    c, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment

    # segUpdate.activeSynapses can be empty.
    # If not, it can contain either or both integers and tuples.
    # The integers are indices of existing synapses to update.
    # The tuples represent new synapses to create (src col, src cell in col).
    # We pre-process to separate these various element types.
    activeSynapses = segUpdate.activeSynapses
    synToUpdate = set([syn for syn in activeSynapses if type(syn) == int])

    # Modify an existing segment
    if segment is not None:

      if self.verbosity >= 4:
        print "Reinforcing segment #%d for cell[%d,%d]" % (segment.segID, c, i)
        print " before:",
        segment.debugPrint()

      # Mark it as recently useful
      segment.lastActiveIteration = self.lrnIterationIdx

      # Update frequency and positiveActivations
      segment.positiveActivations += 1
      segment.dutyCycle(active=True)

      # First, decrement synapses that are not active
      # s is a synapse *index* into segment.syns
      lastSynIndex = len(segment.syns) - 1
      inactiveSynIndices = [s for s in xrange(0, lastSynIndex+1) \
                            if s not in synToUpdate]
      trimSegment = segment.updateSynapses(inactiveSynIndices,
                                           -self.permanenceDec)

      # Now, increment active synapses (guard against stale indices past the
      # end of the synapse list)
      activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex]
      segment.updateSynapses(activeSynIndices, self.permanenceInc)

      # Finally, create new synapses if needed
      # syn is now a tuple (src col, src cell)
      synsToAdd = [syn for syn in activeSynapses if type(syn) != int]

      # If we have fixed resources, get rid of some old syns if necessary
      if self.maxSynapsesPerSegment > 0 \
          and len(synsToAdd) + len(segment.syns) > self.maxSynapsesPerSegment:
        numToFree = (len(segment.syns) + len(synsToAdd) -
                     self.maxSynapsesPerSegment)
        segment.freeNSynapses(numToFree, inactiveSynIndices, self.verbosity)
      for newSyn in synsToAdd:
        segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm)

      if self.verbosity >= 4:
        print " after:",
        segment.debugPrint()

    # Create a new segment
    else:
      # Segment() records (segID, sequenceSegment flag, frequency,
      # positiveActivations, totalActivations, lastActiveIteration) itself.
      newSegment = Segment(tm=self, isSequenceSeg=segUpdate.sequenceSegment)

      # In a new-segment update, every element is a (src col, src cell) pair.
      for synapse in activeSynapses:
        newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm)

      if self.verbosity >= 3:
        print "New segment #%d for cell[%d,%d]" % (self.segID-1, c, i),
        newSegment.debugPrint()

      self.cells[c][i].append(newSegment)

    return trimSegment
def getSegmentInfo(self, collectActiveData = False):
  """Returns information about the distribution of segments, synapses and
  permanence values in the current TM. If requested, also returns information
  regarding the number of currently active segments and synapses.

  :param collectActiveData: (bool) If True, also count currently active
         segments and synapses against ``self.infActiveState['t']`` /
         ``self.activeState['t']``. This requires an extra pass over every
         synapse, so it is off by default.

  :returns: tuple described below:

    ::

      (
        nSegments,
        nSynapses,
        nActiveSegs,
        nActiveSynapses,
        distSegSizes,
        distNSegsPerCell,
        distPermValues,
        distAges
      )

    - ``nSegments``: (int) total number of segments
    - ``nSynapses``: (int) total number of synapses
    - ``nActiveSegs``: (int) total number of active segments (0 if
      ``collectActiveData`` is False)
    - ``nActiveSynapses``: (int) total number of active synapses 0 if
      ``collectActiveData`` is False
    - ``distSegSizes``: (dict) where d[n] = number of segments with n synapses
    - ``distNSegsPerCell``: (dict) where d[n] = number of cells with n segments
    - ``distPermValues``: (dict) where d[p] = number of synapses with perm = p/10
    - ``distAges``: (list) of 2-item lists (``ageRange``, ``numSegments``)
  """
  nSegments, nSynapses = 0, 0
  nActiveSegs, nActiveSynapses = 0, 0
  distSegSizes, distNSegsPerCell = {}, {}
  distPermValues = {}   # Num synapses with given permanence values

  # Partition segment ages into numAgeBuckets equal-width buckets covering
  # [0, lrnIterationIdx]. Each entry is a mutable ["lo-hi", count] pair.
  numAgeBuckets = 20
  distAges = []
  ageBucketSize = int((self.lrnIterationIdx + numAgeBuckets) / numAgeBuckets)
  for i in range(numAgeBuckets):
    distAges.append(['%d-%d' % (i*ageBucketSize, (i+1)*ageBucketSize-1), 0])

  for c in range(self.numberOfCols):
    for i in range(self.cellsPerColumn):
      nSegmentsThisCell = len(self.cells[c][i])
      if nSegmentsThisCell == 0:
        continue
      nSegments += nSegmentsThisCell
      # dict.get() replaces the deprecated dict.has_key() idiom (same result).
      distNSegsPerCell[nSegmentsThisCell] = (
          distNSegsPerCell.get(nSegmentsThisCell, 0) + 1)

      for seg in self.cells[c][i]:
        nSynapsesThisSeg = seg.getNumSynapses()
        nSynapses += nSynapsesThisSeg
        distSegSizes[nSynapsesThisSeg] = (
            distSegSizes.get(nSynapsesThisSeg, 0) + 1)

        # Accumulate permanence value histogram, bucketed at 0.1 resolution
        for syn in seg.syns:
          p = int(syn[2]*10)
          distPermValues[p] = distPermValues.get(p, 0) + 1

        # Accumulate segment age histogram
        age = self.lrnIterationIdx - seg.lastActiveIteration
        ageBucket = int(age/ageBucketSize)
        distAges[ageBucket][1] += 1

        # Get active synapse statistics if requested
        if collectActiveData:
          if self._isSegmentActive(seg, self.infActiveState['t']):
            nActiveSegs += 1
          for syn in seg.syns:
            if self.activeState['t'][syn[0]][syn[1]] == 1:
              nActiveSynapses += 1

  return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,
          distSegSizes, distNSegsPerCell, distPermValues, distAges)
class Segment(object):
  """
  The Segment class is a container for all of the segment variables and
  the synapses it owns.

  Each synapse is stored as a 3-item list ``[srcCellCol, srcCellIdx,
  permanence]`` (see :meth:`addSynapse`); permanences are kept as
  numpy.float32 so results match the C++ implementation bit-for-bit.
  """

  ## These are iteration count tiers used when computing segment duty cycle.
  #  The highest tier that lrnIterationIdx has passed selects the alpha used
  #  by dutyCycle() below.
  dutyCycleTiers = [0, 100, 320, 1000,
                    3200, 10000, 32000, 100000,
                    320000]

  ## This is the alpha used in each tier. dutyCycleAlphas[n] is used when
  #  `iterationIdx > dutyCycleTiers[n]`.
  dutyCycleAlphas = [None, 0.0032, 0.0010, 0.00032,
                     0.00010, 0.000032, 0.00001, 0.0000032,
                     0.0000010]


  def __init__(self, tm, isSequenceSeg):
    """Create an empty segment owned by cells of `tm`.

    :param tm: the owning BacktrackingTM; consulted for iteration counters,
           permanence limits, and the TM-wide segment ID counter.
    :param isSequenceSeg: True if this is a sequence (first-order) segment.
    """
    self.tm = tm
    # Draw a unique ID from (and advance) the TM-wide counter.
    self.segID = tm.segID
    tm.segID += 1

    self.isSequenceSeg = isSequenceSeg
    self.lastActiveIteration = tm.lrnIterationIdx

    self.positiveActivations = 1
    self.totalActivations = 1

    # These are internal variables used to compute the positive activations
    # duty cycle.
    # Callers should use dutyCycle().
    # NOTE: the division requires tm.lrnIterationIdx >= 1 at creation time.
    self._lastPosDutyCycle = 1.0 / tm.lrnIterationIdx
    self._lastPosDutyCycleIteration = tm.lrnIterationIdx

    # Each synapse is a tuple (srcCellCol, srcCellIdx, permanence)
    self.syns = []


  def __str__(self):
    # Tuple repr of all state, including the private duty-cycle cache.
    return str((self.segID, self.isSequenceSeg, self.lastActiveIteration,
                self.positiveActivations, self.totalActivations,
                self._lastPosDutyCycle, self._lastPosDutyCycleIteration,
                self.syns))


  def __ne__(self, s):
    return not self == s


  def __eq__(self, s):
    # Compare the scalar bookkeeping fields first (cheap rejections).
    if (self.segID != s.segID or
        self.isSequenceSeg != s.isSequenceSeg or
        self.lastActiveIteration != s.lastActiveIteration or
        self.positiveActivations != s.positiveActivations or
        self.totalActivations != s.totalActivations or
        self._lastPosDutyCycle != s._lastPosDutyCycle or
        self._lastPosDutyCycleIteration != s._lastPosDutyCycleIteration):
      return False
    if len(self.syns) != len(s.syns):
      return False
    # Permanences are float32 and may have been round-tripped through
    # serialization, so compare them with a small tolerance rather than
    # exact equality. Source col/cell indices must match exactly.
    for syn1, syn2 in zip(self.syns, s.syns):
      if syn1[0] != syn2[0] or syn1[1] != syn2[1]:
        return False
      if abs(syn1[2] - syn2[2]) > 0.000001:
        return False
    return True


  def write(self, proto):
    """Serialize this segment into `proto` (a SegmentProto-style builder).

    :param proto: proto builder object; must expose the segment fields and
           an ``init("synapses", n)`` list initializer (capnp convention).
    """
    proto.segID = self.segID
    proto.isSequenceSeg = self.isSequenceSeg
    proto.lastActiveIteration = self.lastActiveIteration
    proto.positiveActivations = self.positiveActivations
    proto.totalActivations = self.totalActivations
    proto.lastPosDutyCycle = self._lastPosDutyCycle
    proto.lastPosDutyCycleIteration = self._lastPosDutyCycleIteration

    synapseListProto = proto.init("synapses", len(self.syns))
    for i, syn in enumerate(self.syns):
      synProto = synapseListProto[i]
      synProto.srcCellCol = syn[0]
      synProto.srcCellIdx = syn[1]
      synProto.permanence = float(syn[2])


  @classmethod
  def read(cls, proto, tm):
    """Deserialize a segment written by :meth:`write`.

    :param proto: proto reader object (counterpart of :meth:`write`)
    :param tm: the owning BacktrackingTM to attach the new segment to
    :returns: a new Segment instance
    """
    # Bypass __init__ so the proto values are not clobbered by the
    # constructor's defaults (and tm.segID is not advanced).
    obj = object.__new__(cls)
    obj.tm = tm
    obj.segID = int(proto.segID)
    obj.isSequenceSeg = proto.isSequenceSeg
    obj.lastActiveIteration = int(proto.lastActiveIteration)
    obj.positiveActivations = int(proto.positiveActivations)
    obj.totalActivations = int(proto.totalActivations)
    obj._lastPosDutyCycle = proto.lastPosDutyCycle
    obj._lastPosDutyCycleIteration = int(proto.lastPosDutyCycleIteration)

    obj.syns = []
    for synProto in proto.synapses:
      obj.addSynapse(synProto.srcCellCol, synProto.srcCellIdx,
                     synProto.permanence)

    return obj


  def dutyCycle(self, active=False, readOnly=False):
    """Compute/update and return the positive activations duty cycle of
    this segment. This is a measure of how often this segment is
    providing good predictions.

    :param active: True if segment just provided a good prediction
    :param readOnly: If True, compute the updated duty cycle, but don't change
           the cached value. This is used by debugging print statements.

    :returns: The duty cycle, a measure of how often this segment is
              providing good predictions.

    **NOTE:** This method relies on different schemes to compute the duty cycle
    based on how much history we have. In order to support this tiered
    approach **IT MUST BE CALLED ON EVERY SEGMENT AT EACH DUTY CYCLE TIER**
    (@ref dutyCycleTiers).

    When we don't have a lot of history yet (first tier), we simply return
    number of positive activations / total number of iterations

    After a certain number of iterations have accumulated, it converts into
    a moving average calculation, which is updated only when requested
    since it can be a bit expensive to compute on every iteration (it uses
    the pow() function).

    The duty cycle is computed as follows:

        dc[t] = (1-alpha) * dc[t-1] + alpha * value[t]

    If the value[t] has been 0 for a number of steps in a row, you can apply
    all of the updates at once using:

        dc[t] = (1-alpha)^(t-lastT) * dc[lastT]

    We use the alphas and tiers as defined in @ref dutyCycleAlphas and
    @ref dutyCycleTiers.
    """
    # For tier #0, compute it from total number of positive activations seen
    if self.tm.lrnIterationIdx <= self.dutyCycleTiers[1]:
      dutyCycle = float(self.positiveActivations) \
                  / self.tm.lrnIterationIdx
      if not readOnly:
        self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx
        self._lastPosDutyCycle = dutyCycle
      return dutyCycle

    # How old is our cached value?
    age = self.tm.lrnIterationIdx - self._lastPosDutyCycleIteration

    # If it's already up to date, we can return our cached value.
    if age == 0 and not active:
      return self._lastPosDutyCycle

    # Figure out which alpha we're using: the highest tier whose threshold
    # lrnIterationIdx has exceeded. (Tier 0 was handled above, so the loop
    # always finds a match and `alpha` is always bound.)
    for tierIdx in range(len(self.dutyCycleTiers)-1, 0, -1):
      if self.tm.lrnIterationIdx > self.dutyCycleTiers[tierIdx]:
        alpha = self.dutyCycleAlphas[tierIdx]
        break

    # Update duty cycle: decay the cached value over `age` idle iterations,
    # then add this iteration's contribution if the segment was active.
    dutyCycle = pow(1.0-alpha, age) * self._lastPosDutyCycle
    if active:
      dutyCycle += alpha

    # Update cached values if not read-only
    if not readOnly:
      self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx
      self._lastPosDutyCycle = dutyCycle

    return dutyCycle


  def debugPrint(self):
    """Print segment information for verbose messaging and debugging.
    This uses the following format:

     ID:54413 True 0.64801 (24/36) 101 [9,1]0.75 [10,1]0.75 [11,1]0.75

    where:
      54413 - is the unique segment id
      True - is sequence segment
      0.64801 - moving average duty cycle
      (24/36) - (numPositiveActivations / numTotalActivations)
      101 - age, number of iterations since last activated
      [9,1]0.75 - synapse from column 9, cell #1, strength 0.75
      [10,1]0.75 - synapse from column 10, cell #1, strength 0.75
      [11,1]0.75 - synapse from column 11, cell #1, strength 0.75
    """
    # Segment ID
    print "ID:%-5d" % (self.segID),

    # Sequence segment or pooling segment
    if self.isSequenceSeg:
      print "True",
    else:
      print "False",

    # Duty cycle (readOnly so printing doesn't perturb the cached value)
    print "%9.7f" % (self.dutyCycle(readOnly=True)),

    # numPositive/totalActivations
    print "(%4d/%-4d)" % (self.positiveActivations,
                          self.totalActivations),

    # Age
    print "%4d" % (self.tm.lrnIterationIdx - self.lastActiveIteration),

    # Print each synapses on this segment as: srcCellCol/srcCellIdx/perm
    # if the permanence is above connected, put [] around the synapse info
    # For aid in comparing to the C++ implementation, print them in sorted
    # order
    sortedSyns = sorted(self.syns)
    for _, synapse in enumerate(sortedSyns):
      print "[%d,%d]%4.2f" % (synapse[0], synapse[1], synapse[2]),
    print


  def isSequenceSegment(self):
    # Accessor for the sequence-segment flag.
    return self.isSequenceSeg


  def getNumSynapses(self):
    # Number of synapses currently on this segment.
    return len(self.syns)


  def freeNSynapses(self, numToFree, inactiveSynapseIndices, verbosity= 0):
    """Free up some synapses in this segment. We always free up inactive
    synapses (lowest permanence freed up first) before we start to free up
    active ones.

    :param numToFree: number of synapses to free up
    :param inactiveSynapseIndices: list of the inactive synapse indices
    :param verbosity: console verbosity; >= 4 prints before/after details
    """
    # Make sure numToFree isn't larger than the total number of syns we have
    assert (numToFree <= len(self.syns))

    if (verbosity >= 4):
      print "\nIn PY freeNSynapses with numToFree =", numToFree,
      print "inactiveSynapseIndices =",
      for i in inactiveSynapseIndices:
        print self.syns[i][0:2],
      print

    # Remove the lowest perm inactive synapses first
    if len(inactiveSynapseIndices) > 0:
      perms = numpy.array([self.syns[i][2] for i in inactiveSynapseIndices])
      candidates = numpy.array(inactiveSynapseIndices)[
          perms.argsort()[0:numToFree]]
      candidates = list(candidates)
    else:
      candidates = []

    # Do we need more? if so, remove the lowest perm active synapses too
    if len(candidates) < numToFree:
      activeSynIndices = [i for i in xrange(len(self.syns))
                          if i not in inactiveSynapseIndices]
      perms = numpy.array([self.syns[i][2] for i in activeSynIndices])
      moreToFree = numToFree - len(candidates)
      moreCandidates = numpy.array(activeSynIndices)[
          perms.argsort()[0:moreToFree]]
      candidates += list(moreCandidates)

    if verbosity >= 4:
      print "Deleting %d synapses from segment to make room for new ones:" % (
          len(candidates)), candidates
      print "BEFORE:",
      self.debugPrint()

    # Free up all the candidates now. Deleting by value (not index) so that
    # earlier removals don't shift the indices of later candidates.
    synsToDelete = [self.syns[i] for i in candidates]
    for syn in synsToDelete:
      self.syns.remove(syn)

    if verbosity >= 4:
      print "AFTER:",
      self.debugPrint()


  def addSynapse(self, srcCellCol, srcCellIdx, perm):
    """Add a new synapse.

    :param srcCellCol: source cell column
    :param srcCellIdx: source cell index within the column
    :param perm: initial permanence (stored as numpy.float32 for C++ parity)
    """
    self.syns.append([int(srcCellCol), int(srcCellIdx), numpy.float32(perm)])


  def updateSynapses(self, synapses, delta):
    """Update a set of synapses in the segment.

    :param synapses: List of synapse indices to update
    :param delta: How much to add to each permanence

    :returns: True if any synapse's permanence reached 0 (only possible
              when delta is negative)
    """
    reached0 = False

    if delta > 0:
      for synapse in synapses:
        self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta

        # Cap synapse permanence at permanenceMax
        if newValue > self.tm.permanenceMax:
          self.syns[synapse][2] = self.tm.permanenceMax

    else:
      for synapse in synapses:
        self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta

        # Cap min synapse permanence to 0 in case there is no global decay
        if newValue <= 0:
          self.syns[synapse][2] = 0
          reached0 = True

    return reached0
# This is necessary for unpickling objects that have instances of the nested
# class, since pickle resolves classes by their module-level name: the nested
# BacktrackingTM._SegmentUpdate must therefore also be reachable as a
# top-level attribute of this module.
_SegmentUpdate = BacktrackingTM._SegmentUpdate
| 143,422 | Python | .py | 3,087 | 38.961127 | 129 | 0.674247 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,909 | sdr_classifier_factory.py | numenta_nupic-legacy/src/nupic/algorithms/sdr_classifier_factory.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Module providing a factory for instantiating a SDR classifier."""
from nupic.algorithms.sdr_classifier import SDRClassifier
from nupic.algorithms.sdr_classifier_diff import SDRClassifierDiff
from nupic.bindings.algorithms import SDRClassifier as FastSDRClassifier
from nupic.support.configuration import Configuration
class SDRClassifierFactory(object):
"""Factory for instantiating SDR classifiers."""
@staticmethod
def create(*args, **kwargs):
"""
Create a SDR classifier factory.
The implementation of the SDR Classifier can be specified with
the "implementation" keyword argument.
The SDRClassifierFactory uses the implementation as specified in
`Default NuPIC Configuration <default-config.html>`_.
"""
impl = kwargs.pop('implementation', None)
if impl is None:
impl = Configuration.get('nupic.opf.sdrClassifier.implementation')
if impl == 'py':
return SDRClassifier(*args, **kwargs)
elif impl == 'cpp':
return FastSDRClassifier(*args, **kwargs)
elif impl == 'diff':
return SDRClassifierDiff(*args, **kwargs)
else:
raise ValueError('Invalid classifier implementation (%r). Value must be '
'"py", "cpp" or "diff".' % impl)
@staticmethod
def read(proto):
"""
:param proto: SDRClassifierRegionProto capnproto object
"""
impl = proto.implementation
if impl == 'py':
return SDRClassifier.read(proto.sdrClassifier)
elif impl == 'cpp':
return FastSDRClassifier.read(proto.sdrClassifier)
elif impl == 'diff':
return SDRClassifierDiff.read(proto.sdrClassifier)
else:
raise ValueError('Invalid classifier implementation (%r). Value must be '
'"py", "cpp" or "diff".' % impl)
| 2,754 | Python | .py | 63 | 39.460317 | 79 | 0.693025 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,910 | backtracking_tm_shim.py | numenta_nupic-legacy/src/nupic/algorithms/backtracking_tm_shim.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A shim for the TM class that transparently implements TemporalMemory,
for use with OPF.
"""
import numpy
from nupic.bindings.algorithms import TemporalMemory as TemporalMemoryCPP
from nupic.algorithms.monitor_mixin.temporal_memory_monitor_mixin import (
TemporalMemoryMonitorMixin)
from nupic.algorithms.temporal_memory import TemporalMemory
class MonitoredTemporalMemory(TemporalMemoryMonitorMixin,
TemporalMemory):
def __init__(self, *args, **kwargs):
TemporalMemoryMonitorMixin.__init__(self, *args, **kwargs)
TemporalMemory.__init__(self, *args, **kwargs)
@classmethod
def read(cls, proto):
"""
Intercepts TemporalMemory deserialization request in order to initialize
`TemporalMemoryMonitorMixin` state
@param proto (DynamicStructBuilder) Proto object
@return (TemporalMemory) TemporalMemory shim instance
"""
tm = super(TemporalMemoryMonitorMixin, cls).read(proto)
# initialize `TemporalMemoryMonitorMixin` attributes
tm.mmName = None
tm._mmTraces = None
tm._mmData = None
tm.mmClearHistory()
tm._mmResetActive = True
return tm
class TMShimMixin(object):
"""
TM => Temporal Memory shim class.
"""
def __init__(self,
numberOfCols=500,
cellsPerColumn=10,
initialPerm=0.11,
connectedPerm=0.50,
minThreshold=8,
newSynapseCount=15,
permanenceInc=0.10,
permanenceDec=0.10,
permanenceMax=1.0,
activationThreshold=12,
predictedSegmentDecrement=0.0,
maxSegmentsPerCell=255,
maxSynapsesPerSegment=255,
globalDecay=0.10,
maxAge=100000,
pamLength=1,
verbosity=0,
outputType="normal",
seed=42):
"""
Translate parameters and initialize member variables specific to `backtracking_tm.py`.
"""
super(TMShimMixin, self).__init__(
columnDimensions=(numberOfCols,),
cellsPerColumn=cellsPerColumn,
activationThreshold=activationThreshold,
initialPermanence=initialPerm,
connectedPermanence=connectedPerm,
minThreshold=minThreshold,
maxNewSynapseCount=newSynapseCount,
permanenceIncrement=permanenceInc,
permanenceDecrement=permanenceDec,
predictedSegmentDecrement=predictedSegmentDecrement,
maxSegmentsPerCell=maxSegmentsPerCell,
maxSynapsesPerSegment=maxSynapsesPerSegment,
seed=seed)
self.infActiveState = {"t": None}
@classmethod
def read(cls, proto):
"""
Intercepts TemporalMemory deserialization request in order to initialize
`self.infActiveState`
@param proto (DynamicStructBuilder) Proto object
@return (TemporalMemory) TemporalMemory shim instance
"""
tm = super(TMShimMixin, cls).read(proto)
tm.infActiveState = {"t": None}
return tm
def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):
"""
(From `backtracking_tm.py`)
Handle one compute, possibly learning.
@param bottomUpInput The bottom-up input, typically from a spatial pooler
@param enableLearn If true, perform learning
@param computeInfOutput If None, default behavior is to disable the inference
output when enableLearn is on.
If true, compute the inference output
If false, do not compute the inference output
"""
super(TMShimMixin, self).compute(set(bottomUpInput.nonzero()[0]),
learn=enableLearn)
numberOfCells = self.numberOfCells()
activeState = numpy.zeros(numberOfCells)
activeState[self.getActiveCells()] = 1
self.infActiveState["t"] = activeState
output = numpy.zeros(numberOfCells)
output[self.getPredictiveCells()] = 1
output[self.getActiveCells()] = 1
return output
def topDownCompute(self, topDownIn=None):
"""
(From `backtracking_tm.py`)
Top-down compute - generate expected input given output of the TM
@param topDownIn top down input from the level above us
@returns best estimate of the TM input that would have generated bottomUpOut.
"""
output = numpy.zeros(self.numberOfColumns())
columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()]
output[columns] = 1
return output
def getActiveState(self):
activeState = numpy.zeros(self.numberOfCells())
activeState[self.getActiveCells()] = 1
return activeState
def getPredictedState(self):
predictedState = numpy.zeros(self.numberOfCells())
predictedState[self.getPredictiveCells()] = 1
return predictedState
def getLearnActiveStateT(self):
state = numpy.zeros([self.numberOfColumns(), self.getCellsPerColumn()])
return state
class TMShim(TMShimMixin, TemporalMemory):
pass
class TMCPPShim(TMShimMixin, TemporalMemoryCPP):
pass
class MonitoredTMShim(MonitoredTemporalMemory):
"""
TM => Monitored Temporal Memory shim class.
TODO: This class is not very DRY. This whole file needs to be replaced by a
pure TemporalMemory region
(WIP at https://github.com/numenta/nupic.research/pull/247).
"""
def __init__(self,
numberOfCols=500,
cellsPerColumn=10,
initialPerm=0.11,
connectedPerm=0.50,
minThreshold=8,
newSynapseCount=15,
permanenceInc=0.10,
permanenceDec=0.10,
permanenceMax=1.0,
activationThreshold=12,
predictedSegmentDecrement=0.0,
maxSegmentsPerCell=255,
maxSynapsesPerSegment=255,
globalDecay=0.10,
maxAge=100000,
pamLength=1,
verbosity=0,
outputType="normal",
seed=42):
"""
Translate parameters and initialize member variables specific to `backtracking_tm.py`.
"""
super(MonitoredTMShim, self).__init__(
columnDimensions=(numberOfCols,),
cellsPerColumn=cellsPerColumn,
activationThreshold=activationThreshold,
initialPermanence=initialPerm,
connectedPermanence=connectedPerm,
minThreshold=minThreshold,
maxNewSynapseCount=newSynapseCount,
permanenceIncrement=permanenceInc,
permanenceDecrement=permanenceDec,
predictedSegmentDecrement=predictedSegmentDecrement,
maxSegmentsPerCell=maxSegmentsPerCell,
maxSynapsesPerSegment=maxSynapsesPerSegment,
seed=seed)
self.infActiveState = {"t": None}
@classmethod
def read(cls, proto):
"""
Intercepts TemporalMemory deserialization request in order to initialize
`self.infActiveState`
@param proto (DynamicStructBuilder) Proto object
@return (TemporalMemory) TemporalMemory shim instance
"""
tm = super(MonitoredTMShim, cls).read(proto)
tm.infActiveState = {"t": None}
return tm
def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):
"""
(From `backtracking_tm.py`)
Handle one compute, possibly learning.
@param bottomUpInput The bottom-up input, typically from a spatial pooler
@param enableLearn If true, perform learning
@param computeInfOutput If None, default behavior is to disable the inference
output when enableLearn is on.
If true, compute the inference output
If false, do not compute the inference output
"""
super(MonitoredTMShim, self).compute(set(bottomUpInput.nonzero()[0]),
learn=enableLearn)
numberOfCells = self.numberOfCells()
activeState = numpy.zeros(numberOfCells)
activeState[self.getActiveCells()] = 1
self.infActiveState["t"] = activeState
output = numpy.zeros(numberOfCells)
output[self.getPredictiveCells() + self.getActiveCells()] = 1
return output
def topDownCompute(self, topDownIn=None):
"""
(From `backtracking_tm.py`)
Top-down compute - generate expected input given output of the TM
@param topDownIn top down input from the level above us
@returns best estimate of the TM input that would have generated bottomUpOut.
"""
output = numpy.zeros(self.numberOfColumns())
columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()]
output[columns] = 1
return output
def getActiveState(self):
activeState = numpy.zeros(self.numberOfCells())
activeState[self.getActiveCells()] = 1
return activeState
def getPredictedState(self):
predictedState = numpy.zeros(self.numberOfCells())
predictedState[self.getPredictiveCells()] = 1
return predictedState
def getLearnActiveStateT(self):
state = numpy.zeros([self.numberOfColumns(), self.cellsPerColumn])
return state
| 9,992 | Python | .py | 248 | 32.923387 | 90 | 0.683852 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,911 | knn_classifier.py | numenta_nupic-legacy/src/nupic/algorithms/knn_classifier.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This module implements a k nearest neighbor classifier."""
import numpy
from nupic.bindings.math import (NearestNeighbor, min_score_per_category)
from nupic.serializable import Serializable
try:
import capnp
except ImportError:
capnp = None
import numpy
if capnp:
from nupic.algorithms.knn_classifier_capnp import KNNClassifierProto
g_debugPrefix = "KNN"
KNNCLASSIFIER_VERSION = 1
EPSILON = 0.00001 # constant error threshold to check equality of floats
EPSILON_ROUND = 5 # Used to round floats
def _labeledInput(activeInputs, cellsPerCol=32):
"""Print the list of [column, cellIdx] indices for each of the active
cells in activeInputs.
"""
if cellsPerCol == 0:
cellsPerCol = 1
cols = activeInputs.size / cellsPerCol
activeInputs = activeInputs.reshape(cols, cellsPerCol)
(cols, cellIdxs) = activeInputs.nonzero()
if len(cols) == 0:
return "NONE"
items = ["(%d): " % (len(cols))]
prevCol = -1
for (col,cellIdx) in zip(cols, cellIdxs):
if col != prevCol:
if prevCol != -1:
items.append("] ")
items.append("Col %d: [" % col)
prevCol = col
items.append("%d," % cellIdx)
items.append("]")
return " ".join(items)
class KNNClassifier(Serializable):
"""
This class implements NuPIC's k Nearest Neighbor Classifier. KNN is very
useful as a basic classifier for many situations. This implementation contains
many enhancements that are useful for HTM experiments. These enhancements
include an optimized C++ class for sparse vectors, support for continuous
online learning, support for various distance methods (including Lp-norm and
raw overlap), support for performing SVD on the input vectors (very useful for
large vectors), support for a fixed-size KNN, and a mechanism to store custom
ID's for each vector.
:param k: (int) The number of nearest neighbors used in the classification
of patterns. Must be odd.
:param exact: (boolean) If true, patterns must match exactly when assigning
class labels
:param distanceNorm: (int) When distance method is "norm", this specifies
the p value of the Lp-norm
:param distanceMethod: (string) The method used to compute distance between
input patterns and prototype patterns. The possible options are:
- ``norm``: When distanceNorm is 2, this is the euclidean distance,
When distanceNorm is 1, this is the manhattan distance
In general: sum(abs(x-proto) ^ distanceNorm) ^ (1/distanceNorm)
The distances are normalized such that farthest prototype from
a given input is 1.0.
- ``rawOverlap``: Only appropriate when inputs are binary. This computes:
(width of the input) - (# bits of overlap between input
and prototype).
- ``pctOverlapOfInput``: Only appropriate for binary inputs. This computes
1.0 - (# bits overlap between input and prototype) /
(# ON bits in input)
- ``pctOverlapOfProto``: Only appropriate for binary inputs. This computes
1.0 - (# bits overlap between input and prototype) /
(# ON bits in prototype)
- ``pctOverlapOfLarger``: Only appropriate for binary inputs. This computes
1.0 - (# bits overlap between input and prototype) /
max(# ON bits in input, # ON bits in prototype)
:param distThreshold: (float) A threshold on the distance between learned
patterns and a new pattern proposed to be learned. The distance must be
greater than this threshold in order for the new pattern to be added to
the classifier's memory.
:param doBinarization: (boolean) If True, then scalar inputs will be
binarized.
:param binarizationThreshold: (float) If doBinarization is True, this
specifies the threshold for the binarization of inputs
:param useSparseMemory: (boolean) If True, classifier will use a sparse
memory matrix
:param sparseThreshold: (float) If useSparseMemory is True, input variables
whose absolute values are less than this threshold will be stored as
zero
:param relativeThreshold: (boolean) Flag specifying whether to multiply
sparseThreshold by max value in input
:param numWinners: (int) Number of elements of the input that are stored. If
0, all elements are stored
:param numSVDSamples: (int) Number of samples the must occur before a SVD
(Singular Value Decomposition) transformation will be performed. If 0,
the transformation will never be performed
:param numSVDDims: (string) Controls dimensions kept after SVD
transformation. If "adaptive", the number is chosen automatically
:param fractionOfMax: (float) If numSVDDims is "adaptive", this controls the
smallest singular value that is retained as a fraction of the largest
singular value
:param verbosity: (int) Console verbosity level where 0 is no output and
larger integers provide increasing levels of verbosity
:param maxStoredPatterns: (int) Limits the maximum number of the training
patterns stored. When KNN learns in a fixed capacity mode, the unused
patterns are deleted once the number of stored patterns is greater than
maxStoredPatterns. A value of -1 is no limit
:param replaceDuplicates: (bool) A boolean flag that determines whether,
during learning, the classifier replaces duplicates that match exactly,
even if distThreshold is 0. Should be True for online learning
:param cellsPerCol: (int) If >= 1, input is assumed to be organized into
columns, in the same manner as the temporal memory AND whenever a new
prototype is stored, only the start cell (first cell) is stored in any
bursting column
:param minSparsity: (float) If useSparseMemory is set, only vectors with
sparsity >= minSparsity will be stored during learning. A value of 0.0
implies all vectors will be stored. A value of 0.1 implies only vectors
with at least 10% sparsity will be stored
"""
def __init__(self, k=1,
exact=False,
distanceNorm=2.0,
distanceMethod="norm",
distThreshold=0,
doBinarization=False,
binarizationThreshold=0.5,
useSparseMemory=True,
sparseThreshold=0.1,
relativeThreshold=False,
numWinners=0,
numSVDSamples=0,
numSVDDims=None,
fractionOfMax=None,
verbosity=0,
maxStoredPatterns=-1,
replaceDuplicates=False,
cellsPerCol=0,
minSparsity=0.0):
self.version = KNNCLASSIFIER_VERSION
self.k = k
self.exact = exact
self.distanceNorm = distanceNorm
assert (distanceMethod in ("norm", "rawOverlap", "pctOverlapOfLarger",
"pctOverlapOfProto", "pctOverlapOfInput"))
self.distanceMethod = distanceMethod
self.distThreshold = distThreshold
self.doBinarization = doBinarization
self.binarizationThreshold = binarizationThreshold
self.useSparseMemory = useSparseMemory
self.sparseThreshold = sparseThreshold
self.relativeThreshold = relativeThreshold
self.numWinners = numWinners
self.numSVDSamples = numSVDSamples or 0
self.numSVDDims = numSVDDims
self.fractionOfMax = fractionOfMax
if self.numSVDDims=="adaptive":
self._adaptiveSVDDims = True
else:
self._adaptiveSVDDims = False
self.verbosity = verbosity
self.replaceDuplicates = replaceDuplicates
self.cellsPerCol = cellsPerCol
self.maxStoredPatterns = maxStoredPatterns
self.minSparsity = minSparsity
self.clear()
def clear(self):
  """Resets the classifier to its freshly-constructed, empty state."""
  # Pattern storage: a NearestNeighbor sparse matrix or a dense numpy
  # array; _M is the "active" slice of _Memory in dense mode.
  self._Memory = None
  self._numPatterns = 0
  self._M = None
  # Per-stored-pattern metadata
  self._categoryList = []
  self._partitionIdList = []
  self._partitionIdMap = {}
  self._finishedLearning = False
  self._iterationIdx = -1

  # Fixed capacity KNN: evict the least recently matched pattern once
  # maxStoredPatterns is exceeded (sparse memory only).
  if self.maxStoredPatterns > 0:
    assert self.useSparseMemory, ("Fixed capacity KNN is implemented only "
                                  "in the sparse memory mode")
    self.fixedCapacity = True
    self._categoryRecencyList = []
  else:
    self.fixedCapacity = False

  # Cached per-prototype sizes (row sums); recomputed after learning
  self._protoSizes = None

  # SVD/PCA projection state
  self._s = None
  self._vt = None
  self._nc = None
  self._mean = None

  # Network Builder support
  self._specificIndexTraining = False
  self._nextTrainingIndices = None
def _doubleMemoryNumRows(self):
m = 2 * self._Memory.shape[0]
n = self._Memory.shape[1]
self._Memory = numpy.resize(self._Memory,(m,n))
self._M = self._Memory[:self._numPatterns]
def _sparsifyVector(self, inputPattern, doWinners=False):
# Do sparsification, using a relative or absolute threshold
if not self.relativeThreshold:
inputPattern = inputPattern*(abs(inputPattern) > self.sparseThreshold)
elif self.sparseThreshold > 0:
inputPattern = inputPattern * \
(abs(inputPattern) > (self.sparseThreshold * abs(inputPattern).max()))
# Do winner-take-all
if doWinners:
if (self.numWinners>0) and (self.numWinners < (inputPattern > 0).sum()):
sparseInput = numpy.zeros(inputPattern.shape)
# Don't consider strongly negative numbers as winners.
sorted = inputPattern.argsort()[0:self.numWinners]
sparseInput[sorted] += inputPattern[sorted]
inputPattern = sparseInput
# Do binarization
if self.doBinarization:
# Don't binarize negative numbers to positive 1.
inputPattern = (inputPattern > self.binarizationThreshold).astype(float)
return inputPattern
def prototypeSetCategory(self, idToCategorize, newCategory):
  """
  Allows ids to be assigned a category and subsequently enables users to use:

  - :meth:`~.KNNClassifier.KNNClassifier.removeCategory`
  - :meth:`~.KNNClassifier.KNNClassifier.closestTrainingPattern`
  - :meth:`~.KNNClassifier.KNNClassifier.closestOtherTrainingPattern`

  Silently does nothing if the id is unknown.
  """
  # EAFP: look up the row for this id; unknown ids are a no-op
  try:
    recordIndex = self._categoryRecencyList.index(idToCategorize)
  except ValueError:
    return
  self._categoryList[recordIndex] = newCategory
def removeIds(self, idsToRemove):
  """
  Removes every stored pattern whose row id appears in idsToRemove.

  There are two caveats. First, this is a potentially slow operation. Second,
  pattern indices will shift if patterns before them are removed.

  :param idsToRemove: A list of row indices to remove.
  """
  # Translate row ids into current row positions
  rowsToRemove = []
  for rowIndex, rowID in enumerate(self._categoryRecencyList):
    if rowID in idsToRemove:
      rowsToRemove.append(rowIndex)
  # Delegate the actual deletion
  self._removeRows(rowsToRemove)
def removeCategory(self, categoryToRemove):
  """
  Removes every stored pattern carrying the given category label.

  There are two caveats. First, this is a potentially slow operation. Second,
  pattern indices will shift if patterns before them are removed.

  :param categoryToRemove: Category label to remove
  :returns: the number of patterns removed. (Fix: previously only the
      empty-memory path returned a count (0) while the normal path
      returned None; now every path returns the count, matching
      ``_removeRows``.)
  """
  if self._Memory is None:
    return 0
  # The internal category indices are stored in float
  # format, so we should compare with a float
  catToRemove = float(categoryToRemove)

  # Form a list of all rows to remove
  rowsToRemove = [k for k, catID in enumerate(self._categoryList)
                  if catID == catToRemove]

  # Remove rows from the classifier and report how many went away
  numRemoved = self._removeRows(rowsToRemove)
  assert catToRemove not in self._categoryList
  return numRemoved
def _removeRows(self, rowsToRemove):
  """
  A list of row indices to remove. There are two caveats. First, this is
  a potentially slow operation. Second, pattern indices will shift if
  patterns before them are removed.

  :param rowsToRemove: list of row indices to delete, in ascending order
      (the backwards iteration below relies on this)
  :returns: the number of rows removed
  """
  # Form a numpy array of row indices to be removed
  removalArray = numpy.array(rowsToRemove)

  # Remove categories
  self._categoryList = numpy.delete(numpy.array(self._categoryList),
                                    removalArray).tolist()

  # Keep the recency bookkeeping aligned with the surviving rows
  if self.fixedCapacity:
    self._categoryRecencyList = numpy.delete(
      numpy.array(self._categoryRecencyList), removalArray).tolist()

  # Remove the partition ID, if any for these rows and rebuild the id map.
  for row in reversed(rowsToRemove):  # Go backwards so indices stay valid
    # Remove these patterns from partitionList
    self._partitionIdList.pop(row)
  self._rebuildPartitionIdMap(self._partitionIdList)

  # Remove actual patterns
  if self.useSparseMemory:
    # Delete backwards so earlier deletions don't shift later indices
    for rowIndex in rowsToRemove[::-1]:
      self._Memory.deleteRow(rowIndex)
  else:
    self._M = numpy.delete(self._M, removalArray, 0)

  numRemoved = len(rowsToRemove)

  # Sanity checks
  numRowsExpected = self._numPatterns - numRemoved
  if self.useSparseMemory:
    if self._Memory is not None:
      assert self._Memory.nRows() == numRowsExpected
  else:
    assert self._M.shape[0] == numRowsExpected
  assert len(self._categoryList) == numRowsExpected

  self._numPatterns -= numRemoved
  return numRemoved
def doIteration(self):
  """
  Utility method to increment the iteration index. Intended for models that
  don't learn each timestep.
  """
  self._iterationIdx = self._iterationIdx + 1
def learn(self, inputPattern, inputCategory, partitionId=None, isSparse=0,
          rowID=None):
  """
  Train the classifier to associate specified input pattern with a
  particular category.

  :param inputPattern: (list) The pattern to be assigned a category. If
      isSparse is 0, this should be a dense array (both ON and OFF bits
      present). Otherwise, if isSparse > 0, this should be a list of the
      indices of the non-zero bits in sorted order

  :param inputCategory: (int) The category to be associated to the training
      pattern

  :param partitionId: (int) partitionID allows you to associate an id with each
      input vector. It can be used to associate input patterns stored in the
      classifier with an external id. This can be useful for debugging or
      visualizing. Another use case is to ignore vectors with a specific id
      during inference (see description of infer() for details). There can be
      at most one partitionId per stored pattern (i.e. if two patterns are
      within distThreshold, only the first partitionId will be stored). This
      is an optional parameter.

  :param isSparse: (int) 0 if the input pattern is a dense representation.
      When the input pattern is a list of non-zero indices, then isSparse
      is the number of total bits (n). E.g. for the dense array
      [0, 1, 1, 0, 0, 1], isSparse should be `0`. For the equivalent sparse
      representation [1, 2, 5] (which specifies the indices of active bits),
      isSparse should be `6`, which is the total number of bits in the input
      space.

  :param rowID: (int) stored-row stamp; defaults to the current iteration
      index and is kept in _categoryRecencyList as a recency marker in
      fixed-capacity mode

  :returns: The number of patterns currently stored in the classifier
  """
  if self.verbosity >= 1:
    print "%s learn:" % g_debugPrefix
    print "  category:", int(inputCategory)
    print "  active inputs:", _labeledInput(inputPattern,
                                            cellsPerCol=self.cellsPerCol)

  # Sparse inputs must be sorted index lists that fit in the dense width
  if isSparse > 0:
    assert all(inputPattern[i] <= inputPattern[i+1]
               for i in xrange(len(inputPattern)-1)), \
                     "Sparse inputPattern must be sorted."
    assert all(bit < isSparse for bit in inputPattern), \
      ("Sparse inputPattern must not index outside the dense "
       "representation's bounds.")

  if rowID is None:
    rowID = self._iterationIdx

  # Dense vectors
  if not self.useSparseMemory:

    # Not supported
    assert self.cellsPerCol == 0, "not implemented for dense vectors"

    # If the input was given in sparse form, convert it to dense
    if isSparse > 0:
      denseInput = numpy.zeros(isSparse)
      denseInput[inputPattern] = 1.0
      inputPattern = denseInput

    if self._specificIndexTraining and not self._nextTrainingIndices:
      # Specific index mode without any index provided - skip training
      return self._numPatterns

    if self._Memory is None:
      # Initialize memory with 100 rows and numPatterns = 0
      inputWidth = len(inputPattern)
      self._Memory = numpy.zeros((100,inputWidth))
      self._numPatterns = 0
      self._M = self._Memory[:self._numPatterns]

    addRow = True

    if self._vt is not None:
      # Compute projection onto the SVD eigenvectors
      inputPattern = numpy.dot(self._vt, inputPattern - self._mean)

    if self.distThreshold > 0:
      # Check if input is too close to an existing input to be accepted
      dist = self._calcDistance(inputPattern)
      minDist = dist.min()
      addRow = (minDist >= self.distThreshold)

    if addRow:
      self._protoSizes = None     # need to re-compute
      if self._numPatterns == self._Memory.shape[0]:
        # Double the size of the memory
        self._doubleMemoryNumRows()

      if not self._specificIndexTraining:
        # Normal learning - append the new input vector
        self._Memory[self._numPatterns] = inputPattern
        self._numPatterns += 1
        self._categoryList.append(int(inputCategory))
      else:
        # Specific index training mode - insert vector in specified slot
        vectorIndex = self._nextTrainingIndices.pop(0)
        while vectorIndex >= self._Memory.shape[0]:
          self._doubleMemoryNumRows()
        self._Memory[vectorIndex] = inputPattern
        self._numPatterns = max(self._numPatterns, vectorIndex + 1)
        # Pad the category list with -1 (invalid) up to the target slot
        if vectorIndex >= len(self._categoryList):
          self._categoryList += [-1] * (vectorIndex -
                                        len(self._categoryList) + 1)
        self._categoryList[vectorIndex] = int(inputCategory)

      # Set _M to the "active" part of _Memory
      self._M = self._Memory[0:self._numPatterns]

      self._addPartitionId(self._numPatterns-1, partitionId)

  # Sparse vectors
  else:

    # If the input was given in sparse form, convert it to dense if necessary:
    # the SVD projection, distance thresholding, and winner-take-all paths
    # all need a dense vector.
    if isSparse > 0 and (self._vt is not None or self.distThreshold > 0 \
            or self.numSVDDims is not None or self.numSVDSamples > 0 \
            or self.numWinners > 0):
      denseInput = numpy.zeros(isSparse)
      denseInput[inputPattern] = 1.0
      inputPattern = denseInput
      isSparse = 0

    # Get the input width
    if isSparse > 0:
      inputWidth = isSparse
    else:
      inputWidth = len(inputPattern)

    # Allocate storage if this is the first training vector
    if self._Memory is None:
      self._Memory = NearestNeighbor(0, inputWidth)

    # Support SVD if it is on
    if self._vt is not None:
      inputPattern = numpy.dot(self._vt, inputPattern - self._mean)

    # Threshold the input, zeroing out entries that are too close to 0.
    #  This is only done if we are given a dense input.
    # NOTE(review): if the input stays sparse (isSparse > 0) and
    # cellsPerCol >= 1, thresholdedInput below is never assigned and the
    # bursting-column block would raise NameError -- confirm callers
    # never combine those two settings.
    if isSparse == 0:
      thresholdedInput = self._sparsifyVector(inputPattern, True)
    addRow = True

    # If given the layout of the cells, then turn on the logic that stores
    # only the start cell for bursting columns.
    if self.cellsPerCol >= 1:
      # A column bursts when every one of its cells is active (min > 0)
      burstingCols = thresholdedInput.reshape(-1,
                                self.cellsPerCol).min(axis=1).nonzero()[0]
      for col in burstingCols:
        thresholdedInput[(col * self.cellsPerCol) + 1 :
                         (col * self.cellsPerCol) + self.cellsPerCol] = 0

    # Don't learn entries that are too close to existing entries.
    if self._Memory.nRows() > 0:
      dist = None
      # if this vector is a perfect match for one we already learned, then
      #  replace the category - it may have changed with online learning on.
      if self.replaceDuplicates:
        dist = self._calcDistance(thresholdedInput, distanceNorm=1)
        if dist.min() == 0:
          rowIdx = dist.argmin()
          self._categoryList[rowIdx] = int(inputCategory)
          if self.fixedCapacity:
            self._categoryRecencyList[rowIdx] = rowID
          addRow = False

      # Don't add this vector if it matches closely with another we already
      #  added
      if self.distThreshold > 0:
        if dist is None or self.distanceNorm != 1:
          dist = self._calcDistance(thresholdedInput)
        minDist = dist.min()
        addRow = (minDist >= self.distThreshold)
        if not addRow:
          if self.fixedCapacity:
            # Refresh the recency stamp of the near-duplicate we kept
            rowIdx = dist.argmin()
            self._categoryRecencyList[rowIdx] = rowID

    # If sparsity is too low, we do not want to add this vector
    if addRow and self.minSparsity > 0.0:
      if isSparse==0:
        sparsity = ( float(len(thresholdedInput.nonzero()[0])) /
                     len(thresholdedInput) )
      else:
        sparsity = float(len(inputPattern)) / isSparse
      if sparsity < self.minSparsity:
        addRow = False

    # Add the new sparse vector to our storage
    if addRow:
      self._protoSizes = None     # need to re-compute
      if isSparse == 0:
        self._Memory.addRow(thresholdedInput)
      else:
        self._Memory.addRowNZ(inputPattern, [1]*len(inputPattern))
      self._numPatterns += 1
      self._categoryList.append(int(inputCategory))
      self._addPartitionId(self._numPatterns-1, partitionId)
      if self.fixedCapacity:
        self._categoryRecencyList.append(rowID)
        # Over capacity: evict the least recently used/matched pattern
        if self._numPatterns > self.maxStoredPatterns and \
          self.maxStoredPatterns > 0:
          leastRecentlyUsedPattern = numpy.argmin(self._categoryRecencyList)
          self._Memory.deleteRow(leastRecentlyUsedPattern)
          self._categoryList.pop(leastRecentlyUsedPattern)
          self._categoryRecencyList.pop(leastRecentlyUsedPattern)
          self._numPatterns -= 1

  # Once enough samples have been accumulated, build the SVD projection
  if self.numSVDDims is not None and self.numSVDSamples > 0 \
        and self._numPatterns == self.numSVDSamples:
    self.computeSVD()

  return self._numPatterns
def getOverlaps(self, inputPattern):
  """
  Return the degree of overlap between an input pattern and each category
  stored in the classifier. The overlap is computed by computing:

  .. code-block:: python

    logical_and(inputPattern != 0, trainingPattern != 0).sum()

  :param inputPattern: pattern to check overlap of

  :returns: (overlaps, categories) Two numpy arrays of the same length, where:
            * overlaps: an integer overlap amount for each category
            * categories: category index for each element of overlaps
  """
  assert self.useSparseMemory, "Not implemented yet for dense storage"
  # rightVecSumAtNZ sums the input at each prototype's non-zero positions
  return (self._Memory.rightVecSumAtNZ(inputPattern), self._categoryList)
def getDistances(self, inputPattern):
  """Return the distances between the input pattern and all other
  stored patterns.

  :param inputPattern: pattern to check distance with

  :returns: (distances, categories) numpy arrays of the same length.
            - distances: the distance from the input to each stored pattern
            - categories: category index for each element of distances
  """
  return (self._getDistances(inputPattern), self._categoryList)
def infer(self, inputPattern, computeScores=True, overCategories=True,
          partitionId=None):
  """Finds the category that best matches the input pattern. Returns the
  winning category index as well as a distribution over all categories.

  :param inputPattern: (list or array) The pattern to be classified. This
      must be a dense representation of the array (e.g. [0, 0, 1, 1, 0, 1]).

  :param computeScores: NO EFFECT

  :param overCategories: NO EFFECT

  :param partitionId: (int) If provided, all training vectors with partitionId
      equal to that of the input pattern are ignored.
      For example, this may be used to perform k-fold cross validation
      without repopulating the classifier. First partition all the data into
      k equal partitions numbered 0, 1, 2, ... and then call learn() for each
      vector passing in its partitionId. Then, during inference, by passing
      in the partition ID in the call to infer(), all other vectors with the
      same partitionId are ignored simulating the effect of repopulating the
      classifier while ommitting the training vectors in the same partition.

  :returns: 4-tuple with these keys:

    - ``winner``: The category with the greatest number of nearest neighbors
      within the kth nearest neighbors. If the inferenceResult contains no
      neighbors, the value of winner is None. This can happen, for example,
      in cases of exact matching, if there are no stored vectors, or if
      minSparsity is not met.
    - ``inferenceResult``: A list of length numCategories, each entry contains
      the number of neighbors within the top k neighbors that are in that
      category.
    - ``dist``: A list of length numPrototypes. Each entry is the distance
      from the unknown to that prototype. All distances are between 0.0 and
      1.0.
    - ``categoryDist``: A list of length numCategories. Each entry is the
      distance from the unknown to the nearest prototype of
      that category. All distances are between 0 and 1.0.
  """
  # Calculate sparsity. If sparsity is too low, we do not want to run
  # inference with this vector
  sparsity = 0.0
  if self.minSparsity > 0.0:
    sparsity = ( float(len(inputPattern.nonzero()[0])) /
                 len(inputPattern) )

  if len(self._categoryList) == 0 or sparsity < self.minSparsity:
    # No categories learned yet; i.e. first inference w/ online learning or
    # insufficient sparsity
    winner = None
    inferenceResult = numpy.zeros(1)
    dist = numpy.ones(1)
    categoryDist = numpy.ones(1)

  else:
    maxCategoryIdx = max(self._categoryList)
    inferenceResult = numpy.zeros(maxCategoryIdx+1)
    dist = self._getDistances(inputPattern, partitionId=partitionId)
    # Rows with category -1 are invalid slots, not real prototypes
    validVectorCount = len(self._categoryList) - self._categoryList.count(-1)

    # Loop through the indices of the nearest neighbors.
    if self.exact:
      # Is there an exact match in the distances?
      exactMatches = numpy.where(dist<0.00001)[0]
      if len(exactMatches) > 0:
        for i in exactMatches[:min(self.k, validVectorCount)]:
          inferenceResult[self._categoryList[i]] += 1.0
    else:
      # NOTE(review): `sorted` shadows the builtin within this scope
      sorted = dist.argsort()
      for j in sorted[:min(self.k, validVectorCount)]:
        inferenceResult[self._categoryList[j]] += 1.0

    # Prepare inference results: normalize the votes to fractions
    if inferenceResult.any():
      winner = inferenceResult.argmax()
      inferenceResult /= inferenceResult.sum()
    else:
      winner = None
    categoryDist = min_score_per_category(maxCategoryIdx,
                                          self._categoryList, dist)
    # Clamp in place to the documented [0, 1] range
    categoryDist.clip(0, 1.0, categoryDist)

  if self.verbosity >= 1:
    print "%s infer:" % (g_debugPrefix)
    print "  active inputs:",  _labeledInput(inputPattern,
                                             cellsPerCol=self.cellsPerCol)
    print "  winner category:", winner
    print "  pct neighbors of each category:", inferenceResult
    print "  dist of each prototype:", dist
    print "  dist of each category:", categoryDist

  result = (winner, inferenceResult, dist, categoryDist)
  return result
def getClosest(self, inputPattern, topKCategories=3):
  """Returns the index of the pattern that is closest to inputPattern,
  the distances of all patterns to inputPattern, and the indices of the k
  closest categories.
  """
  dist = self._getDistances(inputPattern)
  nearestOrder = dist.argsort()

  # Vote among the k nearest valid (category != -1) prototypes
  votes = numpy.zeros(max(self._categoryList) + 1)
  validVectorCount = len(self._categoryList) - self._categoryList.count(-1)
  for rowIdx in nearestOrder[:min(self.k, validVectorCount)]:
    votes[self._categoryList[rowIdx]] += 1.0
  winner = votes.argmax()

  # (category, distance) pairs for the topKCategories nearest prototypes
  topNCats = [(self._categoryList[nearestOrder[i]], dist[nearestOrder[i]])
              for i in range(topKCategories)]

  return winner, dist, topNCats
def closestTrainingPattern(self, inputPattern, cat):
  """Returns the closest training pattern to inputPattern that belongs to
  category "cat".

  :param inputPattern: The pattern whose closest neighbor is sought

  :param cat: The required category of closest neighbor

  :returns: A dense version of the closest training pattern, or None if no
      such patterns exist
  """
  dist = self._getDistances(inputPattern)
  # Walk the prototypes from nearest to farthest until the category matches
  for patIdx in dist.argsort():
    if self._categoryList[patIdx] == cat:
      if self.useSparseMemory:
        return self._Memory.getRow(int(patIdx))
      return self._M[patIdx]
  # No patterns of the requested category are stored
  return None
def closestOtherTrainingPattern(self, inputPattern, cat):
  """Return the closest training pattern that is *not* of the given
  category "cat".

  :param inputPattern: The pattern whose closest neighbor is sought

  :param cat: Training patterns of this category will be ignored no matter
      their distance to inputPattern

  :returns: A dense version of the closest training pattern, or None if no
      such patterns exist
  """
  dist = self._getDistances(inputPattern)
  # Walk the prototypes from nearest to farthest, skipping category `cat`
  for patIdx in dist.argsort():
    if self._categoryList[patIdx] != cat:
      if self.useSparseMemory:
        return self._Memory.getRow(int(patIdx))
      return self._M[patIdx]
  # Every stored pattern belongs to the excluded category
  return None
def getPattern(self, idx, sparseBinaryForm=False, cat=None):
  """Gets a training pattern either by index or category number.

  :param idx: Index of the training pattern

  :param sparseBinaryForm: If true, returns a list of the indices of the
      non-zero bits in the training pattern

  :param cat: If not None, get the first pattern belonging to category cat. If
      this is specified, idx must be None.

  :returns: The training pattern with specified index
  """
  if cat is not None:
    assert idx is None
    # Resolve the first row carrying this category
    idx = self._categoryList.index(cat)

  if self.useSparseMemory:
    (nz, values) = self._Memory.rowNonZeros(idx)
    if sparseBinaryForm:
      return nz
    # Expand the non-zero indices into a dense binary vector
    pattern = numpy.zeros(self._Memory.nCols())
    numpy.put(pattern, nz, 1)
    return pattern

  pattern = self._Memory[idx]
  if sparseBinaryForm:
    pattern = pattern.nonzero()[0]
  return pattern
def getPartitionId(self, i):
  """
  Gets the partition id given an index.

  :param i: index of partition
  :returns: the partition id associated with pattern i. Returns None if no id
      is associated with it.
  :raises RuntimeError: if i is outside [0, numPatterns)
  """
  if not (0 <= i < self._numPatterns):
    raise RuntimeError("index out of bounds")
  partitionId = self._partitionIdList[i]
  # numpy.inf is the internal sentinel for "no partition id"
  return None if partitionId == numpy.inf else partitionId
def getPartitionIdList(self):
  """
  :returns: the internal list of partition ids, one entry per stored
      pattern (numpy.inf marks patterns without an id). The actual list
      object is returned, not a copy.
  """
  return self._partitionIdList
def getNumPartitionIds(self):
  """
  :returns: the number of unique partition Ids stored (the None/inf
      sentinel is never entered into the map, so it is not counted).
  """
  return len(self._partitionIdMap)
def getPartitionIdKeys(self):
  """
  :returns: a list containing unique (non-None) partition Ids (just the keys)
  """
  return self._partitionIdMap.keys()
def getPatternIndicesWithPartitionId(self, partitionId):
  """
  :param partitionId: partition id to look up
  :returns: a list of pattern indices corresponding to this partitionId.
      Return an empty list if there are none.
  """
  return self._partitionIdMap.get(partitionId, [])
def _addPartitionId(self, index, partitionId=None):
"""
Adds partition id for pattern index
"""
if partitionId is None:
self._partitionIdList.append(numpy.inf)
else:
self._partitionIdList.append(partitionId)
indices = self._partitionIdMap.get(partitionId, [])
indices.append(index)
self._partitionIdMap[partitionId] = indices
def _rebuildPartitionIdMap(self, partitionIdList):
"""
Rebuilds the partition Id map using the given partitionIdList
"""
self._partitionIdMap = {}
for row, partitionId in enumerate(partitionIdList):
indices = self._partitionIdMap.get(partitionId, [])
indices.append(row)
self._partitionIdMap[partitionId] = indices
def _calcDistance(self, inputPattern, distanceNorm=None):
  """Calculate the distances from inputPattern to all stored patterns. All
  distances are between 0.0 and 1.0

  :param inputPattern: The pattern from which distances to all other patterns
      are calculated
  :param distanceNorm: Degree of the distance norm.
      NOTE(review): this local override is assigned below but never used --
      both norm branches read self.distanceNorm instead. Callers passing
      distanceNorm=1 (see learn()) do not actually get an L1 distance here;
      confirm whether that is intended.
  """
  if distanceNorm is None:
    distanceNorm = self.distanceNorm

  # Sparse memory
  if self.useSparseMemory:
    # Cache per-prototype sizes (row sums); invalidated whenever rows change
    if self._protoSizes is None:
      self._protoSizes = self._Memory.rowSums()
    # Overlap of the input with each prototype's non-zero positions
    overlapsWithProtos = self._Memory.rightVecSumAtNZ(inputPattern)
    inputPatternSum = inputPattern.sum()

    if self.distanceMethod == "rawOverlap":
      dist = inputPattern.sum() - overlapsWithProtos
    elif self.distanceMethod == "pctOverlapOfInput":
      dist = inputPatternSum - overlapsWithProtos
      if inputPatternSum > 0:
        dist /= inputPatternSum
    elif self.distanceMethod == "pctOverlapOfProto":
      overlapsWithProtos /= self._protoSizes
      dist = 1.0 - overlapsWithProtos
    elif self.distanceMethod == "pctOverlapOfLarger":
      maxVal = numpy.maximum(self._protoSizes, inputPatternSum)
      # NOTE(review): `maxVal.all() > 0` compares a bool with 0 and is
      # therefore equivalent to `maxVal.all()` -- normalize only when
      # every entry of maxVal is non-zero
      if maxVal.all() > 0:
        overlapsWithProtos /= maxVal
      dist = 1.0 - overlapsWithProtos
    elif self.distanceMethod == "norm":
      dist = self._Memory.vecLpDist(self.distanceNorm, inputPattern)
      # Scale into [0, 1] by the farthest prototype
      distMax = dist.max()
      if distMax > 0:
        dist /= distMax
    else:
      raise RuntimeError("Unimplemented distance method %s" %
        self.distanceMethod)

  # Dense memory
  else:
    if self.distanceMethod == "norm":
      # Minkowski p-norm to every stored row, normalized by the maximum
      dist = numpy.power(numpy.abs(self._M - inputPattern), self.distanceNorm)
      dist = dist.sum(1)
      dist = numpy.power(dist, 1.0/self.distanceNorm)
      dist /= dist.max()
    else:
      raise RuntimeError ("Not implemented yet for dense storage....")

  return dist
def _getDistances(self, inputPattern, partitionId=None):
  """Return the distances from inputPattern to all stored patterns.

  :param inputPattern: The pattern from which distances to all other patterns
      are returned
  :param partitionId: If provided, ignore all training vectors with this
      partitionId.
  """
  # Lazily finish learning (e.g. build the SVD projection) on first use
  if not self._finishedLearning:
    self.finishLearning()
    self._finishedLearning = True

  # Project onto the SVD eigenvectors when a projection has been computed
  if self._vt is not None and len(self._vt) > 0:
    inputPattern = numpy.dot(self._vt, inputPattern - self._mean)

  # Apply the same preprocessing used at learning time
  sparseInput = self._sparsifyVector(inputPattern)

  # Compute distances
  dist = self._calcDistance(sparseInput)
  # Invalidate results where category is -1
  if self._specificIndexTraining:
    dist[numpy.array(self._categoryList) == -1] = numpy.inf

  # Ignore vectors with this partition id by setting their distances to inf
  if partitionId is not None:
    dist[self._partitionIdMap.get(partitionId, [])] = numpy.inf

  return dist
def finishLearning(self):
  """
  Used for batch scenarios. This method needs to be called between learning
  and inference.
  """
  # Build the deferred SVD projection if it was requested but never run
  svdPending = self.numSVDDims is not None and self._vt is None
  if svdPending:
    self.computeSVD()
def computeSVD(self, numSVDSamples=0, finalize=True):
  """
  Compute the singular value decomposition (SVD). The SVD is a factorization
  of a real or complex matrix. It factors the matrix `a` as
  `u * np.diag(s) * v`, where `u` and `v` are unitary and `s` is a 1-d array
  of `a`'s singular values.

  **Reason for computing the SVD:** training on many high-dimensional
  vectors is slow; projecting every pattern onto a truncated set of
  eigenvectors reduces dimensionality while the projection is applied
  identically to all patterns.

  :param numSVDSamples: (int) the number of samples to use for the SVD
      computation; 0 means use all stored patterns.

  :param finalize: (bool) whether to apply SVD to the input patterns.

  :returns: (array) The singular values for every matrix, sorted in
      descending order.
  """
  sampleCount = numSVDSamples if numSVDSamples != 0 else self._numPatterns

  # Materialize the stored patterns as a dense matrix
  if self.useSparseMemory:
    self._a = self._Memory.toDense()[:self._numPatterns]
  else:
    self._a = self._Memory[:self._numPatterns]

  # Center the data. In dense mode _a is a view into _Memory, so the
  # in-place subtraction also centers the stored patterns (as before).
  self._mean = numpy.mean(self._a, axis=0)
  self._a -= self._mean

  u, self._s, self._vt = numpy.linalg.svd(self._a[:sampleCount])
  if finalize:
    self._finalizeSVD()

  return self._s
def getAdaptiveSVDDims(self, singularValues, fractionOfMax=0.001):
  """
  Compute the number of eigenvectors (singularValues) to keep.

  :param singularValues: 1-d array of singular values; index 0 is treated
      as the largest (numpy.linalg.svd returns them in descending order)
  :param fractionOfMax: drop all dimensions whose singular value falls
      below this fraction of the largest singular value
  :return: the number of dimensions to keep (at most len(singularValues)-1)
  """
  # Normalize by the largest singular value
  v = singularValues/singularValues[0]
  # First index whose (normalized) singular value is below the cutoff
  idx = numpy.where(v<fractionOfMax)[0]
  if len(idx):
    print "Number of PCA dimensions chosen: ", idx[0], "out of ", len(v)
    return idx[0]
  else:
    # No value fell below the cutoff: keep all but the last dimension
    print "Number of PCA dimensions chosen: ", len(v)-1, "out of ", len(v)
    return len(v)-1
def _finalizeSVD(self, numSVDDims=None):
  """
  Called by computeSVD(). This will project all the patterns onto the
  SVD eigenvectors.

  :param numSVDDims: (int) number of eigenvectors used for projection;
      defaults to self.numSVDDims (which may be the string "adaptive")
  :return: None
  """
  if numSVDDims is not None:
    self.numSVDDims = numSVDDims

  # Resolve "adaptive" into a concrete dimension count
  if self.numSVDDims=="adaptive":
    if self.fractionOfMax is not None:
      self.numSVDDims = self.getAdaptiveSVDDims(self._s, self.fractionOfMax)
    else:
      self.numSVDDims = self.getAdaptiveSVDDims(self._s)

  # Can't keep more dimensions than the decomposition produced
  if self._vt.shape[0] < self.numSVDDims:
    print "******************************************************************"
    print ("Warning: The requested number of PCA dimensions is more than "
           "the number of pattern dimensions.")
    print "Setting numSVDDims = ", self._vt.shape[0]
    print "******************************************************************"
    self.numSVDDims = self._vt.shape[0]

  self._vt = self._vt[:self.numSVDDims]

  # Added when svd is not able to decompose vectors - uses raw sparse vectors
  if len(self._vt) == 0:
    return

  # Replace the sparse storage with a dense matrix of projected patterns
  self._Memory = numpy.zeros((self._numPatterns,self.numSVDDims))
  self._M = self._Memory
  self.useSparseMemory = False

  # Project each stored (centered) pattern onto the kept eigenvectors
  for i in range(self._numPatterns):
    self._Memory[i] = numpy.dot(self._vt, self._a[i])

  # The centered copy is no longer needed
  self._a = None
def remapCategories(self, mapping):
  """Change the category indices.

  Used by the Network Builder to keep the category indices in sync with the
  ImageSensor categoryInfo when the user renames or removes categories.

  :param mapping: List of new category indices. For example, mapping=[2,0,1]
      would change all vectors of category 0 to be category 2, category 1 to
      0, and category 2 to 1. Categories not covered by the mapping become -1.
  """
  categoryArray = numpy.array(self._categoryList)
  newCategoryArray = numpy.zeros(categoryArray.shape[0])
  # Categories absent from the mapping are invalidated
  newCategoryArray.fill(-1)
  # enumerate() instead of xrange(len(...)): clearer and version-portable
  for oldCategory, newCategory in enumerate(mapping):
    newCategoryArray[categoryArray == oldCategory] = newCategory
  # Fix: store plain ints. The rest of the class (learn, read) keeps
  # _categoryList as ints; the previous code left numpy floats here.
  self._categoryList = [int(c) for c in newCategoryArray]
def setCategoryOfVectors(self, vectorIndices, categoryIndices):
  """Change the category associated with this vector(s).

  Used by the Network Builder to move vectors between categories, to enable
  categories, and to invalidate vectors by setting the category to -1.

  :param vectorIndices: Single index or list of indices

  :param categoryIndices: Single index or list of indices. Can also be a
      single index when vectorIndices is a list, in which case the same
      category will be used for all vectors
  """
  # Normalize both arguments into parallel lists
  if not hasattr(vectorIndices, "__iter__"):
    vectorIndices = [vectorIndices]
    categoryIndices = [categoryIndices]
  elif not hasattr(categoryIndices, "__iter__"):
    categoryIndices = [categoryIndices] * len(vectorIndices)

  # zip() instead of xrange(len(...)) index arithmetic: clearer and
  # portable across Python versions
  for vectorIndex, categoryIndex in zip(vectorIndices, categoryIndices):
    # Out-of-bounds is not an error, because the KNN may not have seen the
    # vector yet
    if vectorIndex < len(self._categoryList):
      self._categoryList[vectorIndex] = categoryIndex
@staticmethod
def getSchema():
  """
  :returns: the capnp proto type used by :meth:`read` and :meth:`write`
      for serialization.
  """
  return KNNClassifierProto
@classmethod
def read(cls, proto):
  """Deserialize a classifier from a KNNClassifierProto reader.

  :param proto: capnp KNNClassifierProto reader to restore from
  :returns: a new instance of cls (``__init__`` is deliberately bypassed)
  :raises RuntimeError: if the serialized version does not match
      KNNCLASSIFIER_VERSION
  """
  if proto.version != KNNCLASSIFIER_VERSION:
    raise RuntimeError("Invalid KNNClassifier Version")

  # Bypass __init__; every attribute is restored explicitly below
  knn = object.__new__(cls)
  knn.version = proto.version
  knn.k = proto.k
  knn.exact = proto.exact
  knn.distanceNorm = proto.distanceNorm
  knn.distanceMethod = proto.distanceMethod
  knn.distThreshold = proto.distThreshold
  knn.doBinarization = proto.doBinarization
  # Rounding mirrors write(), which stores values rounded to EPSILON_ROUND
  knn.binarizationThreshold = round(proto.binarizationThreshold,
                                    EPSILON_ROUND)
  knn.useSparseMemory = proto.useSparseMemory
  knn.sparseThreshold = round(proto.sparseThreshold, EPSILON_ROUND)
  knn.relativeThreshold = proto.relativeThreshold
  knn.numWinners = proto.numWinners
  knn.numSVDSamples = proto.numSVDSamples
  # numSVDDims is a capnp union: unset / text ("adaptive") / int
  which = proto.numSVDDims.which()
  if which == "none":
    knn.numSVDDims = None
  elif which == "numSVDDimsText":
    knn.numSVDDims = proto.numSVDDims.numSVDDimsText
  else:
    knn.numSVDDims = proto.numSVDDims.numSVDDimsInt
  # 0 is the proto default and encodes "not set" (None)
  if proto.fractionOfMax != 0:
    knn.fractionOfMax = round(proto.fractionOfMax, EPSILON_ROUND)
  else:
    knn.fractionOfMax = None
  knn.verbosity = proto.verbosity
  knn.maxStoredPatterns = proto.maxStoredPatterns
  knn.replaceDuplicates = proto.replaceDuplicates
  knn.cellsPerCol = proto.cellsPerCol
  knn.minSparsity = proto.minSparsity
  if knn.numSVDDims == "adaptive":
    knn._adaptiveSVDDims = True
  else:
    knn._adaptiveSVDDims = False

  # Read private state: start from a clean slate, then overwrite
  knn.clear()
  # memory is a union: unset / dense ndarray / NearestNeighbor sparse matrix
  which = proto.memory.which()
  if which == "ndarray":
    knn._Memory = numpy.array(proto.memory.ndarray, dtype=numpy.float64)
  elif which == "nearestNeighbor":
    knn._Memory = NearestNeighbor()
    knn._Memory.read(proto.memory.nearestNeighbor)
  else:
    knn._Memory = None
  knn._numPatterns = proto.numPatterns
  if len(proto.m) > 0:
    knn._M = numpy.array(proto.m, dtype=numpy.float64)
  if proto.categoryList is not None:
    knn._categoryList = list(proto.categoryList)
  if proto.partitionIdList is not None:
    knn._partitionIdList = list(proto.partitionIdList)
    # The map is derived state and is reconstructed rather than serialized
    knn._rebuildPartitionIdMap(knn._partitionIdList)
  knn._iterationIdx = proto.iterationIdx
  knn._finishedLearning = proto.finishedLearning
  # SVD state (empty sequences mean "not computed")
  if len(proto.s) > 0:
    knn._s = numpy.array(proto.s, dtype=numpy.float32)
  if len(proto.vt) > 0:
    knn._vt = numpy.array(proto.vt, dtype=numpy.float32)
  if len(proto.mean):
    knn._mean = numpy.array(proto.mean, dtype=numpy.float32)

  return knn
  def write(self, proto):
    """Write the classifier's full state (parameters and learned memory) to
    the given capnp proto instance.

    :param proto: (KnnClassifierProto) the proto instance to populate.
        Counterpart of the ``read()`` classmethod above.
    """
    proto.version = self.version
    proto.k = self.k
    proto.exact = bool(self.exact)
    proto.distanceNorm = self.distanceNorm
    proto.distanceMethod = self.distanceMethod
    # Floats are rounded to EPSILON_ROUND digits for stable round-tripping.
    proto.distThreshold = round(self.distThreshold, EPSILON_ROUND)
    proto.doBinarization = bool(self.doBinarization)
    proto.binarizationThreshold = round(self.binarizationThreshold,
                                        EPSILON_ROUND)
    proto.useSparseMemory = bool(self.useSparseMemory)
    proto.sparseThreshold = round(self.sparseThreshold, EPSILON_ROUND)
    proto.relativeThreshold = bool(self.relativeThreshold)
    proto.numWinners = self.numWinners
    proto.numSVDSamples = self.numSVDSamples
    # numSVDDims is a union field: none, an int, or text (e.g. "adaptive") —
    # mirrors the which() dispatch in read().
    if self.numSVDDims is None:
      proto.numSVDDims.none = None
    elif isinstance(self.numSVDDims, int):
      proto.numSVDDims.numSVDDimsInt = self.numSVDDims
    else:
      proto.numSVDDims.numSVDDimsText = self.numSVDDims
    # 0 is the sentinel for "no fractionOfMax" (see the matching read()).
    if self.fractionOfMax is not None:
      proto.fractionOfMax = round(self.fractionOfMax, EPSILON_ROUND)
    proto.verbosity = self.verbosity
    proto.maxStoredPatterns = self.maxStoredPatterns
    proto.replaceDuplicates = bool(self.replaceDuplicates)
    proto.cellsPerCol = self.cellsPerCol
    proto.minSparsity = self.minSparsity

    # Write private state
    # memory is a union: none, a dense ndarray, or a NearestNeighbor matrix.
    if self._Memory is None:
      proto.memory.none = None
    elif isinstance(self._Memory, numpy.ndarray):
      proto.memory.ndarray = self._Memory.tolist()
    else:
      proto.memory.init("nearestNeighbor")
      self._Memory.write(proto.memory.nearestNeighbor)
    proto.numPatterns = self._numPatterns
    # Optional members are only written when present; read() checks lengths /
    # None accordingly.
    if self._M is not None:
      proto.m = self._M.tolist()
    if self._categoryList is not None:
      proto.categoryList = self._categoryList
    if self._partitionIdList is not None:
      proto.partitionIdList = self._partitionIdList
    proto.finishedLearning = bool(self._finishedLearning)
    proto.iterationIdx = self._iterationIdx
    if self._s is not None:
      proto.s = self._s.tolist()
    if self._vt is not None:
      proto.vt = self._vt.tolist()
    if self._mean is not None:
      proto.mean = self._mean.tolist()
def __getstate__(self):
"""Return serializable state.
This function will return a version of the __dict__.
"""
state = self.__dict__.copy()
return state
def __setstate__(self, state):
"""Set the state of this object from a serialized state."""
if "version" not in state:
pass
elif state["version"] == 1:
pass
elif state["version"] == 2:
raise RuntimeError("Invalid deserialization of invalid KNNClassifier"
"Version")
# Backward compatibility
if "_partitionIdArray" in state:
state.pop("_partitionIdArray")
if "minSparsity" not in state:
state["minSparsity"] = 0.0
self.__dict__.update(state)
# Backward compatibility
if "_partitionIdMap" not in state:
self._rebuildPartitionIdMap(self._partitionIdList)
# Set to new version
self.version = KNNCLASSIFIER_VERSION
| 48,764 | Python | .py | 1,080 | 37.769444 | 81 | 0.681587 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,912 | temporal_memory_shim.py | numenta_nupic-legacy/src/nupic/algorithms/temporal_memory_shim.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A shim for the TemporalMemory class that transparently implements TM,
for use with tests.
"""
import numpy
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
from nupic.algorithms.connections import Connections
from nupic.math import GetNTAReal
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.algorithms.temporal_memory_shim_capnp import (
TemporalMemoryShimProto)
# Concrete backing implementation the shim wraps (the C++ backtracking TM).
TMClass = BacktrackingTMCPP

# Numpy dtype matching the floating point type used by the NuPIC library.
dtype = GetNTAReal()
class TemporalMemoryShim(TMClass):
  """
  Temporal Memory => TM shim class.

  Presents a TemporalMemory-style interface (columnDimensions, activeColumns,
  predictiveCells) on top of the backtracking TM implementation in
  ``TMClass``, for use with tests.
  """

  def __init__(self,
               columnDimensions=(2048,),
               cellsPerColumn=32,
               activationThreshold=13,
               initialPermanence=0.21,
               connectedPermanence=0.50,
               minThreshold=10,
               maxNewSynapseCount=20,
               permanenceIncrement=0.10,
               permanenceDecrement=0.10,
               seed=42):
    """
    Translate TemporalMemory-style parameters into the backtracking TM's
    keyword arguments and initialize member variables specific to
    TemporalMemory.
    """
    # Flatten the column topology: the backtracking TM only knows a count.
    numberOfCols = 1
    for n in columnDimensions:
      numberOfCols *= n

    super(TemporalMemoryShim, self).__init__(
      numberOfCols=numberOfCols,
      cellsPerColumn=cellsPerColumn,
      initialPerm=initialPermanence,
      connectedPerm=connectedPermanence,
      minThreshold=minThreshold,
      newSynapseCount=maxNewSynapseCount,
      permanenceInc=permanenceIncrement,
      permanenceDec=permanenceDecrement,
      permanenceMax=1.0,
      globalDecay=0,
      activationThreshold=activationThreshold,
      seed=seed)

    self.connections = Connections(numberOfCols * cellsPerColumn)
    self.predictiveCells = set()


  def compute(self, activeColumns, learn=True):
    """
    Feeds input record through TM, performing inference and learning.
    Updates member variables with new state.

    @param activeColumns (set) Indices of active columns in `t`
    @param learn (bool) Whether learning is enabled for this step
    """
    # The backtracking TM expects a dense bottom-up vector, not an SDR set.
    bottomUpInput = numpy.zeros(self.numberOfCols, dtype=dtype)
    bottomUpInput[list(activeColumns)] = 1
    super(TemporalMemoryShim, self).compute(bottomUpInput,
                                            enableLearn=learn,
                                            enableInference=True)

    # Expose predictions in TemporalMemory style: a set of cell indices.
    predictedState = self.getPredictedState()
    self.predictiveCells = set(numpy.flatnonzero(predictedState))


  @classmethod
  def getSchema(cls):
    """Return the capnp schema used by read()/write()."""
    return TemporalMemoryShimProto


  @classmethod
  def read(cls, proto):
    """Deserialize from proto instance.

    :param proto: (TemporalMemoryShimProto) the proto instance to read from
    :returns: (TemporalMemoryShim) the deserialized instance
    """
    tm = super(TemporalMemoryShim, cls).read(proto.baseTM)
    tm.predictiveCells = set(proto.predictedState)
    # NOTE(review): "conncetions" matches a (misspelled) field name in the
    # capnp schema; do not "fix" the spelling here without changing the schema.
    tm.connections = Connections.read(proto.conncetions)
    # Bug fix: the deserialized instance was previously not returned, so
    # callers always received None.
    return tm


  def write(self, proto):
    """Populate serialization proto instance.

    :param proto: (TemporalMemoryShimProto) the proto instance to populate
    """
    super(TemporalMemoryShim, self).write(proto.baseTM)
    # Bug fix: write our Connections INTO the proto. The call was previously
    # inverted (proto.connections.write(self.connections)), which is not a
    # capnp struct API and would fail at runtime; this now mirrors the
    # Connections.read(...) call in read() and the cells4 pattern used
    # elsewhere in this package.
    self.connections.write(proto.connections)
    # capnp list fields require a sequence, not a set.
    proto.predictiveCells = list(self.predictiveCells)
| 4,100 | Python | .py | 106 | 32.990566 | 75 | 0.701838 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,913 | sdr_classifier.py | numenta_nupic-legacy/src/nupic/algorithms/sdr_classifier.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Implementation of a SDR classifier.
The SDR classifier takes the form of a single layer classification network
that takes SDRs as input and outputs a predicted distribution of classes.
"""
from collections import deque
import numpy
from nupic.serializable import Serializable
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.proto.SdrClassifier_capnp import SdrClassifierProto
class SDRClassifier(Serializable):
  """
  The SDR Classifier accepts a binary input pattern from the
  level below (the "activationPattern") and information from the sensor and
  encoders (the "classification") describing the true (target) input.

  The SDR classifier maps input patterns to class labels. There are as many
  output units as the number of class labels or buckets (in the case of scalar
  encoders). The output is a probabilistic distribution over all class labels.

  During inference, the output is calculated by first doing a weighted summation
  of all the inputs, and then perform a softmax nonlinear function to get
  the predicted distribution of class labels

  During learning, the connection weights between input units and output units
  are adjusted to maximize the likelihood of the model

  Example Usage:

  .. code-block:: python

     c = SDRClassifier(steps=[1], alpha=0.1, actValueAlpha=0.1, verbosity=0)

     # learning
     c.compute(recordNum=0, patternNZ=[1, 5, 9],
               classification={"bucketIdx": 4, "actValue": 34.7},
               learn=True, infer=False)

     # inference
     result = c.compute(recordNum=1, patternNZ=[1, 5, 9],
                        classification={"bucketIdx": 4, "actValue": 34.7},
                        learn=False, infer=True)

     # Print the top three predictions for 1 steps out.
     topPredictions = sorted(zip(result[1],
                             result["actualValues"]), reverse=True)[:3]
     for probability, value in topPredictions:
       print "Prediction of {} has probability of {}.".format(value,
                                                              probability*100.0)

  References:

  * Alex Graves. Supervised Sequence Labeling with Recurrent Neural Networks,
    PhD Thesis, 2008
  * J. S. Bridle. Probabilistic interpretation of feedforward classification
    network outputs, with relationships to statistical pattern recognition
  * In F. Fogleman-Soulie and J.Herault, editors, Neurocomputing: Algorithms,
    Architectures and Applications, pp 227-236, Springer-Verlag, 1990

  :param steps: (list) Sequence of the different steps of multi-step predictions
      to learn
  :param alpha: (float) The alpha used to adapt the weight matrix during
      learning. A larger alpha results in faster adaptation to the data.
  :param actValueAlpha: (float) Used to track the actual value within each
      bucket. A lower actValueAlpha results in longer term memory
  :param verbosity: (int) verbosity level, can be 0, 1, or 2
  :raises: (ValueError) when record number does not increase monotonically.
  """

  VERSION = 1


  def __init__(self,
               steps=[1],
               alpha=0.001,
               actValueAlpha=0.3,
               verbosity=0):
    # NOTE(review): the mutable default ([1]) is harmless here because the
    # list is only read, never mutated.
    if len(steps) == 0:
      raise TypeError("steps cannot be empty")
    if not all(isinstance(item, int) for item in steps):
      raise TypeError("steps must be a list of ints")
    if any(item < 0 for item in steps):
      raise ValueError("steps must be a list of non-negative ints")
    if alpha < 0:
      raise ValueError("alpha (learning rate) must be a positive number")
    if actValueAlpha < 0 or actValueAlpha >= 1:
      raise ValueError("actValueAlpha be a number between 0 and 1")

    # Save constructor args
    self.steps = steps
    self.alpha = alpha
    self.actValueAlpha = actValueAlpha
    self.verbosity = verbosity

    # Max # of steps of prediction we need to support
    self._maxSteps = max(self.steps) + 1

    # History of the last _maxSteps activation patterns. We need to keep
    # these so that we can associate the current iteration's classification
    # with the activationPattern from N steps ago
    self._patternNZHistory = deque(maxlen=self._maxSteps)

    # This contains the value of the highest input number we've ever seen
    # It is used to pre-allocate fixed size arrays that hold the weights
    self._maxInputIdx = 0

    # This contains the value of the highest bucket index we've ever seen
    # It is used to pre-allocate fixed size arrays that hold the weights of
    # each bucket index during inference
    self._maxBucketIdx = 0

    # The connection weight matrix, one (input x bucket) matrix per step
    self._weightMatrix = dict()
    for step in self.steps:
      self._weightMatrix[step] = numpy.zeros(shape=(self._maxInputIdx+1,
                                                    self._maxBucketIdx+1))

    # This keeps track of the actual value to use for each bucket index. We
    # start with 1 bucket, no actual value so that the first infer has something
    # to return
    self._actualValues = [None]

    # Set the version to the latest version.
    # This is used for serialization/deserialization
    self._version = SDRClassifier.VERSION


  def compute(self, recordNum, patternNZ, classification, learn, infer):
    """
    Process one input sample.

    This method is called by outer loop code outside the nupic-engine. We
    use this instead of the nupic engine compute() because our inputs and
    outputs aren't fixed size vectors of reals.

    :param recordNum: Record number of this input pattern. Record numbers
      normally increase sequentially by 1 each time unless there are missing
      records in the dataset. Knowing this information insures that we don't get
      confused by missing records.

    :param patternNZ: List of the active indices from the output below. When the
      input is from TemporalMemory, this list should be the indices of the
      active cells.

    :param classification: Dict of the classification information where:

      - bucketIdx: list of indices of the encoder bucket
      - actValue: list of actual values going into the encoder

      Classification could be None for inference mode.
    :param learn: (bool) if true, learn this sample
    :param infer: (bool) if true, perform inference

    :return: Dict containing inference results, there is one entry for each
             step in self.steps, where the key is the number of steps, and
             the value is an array containing the relative likelihood for
             each bucketIdx starting from bucketIdx 0.

             There is also an entry containing the average actual value to
             use for each bucket. The key is 'actualValues'.

             for example:

             .. code-block:: python

                {1 : [0.1, 0.3, 0.2, 0.7],
                 4 : [0.2, 0.4, 0.3, 0.5],
                 'actualValues': [1.5, 3,5, 5,5, 7.6],
                }
    """
    if self.verbosity >= 1:
      print "  learn:", learn
      print "  recordNum:", recordNum
      print "  patternNZ (%d):" % len(patternNZ), patternNZ
      print "  classificationIn:", classification

    # ensures that recordNum increases monotonically
    if len(self._patternNZHistory) > 0:
      if recordNum < self._patternNZHistory[-1][0]:
        raise ValueError("the record number has to increase monotonically")

    # Store pattern in our history if this is a new record
    if len(self._patternNZHistory) == 0 or \
        recordNum > self._patternNZHistory[-1][0]:
      self._patternNZHistory.append((recordNum, patternNZ))

    # To allow multi-class classification, we need to be able to run learning
    # without inference being on. So initialize retval outside
    # of the inference block.
    retval = {}

    # Update maxInputIdx and augment weight matrix with zero padding
    if max(patternNZ) > self._maxInputIdx:
      newMaxInputIdx = max(patternNZ)
      for nSteps in self.steps:
        self._weightMatrix[nSteps] = numpy.concatenate((
          self._weightMatrix[nSteps],
          numpy.zeros(shape=(newMaxInputIdx-self._maxInputIdx,
                             self._maxBucketIdx+1))), axis=0)
      self._maxInputIdx = int(newMaxInputIdx)

    # Get classification info: normalize scalar bucketIdx/actValue into
    # one-element lists so multi-category and single-category share one path.
    if classification is not None:
      if type(classification["bucketIdx"]) is not list:
        bucketIdxList = [classification["bucketIdx"]]
        actValueList = [classification["actValue"]]
        numCategory = 1
      else:
        bucketIdxList = classification["bucketIdx"]
        actValueList = classification["actValue"]
        numCategory = len(classification["bucketIdx"])
    else:
      if learn:
        raise ValueError("classification cannot be None when learn=True")
      actValueList = None
      bucketIdxList = None

    # ------------------------------------------------------------------------
    # Inference:
    # For each active bit in the activationPattern, get the classification
    # votes
    if infer:
      retval = self.infer(patternNZ, actValueList)

    if learn and classification["bucketIdx"] is not None:
      for categoryI in range(numCategory):
        bucketIdx = bucketIdxList[categoryI]
        actValue = actValueList[categoryI]

        # Update maxBucketIndex and augment weight matrix with zero padding
        if bucketIdx > self._maxBucketIdx:
          for nSteps in self.steps:
            self._weightMatrix[nSteps] = numpy.concatenate((
              self._weightMatrix[nSteps],
              numpy.zeros(shape=(self._maxInputIdx+1,
                                 bucketIdx-self._maxBucketIdx))), axis=1)
          self._maxBucketIdx = int(bucketIdx)

        # Update rolling average of actual values if it's a scalar. If it's
        # not, it must be a category, in which case each bucket only ever
        # sees one category so we don't need a running average.
        while self._maxBucketIdx > len(self._actualValues) - 1:
          self._actualValues.append(None)
        if self._actualValues[bucketIdx] is None:
          self._actualValues[bucketIdx] = actValue
        else:
          if (isinstance(actValue, int) or
              isinstance(actValue, float) or
              isinstance(actValue, long)):
            self._actualValues[bucketIdx] = ((1.0 - self.actValueAlpha)
                                             * self._actualValues[bucketIdx]
                                             + self.actValueAlpha * actValue)
          else:
            self._actualValues[bucketIdx] = actValue

      for (learnRecordNum, learnPatternNZ) in self._patternNZHistory:
        # Recompute the error for each history entry so that weight updates
        # applied earlier in this loop are reflected in later error terms.
        error = self._calculateError(recordNum, bucketIdxList)
        nSteps = recordNum - learnRecordNum
        if nSteps in self.steps:
          for bit in learnPatternNZ:
            self._weightMatrix[nSteps][bit, :] += self.alpha * error[nSteps]

    # ------------------------------------------------------------------------
    # Verbose print
    if infer and self.verbosity >= 1:
      print "  inference: combined bucket likelihoods:"
      print "    actual bucket values:", retval["actualValues"]
      for (nSteps, votes) in retval.items():
        if nSteps == "actualValues":
          continue
        print "    %d steps: " % (nSteps), _pFormatArray(votes)
        bestBucketIdx = votes.argmax()
        print ("      most likely bucket idx: "
               "%d, value: %s" % (bestBucketIdx,
                                  retval["actualValues"][bestBucketIdx]))
      print

    return retval


  def infer(self, patternNZ, actValueList):
    """
    Return the inference value from one input sample. The actual
    learning happens in compute().

    :param patternNZ: list of the active indices from the output below
    :param actValueList: list of actual values from the classification input,
        or None when no classification input is available

    :return: dict containing inference results, one entry for each step in
             self.steps. The key is the number of steps, the value is an
             array containing the relative likelihood for each bucketIdx
             starting from bucketIdx 0.

             for example:

             .. code-block:: python

                {'actualValues': [0.0, 1.0, 2.0, 3.0]
                  1 : [0.1, 0.3, 0.2, 0.7]
                  4 : [0.2, 0.4, 0.3, 0.5]}
    """
    # Return value dict. For buckets which we don't have an actual value
    # for yet, just plug in any valid actual value. It doesn't matter what
    # we use because that bucket won't have non-zero likelihood anyways.

    # NOTE: If doing 0-step prediction, we shouldn't use any knowledge
    # of the classification input during inference.
    if self.steps[0] == 0 or actValueList is None:
      defaultValue = 0
    else:
      defaultValue = actValueList[0]
    actValues = [x if x is not None else defaultValue
                 for x in self._actualValues]
    retval = {"actualValues": actValues}

    for nSteps in self.steps:
      predictDist = self.inferSingleStep(patternNZ, self._weightMatrix[nSteps])
      retval[nSteps] = predictDist

    return retval


  def inferSingleStep(self, patternNZ, weightMatrix):
    """
    Perform inference for a single step. Given an SDR input and a weight
    matrix, return a predicted distribution.

    :param patternNZ: list of the active indices from the output below
    :param weightMatrix: numpy array of the weight matrix
    :return: numpy array of the predicted class label distribution
    """
    # Sum the weight rows of the active bits, then softmax-normalize.
    outputActivation = weightMatrix[patternNZ].sum(axis=0)

    # softmax normalization (subtract the max for numerical stability)
    outputActivation = outputActivation - numpy.max(outputActivation)
    expOutputActivation = numpy.exp(outputActivation)
    predictDist = expOutputActivation / numpy.sum(expOutputActivation)
    return predictDist


  @classmethod
  def getSchema(cls):
    """Return the capnp schema (SdrClassifierProto) used by read()/write()."""
    return SdrClassifierProto


  @classmethod
  def read(cls, proto):
    """Deserialize an SDRClassifier from the given SdrClassifierProto.

    :param proto: (SdrClassifierProto) the proto instance to read from
    :returns: (SDRClassifier) the deserialized instance
    """
    # __new__ bypasses __init__; every attribute is restored explicitly below.
    classifier = object.__new__(cls)
    classifier.steps = [step for step in proto.steps]
    classifier.alpha = proto.alpha
    classifier.actValueAlpha = proto.actValueAlpha
    classifier._patternNZHistory = deque(maxlen=max(classifier.steps) + 1)
    patternNZHistoryProto = proto.patternNZHistory
    recordNumHistoryProto = proto.recordNumHistory
    for i in xrange(len(patternNZHistoryProto)):
      classifier._patternNZHistory.append((recordNumHistoryProto[i],
                                           list(patternNZHistoryProto[i])))
    classifier._maxSteps = proto.maxSteps
    classifier._maxBucketIdx = proto.maxBucketIdx
    classifier._maxInputIdx = proto.maxInputIdx
    classifier._weightMatrix = {}
    weightMatrixProto = proto.weightMatrix
    for i in xrange(len(weightMatrixProto)):
      classifier._weightMatrix[weightMatrixProto[i].steps] = numpy.reshape(
        weightMatrixProto[i].weight, newshape=(classifier._maxInputIdx+1,
                                               classifier._maxBucketIdx+1))
    classifier._actualValues = []
    for actValue in proto.actualValues:
      if actValue == 0:
        # NOTE(review): 0 is the sentinel write() uses for "no actual value
        # yet", so a genuinely stored value of exactly 0 round-trips as None —
        # confirm this is acceptable for the encoders in use.
        classifier._actualValues.append(None)
      else:
        classifier._actualValues.append(actValue)
    classifier._version = proto.version
    classifier.verbosity = proto.verbosity
    return classifier


  def write(self, proto):
    """Serialize this SDRClassifier into the given SdrClassifierProto.

    :param proto: (SdrClassifierProto) the proto instance to populate
    """
    stepsProto = proto.init("steps", len(self.steps))
    for i in xrange(len(self.steps)):
      stepsProto[i] = self.steps[i]
    proto.alpha = self.alpha
    proto.actValueAlpha = self.actValueAlpha
    # NOTE: technically, saving `_maxSteps` is redundant, since it may be
    # reconstructed from `self.steps` just as in the constructor. Eliminating
    # this attribute from the capnp scheme will involve coordination with
    # nupic.core, where the `SdrClassifierProto` schema resides.
    proto.maxSteps = self._maxSteps

    # NOTE: size of history buffer may be less than `self._maxSteps` if fewer
    # inputs had been processed
    patternProto = proto.init("patternNZHistory", len(self._patternNZHistory))
    recordNumHistoryProto = proto.init("recordNumHistory",
                                       len(self._patternNZHistory))
    for i in xrange(len(self._patternNZHistory)):
      subPatternProto = patternProto.init(i, len(self._patternNZHistory[i][1]))
      for j in xrange(len(self._patternNZHistory[i][1])):
        subPatternProto[j] = int(self._patternNZHistory[i][1][j])
      recordNumHistoryProto[i] = int(self._patternNZHistory[i][0])
    weightMatrices = proto.init("weightMatrix", len(self._weightMatrix))
    i = 0
    for step in self.steps:
      stepWeightMatrixProto = weightMatrices[i]
      stepWeightMatrixProto.steps = step
      # NOTE(review): astype to an anonymous float subclass — presumably to
      # force plain Python floats (not numpy scalars) for capnp; confirm.
      stepWeightMatrixProto.weight = list(
        self._weightMatrix[step].flatten().astype(type('float', (float,), {})))
      i += 1
    proto.maxBucketIdx = self._maxBucketIdx
    proto.maxInputIdx = self._maxInputIdx
    # None entries are stored as 0 (see the matching sentinel logic in read()).
    actualValuesProto = proto.init("actualValues", len(self._actualValues))
    for i in xrange(len(self._actualValues)):
      if self._actualValues[i] is not None:
        actualValuesProto[i] = self._actualValues[i]
      else:
        actualValuesProto[i] = 0
    proto.version = self._version
    proto.verbosity = self.verbosity


  def _calculateError(self, recordNum, bucketIdxList):
    """
    Calculate error signal

    :param recordNum: (int) the current record number; combined with the
        record numbers stored in the pattern history to derive the number of
        prediction steps for each history entry
    :param bucketIdxList: list of encoder buckets

    :return: dict containing error. The key is the number of steps
             The value is a numpy array of error at the output layer
    """
    error = dict()
    # Target distribution: probability mass split evenly over the given
    # bucket indices.
    targetDist = numpy.zeros(self._maxBucketIdx + 1)
    numCategories = len(bucketIdxList)
    for bucketIdx in bucketIdxList:
      targetDist[bucketIdx] = 1.0/numCategories

    for (learnRecordNum, learnPatternNZ) in self._patternNZHistory:
      nSteps = recordNum - learnRecordNum
      if nSteps in self.steps:
        predictDist = self.inferSingleStep(learnPatternNZ,
                                           self._weightMatrix[nSteps])
        error[nSteps] = targetDist - predictDist

    return error
def _pFormatArray(array_, fmt="%.2f"):
"""Return a string with pretty-print of a numpy array using the given format
for each element"""
return "[ " + " ".join(fmt % x for x in array_) + " ]"
| 19,409 | Python | .py | 397 | 40.889169 | 80 | 0.66873 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,914 | backtracking_tm_cpp.py | numenta_nupic-legacy/src/nupic/algorithms/backtracking_tm_cpp.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal memory implementation in C++ wrapped by a Python class.
:class:`BacktrackingTMCPP` wraps the C++ algorithm execution by extending
:class:`~nupic.algorithms.backtracking_tm.BacktrackingTM` and overriding
:meth:`~nupic.algorithms.backtracking_tm.BacktrackingTM.compute`.
"""
try:
import capnp
except ImportError:
capnp = None
import json
import numpy
from numpy import *
if capnp:
from nupic.algorithms.backtracking_tm_cpp_capnp import (
BacktrackingTMCppProto)
from nupic.bindings.algorithms import Cells4
import nupic.math
from nupic.algorithms.backtracking_tm import BacktrackingTM
# Default verbosity while running unit tests
VERBOSITY = 0
# The numpy equivalent to the floating point type used by NTA
dtype = nupic.math.GetNTAReal()
def _extractCallingMethodArgs():
"""
Returns args dictionary from the calling method
"""
import inspect
import copy
callingFrame = inspect.stack()[1][0]
argNames, _, _, frameLocalVarDict = inspect.getargvalues(callingFrame)
argNames.remove("self")
args = copy.copy(frameLocalVarDict)
for varName in frameLocalVarDict:
if varName not in argNames:
args.pop(varName)
return args
class BacktrackingTMCPP(BacktrackingTM):
# We use the same keyword arguments as TM()
  def __init__(self,
               numberOfCols = 500,
               cellsPerColumn = 10,
               initialPerm = 0.11, # TODO: check perm numbers with Ron
               connectedPerm = 0.50,
               minThreshold = 8,
               newSynapseCount = 15,
               permanenceInc = 0.10,
               permanenceDec = 0.10,
               permanenceMax = 1.0, # never exceed this value
               globalDecay = 0.10,
               activationThreshold = 12, # 3/4 of newSynapseCount TODO make fraction
               doPooling = False, # allows to turn off pooling
               segUpdateValidDuration = 5,
               burnIn = 2, # Used for evaluating the prediction score
               collectStats = False, # If true, collect training and inference stats
               seed = 42,
               verbosity = VERBOSITY,
               checkSynapseConsistency = False,
               pamLength = 1,
               maxInfBacktrack = 10,
               maxLrnBacktrack = 5,
               maxAge = 100000,
               maxSeqLength = 32,
               # Fixed size mode params
               maxSegmentsPerCell = -1,
               maxSynapsesPerSegment = -1,
               # Output control
               outputType = 'normal',
               ):
    """Construct the C++-backed TM wrapper.

    Accepts the same keyword arguments as
    :class:`~nupic.algorithms.backtracking_tm.BacktrackingTM`; see that class
    for full parameter documentation.
    """
    #---------------------------------------------------------------------------------
    # Save our __init__ args for debugging
    self._initArgsDict = _extractCallingMethodArgs()

    #---------------------------------------------------------------------------------
    # These two variables are for testing

    # If set to True, Cells4 will perform (time consuming) invariance checks
    self.checkSynapseConsistency = checkSynapseConsistency

    # If set to False, Cells4 will *not* be treated as an ephemeral member
    # and full BacktrackingTMCPP pickling is possible. This is useful for testing
    # pickle/unpickle without saving Cells4 to an external file
    self.makeCells4Ephemeral = False

    #---------------------------------------------------------------------------------
    # Store the seed for constructing Cells4
    self.seed = seed

    #---------------------------------------------------------------------------------
    # Init the base class
    BacktrackingTM.__init__(self,
              numberOfCols = numberOfCols,
              cellsPerColumn = cellsPerColumn,
              initialPerm = initialPerm,
              connectedPerm = connectedPerm,
              minThreshold = minThreshold,
              newSynapseCount = newSynapseCount,
              permanenceInc = permanenceInc,
              permanenceDec = permanenceDec,
              permanenceMax = permanenceMax, # never exceed this value
              globalDecay = globalDecay,
              activationThreshold = activationThreshold,
              doPooling = doPooling,
              segUpdateValidDuration = segUpdateValidDuration,
              burnIn = burnIn,
              collectStats = collectStats,
              seed = seed,
              verbosity = verbosity,
              pamLength = pamLength,
              maxInfBacktrack = maxInfBacktrack,
              maxLrnBacktrack = maxLrnBacktrack,
              maxAge = maxAge,
              maxSeqLength = maxSeqLength,
              maxSegmentsPerCell = maxSegmentsPerCell,
              maxSynapsesPerSegment = maxSynapsesPerSegment,
              outputType = outputType,
              )

    # The Cells4 engine is only built now when it is not ephemeral; otherwise
    # _initEphemerals() constructs it.
    if not self.makeCells4Ephemeral:
      self._initCells4()
  @classmethod
  def getSchema(cls):
    """Return the capnp schema (BacktrackingTMCppProto) used for
    serialization via read()/write()."""
    return BacktrackingTMCppProto
  def write(self, proto):
    """Populate serialization proto instance.

    :param proto: (BacktrackingTMCppProto) the proto instance to populate
    """
    # Write base class to proto.baseTM (BacktrackingTMProto)
    super(BacktrackingTMCPP, self).write(proto.baseTM)
    self.cells4.write(proto.cells4)
    proto.makeCells4Ephemeral = self.makeCells4Ephemeral
    proto.seed = self.seed
    proto.checkSynapseConsistency = self.checkSynapseConsistency
    # __init__ args are stored as JSON so read() can restore them verbatim.
    proto.initArgs = json.dumps(self._initArgsDict)
@classmethod
def read(cls, proto):
"""Deserialize from proto instance.
:param proto: (BacktrackingTMCppProto) the proto instance to read from
"""
# Use base class to create initial class from proto.baseTM
# (BacktrackingTMProto)
obj = BacktrackingTM.read(proto.baseTM)
obj.__class__ = cls
# Additional CPP-specific deserialization
newCells4 = Cells4.read(proto.cells4)
print newCells4
obj.cells4 = newCells4
obj.makeCells4Ephemeral = proto.makeCells4Ephemeral
obj.seed = proto.seed
obj.checkSynapseConsistency = proto.checkSynapseConsistency
obj._initArgsDict = json.loads(proto.initArgs)
# Convert unicode to str
obj._initArgsDict["outputType"] = str(obj._initArgsDict["outputType"])
# Initialize ephemeral attributes
obj.allocateStatesInCPP = False
obj.retrieveLearningStates = False
obj._setStatePointers()
return obj
  def _initCells4(self):
    """Create and configure the underlying C++ Cells4 instance from the
    current parameter attributes, then re-establish the state pointers."""
    self.cells4 = Cells4(self.numberOfCols,
                         self.cellsPerColumn,
                         self.activationThreshold,
                         self.minThreshold,
                         self.newSynapseCount,
                         self.segUpdateValidDuration,
                         self.initialPerm,
                         self.connectedPerm,
                         self.permanenceMax,
                         self.permanenceDec,
                         self.permanenceInc,
                         self.globalDecay,
                         self.doPooling,
                         self.seed,
                         self.allocateStatesInCPP,
                         self.checkSynapseConsistency)

    self.cells4.setVerbosity(self.verbosity)
    self.cells4.setPamLength(self.pamLength)
    self.cells4.setMaxAge(self.maxAge)
    self.cells4.setMaxInfBacktrack(self.maxInfBacktrack)
    self.cells4.setMaxLrnBacktrack(self.maxLrnBacktrack)
    self.cells4.setMaxSeqLength(self.maxSeqLength)
    self.cells4.setMaxSegmentsPerCell(self.maxSegmentsPerCell)
    # NOTE(review): the C++ setter is named setMaxSynapsesPerCell but is fed
    # maxSynapsesPerSegment — confirm against the Cells4 API.
    self.cells4.setMaxSynapsesPerCell(self.maxSynapsesPerSegment)

    # Reset internal C++ pointers to states
    self._setStatePointers()
  def __setstate__(self, state):
    """
    Set the state of ourself from a serialized state.
    """
    super(BacktrackingTMCPP, self).__setstate__(state)
    # cells4 is not pickled when it is ephemeral, so rebuild it here.
    if self.makeCells4Ephemeral:
      self._initCells4()
def _getEphemeralMembers(self):
"""
List of our member variables that we don't need to be saved
"""
e = BacktrackingTM._getEphemeralMembers(self)
if self.makeCells4Ephemeral:
e.extend(['cells4'])
return e
  def _initEphemerals(self):
    """
    Initialize all ephemeral members after being restored to a pickled state.
    """
    BacktrackingTM._initEphemerals(self)
    #---------------------------------------------------------------------------------
    # cells4 specific initialization

    # If True, let C++ allocate memory for activeState, predictedState, and
    # learnState. In this case we can retrieve copies of these states but can't
    # set them directly from Python. If False, Python can allocate them as
    # numpy arrays and we can pass pointers to the C++ using setStatePointers
    self.allocateStatesInCPP = False

    # Set this to true for debugging or accessing learning states
    self.retrieveLearningStates = False

    # Rebuild the C++ engine itself when it was excluded from pickling.
    if self.makeCells4Ephemeral:
      self._initCells4()
  def saveToFile(self, filePath):
    """
    Save Cells4 state to a file. File can be loaded with :meth:`loadFromFile`.

    :param filePath: path of the file to write
    """
    self.cells4.saveToFile(filePath)
  def loadFromFile(self, filePath):
    """
    Load Cells4 state from a file saved with :meth:`saveToFile`.

    :param filePath: path of the file to read
    """
    # Re-establish the Python-side state pointers before the C++ load.
    self._setStatePointers()
    self.cells4.loadFromFile(filePath)
  def __getattr__(self, name):
    """
    Patch __getattr__ so that we can catch the first access to 'cells' and load.
    This function is only called when we try to access an attribute that doesn't
    exist.  We purposely make sure that "self.cells" doesn't exist after
    unpickling so that we'll hit this, then we can load it on the first access.

    If this is called at any other time, it will raise an AttributeError.
    That's because:

    - If 'name' is "cells", after the first call, self._realCells won't exist
      so we'll get an implicit AttributeError.
    - If 'name' isn't "cells", I'd expect our super wouldn't have __getattr__,
      so we'll raise our own Attribute error.  If the super did get __getattr__,
      we'll just return what it gives us.

    :param name: (str) attribute name that normal lookup failed to resolve
    :raises AttributeError: if neither this class nor a superclass can
        supply the attribute
    """
    try:
      return super(BacktrackingTMCPP, self).__getattr__(name)
    except AttributeError:
      raise AttributeError("'TM' object has no attribute '%s'" % name)
  def compute(self, bottomUpInput, enableLearn, enableInference=None):
    """
    Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.compute`.

    :param bottomUpInput: (numpy array) dtype must be float32, uint32 or
        int32; nonzero entries mark active columns
    :param enableLearn: (bool) whether learning is performed this iteration
    :param enableInference: (bool or None) whether to compute the inference
        output; when None, defaults to ``not enableLearn`` (see speed note
        below)
    :returns: TM output as produced by ``self._computeOutput()``
    """
    # The C++ TM takes 32 bit floats as input. uint32 works as well since the
    # code only checks whether elements are non-zero
    assert (bottomUpInput.dtype == numpy.dtype('float32')) or \
           (bottomUpInput.dtype == numpy.dtype('uint32')) or \
           (bottomUpInput.dtype == numpy.dtype('int32'))
    self.iterationIdx = self.iterationIdx + 1
    # As a speed optimization for now (until we need online learning), skip
    # computing the inference output while learning
    if enableInference is None:
      if enableLearn:
        enableInference = False
      else:
        enableInference = True
    # ====================================================================
    # Run compute and retrieve selected state and member variables
    self._setStatePointers()
    y = self.cells4.compute(bottomUpInput, enableInference, enableLearn)
    self.currentOutput = y.reshape((self.numberOfCols, self.cellsPerColumn))
    self.avgLearnedSeqLength = self.cells4.getAvgLearnedSeqLength()
    self._copyAllocatedStates()
    # ========================================================================
    # Update the prediction score stats
    # Learning always includes inference
    if self.collectStats:
      activeColumns = bottomUpInput.nonzero()[0]
      if enableInference:
        predictedState = self.infPredictedState['t-1']
      else:
        predictedState = self.lrnPredictedState['t-1']
      self._updateStatsInferEnd(self._internalStats,
                                activeColumns,
                                predictedState,
                                self.colConfidence['t-1'])
    # Finally return the TM output
    output = self._computeOutput()
    # Print diagnostic information based on the current verbosity level
    self.printComputeEnd(output, learn=enableLearn)
    self.resetCalled = False
    return output
  def _inferPhase2(self):
    """
    This calls phase 2 of inference (used in multistep prediction).
    """
    # Re-bind the numpy state buffers, run phase 2 in C++, then pull back any
    # state the C++ side owns. The order of these three calls matters.
    self._setStatePointers()
    self.cells4.inferPhase2()
    self._copyAllocatedStates()
def getLearnActiveStateT(self):
if self.verbosity > 1 or self.retrieveLearningStates:
return self.lrnActiveState['t']
else:
(activeT, _, _, _) = self.cells4.getLearnStates()
return activeT.reshape((self.numberOfCols, self.cellsPerColumn))
  def _copyAllocatedStates(self):
    """If state is allocated in CPP, copy over the data into our numpy arrays."""
    # Get learn states if we need to print them out
    if self.verbosity > 1 or self.retrieveLearningStates:
      (activeT, activeT1, predT, predT1) = self.cells4.getLearnStates()
      self.lrnActiveState['t-1'] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn))
      self.lrnActiveState['t'] = activeT.reshape((self.numberOfCols, self.cellsPerColumn))
      self.lrnPredictedState['t-1'] = predT1.reshape((self.numberOfCols, self.cellsPerColumn))
      self.lrnPredictedState['t'] = predT.reshape((self.numberOfCols, self.cellsPerColumn))
    # This branch is currently disabled: _initEphemerals always sets
    # allocateStatesInCPP to False, and the assert guards against anyone
    # enabling it before the copy-back below has been validated.
    if self.allocateStatesInCPP:
      assert False
      (activeT, activeT1, predT, predT1, colConfidenceT, colConfidenceT1, confidenceT,
       confidenceT1) = self.cells4.getStates()
      self.cellConfidence['t'] = confidenceT.reshape((self.numberOfCols, self.cellsPerColumn))
      self.cellConfidence['t-1'] = confidenceT1.reshape((self.numberOfCols, self.cellsPerColumn))
      self.colConfidence['t'] = colConfidenceT.reshape(self.numberOfCols)
      self.colConfidence['t-1'] = colConfidenceT1.reshape(self.numberOfCols)
      self.infActiveState['t-1'] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn))
      self.infActiveState['t'] = activeT.reshape((self.numberOfCols, self.cellsPerColumn))
      self.infPredictedState['t-1'] = predT1.reshape((self.numberOfCols, self.cellsPerColumn))
      self.infPredictedState['t'] = predT.reshape((self.numberOfCols, self.cellsPerColumn))
  def _setStatePointers(self):
    """If we are having CPP use numpy-allocated buffers, set these buffer
    pointers. This is a relatively fast operation and, for safety, should be
    done before every call to the cells4 compute methods. This protects us
    in situations where code can cause Python or numpy to create copies."""
    # Hands the inference/confidence arrays for both time steps to C++ so it
    # reads and writes the same memory that Python sees.
    if not self.allocateStatesInCPP:
      self.cells4.setStatePointers(
          self.infActiveState["t"], self.infActiveState["t-1"],
          self.infPredictedState["t"], self.infPredictedState["t-1"],
          self.colConfidence["t"], self.colConfidence["t-1"],
          self.cellConfidence["t"], self.cellConfidence["t-1"])
  def reset(self):
    """
    Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.reset`.
    """
    if self.verbosity >= 3:
      print "TM Reset"
    # Re-bind state buffers before resetting the C++ side, then reset the
    # Python-side bookkeeping in the base class.
    self._setStatePointers()
    self.cells4.reset()
    BacktrackingTM.reset(self)
  def finishLearning(self):
    """
    Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.finishLearning`.
    """
    # Keep weakly formed synapses around because they contain confidence scores
    # for paths out of learned sequences and produce a better prediction than
    # chance.
    self.trimSegments(minPermanence=0.0001)
  def trimSegments(self, minPermanence=None, minNumSyns=None):
    """
    Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.trimSegments`.

    :param minPermanence: (float or None) threshold passed through to the C++
        ``cells4.trimSegments``; defaults to 0.0
    :param minNumSyns: (int or None) threshold passed through to the C++
        ``cells4.trimSegments``; defaults to 0
    :returns: whatever ``cells4.trimSegments`` returns
    """
    # Fill in defaults
    if minPermanence is None:
      minPermanence = 0.0
    if minNumSyns is None:
      minNumSyns = 0
    # Print all cells if verbosity says to
    if self.verbosity >= 5:
      print "Cells, all segments:"
      self.printCells(predictedOnly=False)
    return self.cells4.trimSegments(minPermanence=minPermanence, minNumSyns=minNumSyns)
################################################################################
# The following print functions for debugging.
################################################################################
  def printSegment(self, s):
    """
    Print a one-line text representation of a segment in list form
    (``s[0]`` is the segment header, ``s[1:]`` are
    [col, cellIdx, permanence] synapse triples).

    NOTE: currently disabled (``assert False``) until C++ accessors for
    segment details are added.
    """
    # TODO: need to add C++ accessors to get segment details
    assert False
    prevAct = self._getSegmentActivityLevel(s, 't-1')
    currAct = self._getSegmentActivityLevel(s, 't')
    # Sequence segment or pooling segment
    if s[0][1] == True:
      print "S",
    else:
      print 'P',
    # Frequency count
    print s[0][2],
    if self._isSegmentActive(s, 't'):
      ss = '[' + str(currAct) + ']'
    else:
      ss = str(currAct)
    ss = ss + '/'
    if self._isSegmentActive(s, 't-1'):
      ss = ss + '[' + str(prevAct) + ']'
    else:
      ss = ss + str(prevAct)
    ss = ss + ':'
    print ss,
    for i,synapse in enumerate(s[1:]):
      if synapse[2] >= self.connectedPerm:
        ss = '['
      else:
        ss = ''
      ss = ss + str(synapse[0]) + '/' + str(synapse[1])
      if self.infActiveState['t'][synapse[0],synapse[1]] == 1:
        ss = ss + '/ON'
      ss = ss + '/'
      sf = str(synapse[2])
      ss = ss + sf[:4]
      if synapse[2] >= self.connectedPerm:
        ss = ss + ']'
      if i < len(s)-2:
        ss = ss + ' |'
      print ss,
    if self.verbosity > 3:
      if self._isSegmentActive(s, 't') and \
         prevAct < self.activationThreshold and currAct >= self.activationThreshold:
        print "reached activation",
      if prevAct < self.minThreshold and currAct >= self.minThreshold:
        print "reached min threshold",
      if self._isSegmentActive(s, 't-1') and \
         prevAct >= self.activationThreshold and currAct < self.activationThreshold:
        print "dropped below activation",
      if prevAct >= self.minThreshold and currAct < self.minThreshold:
        print "dropped below min",
      if self._isSegmentActive(s, 't') and self._isSegmentActive(s, 't-1') and \
         prevAct >= self.activationThreshold and currAct >= self.activationThreshold:
        print "maintained activation",
  def printSegmentUpdates(self):
    """
    Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printSegmentUpdates`.

    NOTE: currently disabled (``assert False``); segment updates live in the
    C++ Cells4 and there are no accessors for them yet.
    """
    # TODO: need to add C++ accessors to implement this method
    assert False
    print "=== SEGMENT UPDATES ===, Num = ", len(self.segmentUpdates)
    for key, updateList in self.segmentUpdates.iteritems():
      c,i = key[0],key[1]
      print c,i,updateList
def _slowIsSegmentActive(self, seg, timeStep):
"""
A segment is active if it has >= activationThreshold connected
synapses that are active due to infActiveState.
"""
numSyn = seg.size()
numActiveSyns = 0
for synIdx in xrange(numSyn):
if seg.getPermanence(synIdx) < self.connectedPerm:
continue
sc, si = self.getColCellIdx(seg.getSrcCellIdx(synIdx))
if self.infActiveState[timeStep][sc, si]:
numActiveSyns += 1
if numActiveSyns >= self.activationThreshold:
return True
return numActiveSyns >= self.activationThreshold
  def printCell(self, c, i, onlyActiveSegments=False):
    """
    Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printCell`.

    :param c: (int) column index
    :param i: (int) cell index within the column
    :param onlyActiveSegments: (bool) when True, skip inactive segments
    """
    nSegs = self.cells4.nSegmentsOnCell(c,i)
    if nSegs > 0:
      segList = self.cells4.getNonEmptySegList(c,i)
      gidx = c * self.cellsPerColumn + i
      print "Column", c, "Cell", i, "(%d)"%(gidx),":", nSegs, "segment(s)"
      for k,segIdx in enumerate(segList):
        seg = self.cells4.getSegment(c, i, segIdx)
        isActive = self._slowIsSegmentActive(seg, 't')
        if onlyActiveSegments and not isActive:
          continue
        isActiveStr = "*" if isActive else " "
        print " %sSeg #%-3d" % (isActiveStr, segIdx),
        print seg.size(),
        print seg.isSequenceSegment(), "%9.7f" % (seg.dutyCycle(
              self.cells4.getNLrnIterations(), False, True)),
        # numPositive/totalActivations
        print "(%4d/%-4d)" % (seg.getPositiveActivations(),
                              seg.getTotalActivations()),
        # Age
        print "%4d" % (self.cells4.getNLrnIterations()
                       - seg.getLastActiveIteration()),
        numSyn = seg.size()
        for s in xrange(numSyn):
          sc, si = self.getColCellIdx(seg.getSrcCellIdx(s))
          print "[%d,%d]%4.2f"%(sc, si, seg.getPermanence(s)),
        print
  def getAvgLearnedSeqLength(self):
    """
    Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getAvgLearnedSeqLength`.

    :returns: the moving average of learned sequence length, as tracked by
        the C++ Cells4 instance
    """
    return self.cells4.getAvgLearnedSeqLength()
def getColCellIdx(self, idx):
"""
Get column and cell within column from a global cell index.
The global index is ``idx = colIdx * nCellsPerCol() + cellIdxInCol``
:param idx: (int) global cell index
:returns: (tuple) (colIdx, cellIdxInCol)
"""
c = idx//self.cellsPerColumn
i = idx - c*self.cellsPerColumn
return c,i
def getSegmentOnCell(self, c, i, segIdx):
"""
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`.
"""
segList = self.cells4.getNonEmptySegList(c,i)
seg = self.cells4.getSegment(c, i, segList[segIdx])
numSyn = seg.size()
assert numSyn != 0
# Accumulate segment information
result = []
result.append([int(segIdx), bool(seg.isSequenceSegment()),
seg.getPositiveActivations(),
seg.getTotalActivations(), seg.getLastActiveIteration(),
seg.getLastPosDutyCycle(),
seg.getLastPosDutyCycleIteration()])
for s in xrange(numSyn):
sc, si = self.getColCellIdx(seg.getSrcCellIdx(s))
result.append([int(sc), int(si), seg.getPermanence(s)])
return result
  def getNumSegments(self):
    """
    Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSegments`.

    :returns: total segment count, as reported by the C++ Cells4
    """
    return self.cells4.nSegments()
  def getNumSynapses(self):
    """
    Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSynapses`.

    :returns: total synapse count, as reported by the C++ Cells4
    """
    return self.cells4.nSynapses()
  def getNumSegmentsInCell(self, c, i):
    """
    Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSegmentsInCell`.

    :param c: (int) column index
    :param i: (int) cell index within the column
    :returns: number of segments on the specified cell
    """
    return self.cells4.nSegmentsOnCell(c,i)
def getSegmentInfo(self, collectActiveData = False):
"""
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentInfo`.
"""
# Requires appropriate accessors in C++ cells4 (currently unimplemented)
assert collectActiveData == False
nSegments, nSynapses = self.getNumSegments(), self.cells4.nSynapses()
distSegSizes, distNSegsPerCell = {}, {}
nActiveSegs, nActiveSynapses = 0, 0
distPermValues = {} # Num synapses with given permanence values
numAgeBuckets = 20
distAges = []
ageBucketSize = int((self.iterationIdx+20) / 20)
for i in range(numAgeBuckets):
distAges.append(['%d-%d' % (i*ageBucketSize, (i+1)*ageBucketSize-1), 0])
for c in xrange(self.numberOfCols):
for i in xrange(self.cellsPerColumn):
# Update histogram counting cell sizes
nSegmentsThisCell = self.getNumSegmentsInCell(c,i)
if nSegmentsThisCell > 0:
if distNSegsPerCell.has_key(nSegmentsThisCell):
distNSegsPerCell[nSegmentsThisCell] += 1
else:
distNSegsPerCell[nSegmentsThisCell] = 1
# Update histogram counting segment sizes.
segList = self.cells4.getNonEmptySegList(c,i)
for segIdx in xrange(nSegmentsThisCell):
seg = self.getSegmentOnCell(c, i, segIdx)
nSynapsesThisSeg = len(seg) - 1
if nSynapsesThisSeg > 0:
if distSegSizes.has_key(nSynapsesThisSeg):
distSegSizes[nSynapsesThisSeg] += 1
else:
distSegSizes[nSynapsesThisSeg] = 1
# Accumulate permanence value histogram
for syn in seg[1:]:
p = int(syn[2]*10)
if distPermValues.has_key(p):
distPermValues[p] += 1
else:
distPermValues[p] = 1
segObj = self.cells4.getSegment(c, i, segList[segIdx])
age = self.iterationIdx - segObj.getLastActiveIteration()
ageBucket = int(age/ageBucketSize)
distAges[ageBucket][1] += 1
return (nSegments, nSynapses, nActiveSegs, nActiveSynapses, \
distSegSizes, distNSegsPerCell, distPermValues, distAges)
################################################################################
# The following methods are implemented in the base class but should never
# be called in this implementation.
################################################################################
  # These base-class hooks are replaced wholesale by the C++ Cells4 code;
  # `assert False` makes any accidental call fail fast.
  def _isSegmentActive(self, seg, timeStep):
    # Should never be called in this subclass
    assert False
  def _getSegmentActivityLevel(self, seg, timeStep, connectedSynapsesOnly =False,
                               learnState = False):
    # Should never be called in this subclass
    assert False
  def isSequenceSegment(self, s):
    # Should never be called in this subclass
    assert False
  def _getBestMatchingSegment(self, c, i, timeStep, learnState = False):
    # Should never be called in this subclass
    assert False
  def _getSegmentActiveSynapses(self, c, i, s, timeStep, newSynapses =False):
    # Should never be called in this subclass
    assert False
  def updateSynapse(self, segment, synapse, delta):
    # Should never be called in this subclass
    assert False
  def _adaptSegment(self, update, positiveReinforcement):
    # Should never be called in this subclass
    assert False
25,915 | connections.py | numenta_nupic-legacy/src/nupic/algorithms/connections.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from bisect import bisect_left
from collections import defaultdict
from nupic.serializable import Serializable
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.proto.ConnectionsProto_capnp import ConnectionsProto
EPSILON = 0.00001 # constant error threshold to check equality of permanences to
# other floats
class Segment(object):
  """
  Minimal identifying information for a single dendrite segment.

  :param cell: (int) Index of the cell that this segment is on.
  :param flatIdx: (int) The segment's flattened list index.
  :param ordinal: (long) Creation counter used to sort segments; the order
      must be consistent across implementations so tie-breaking is
      deterministic when finding the best matching segment.
  """

  __slots__ = ["cell", "flatIdx", "_synapses", "_ordinal"]

  def __init__(self, cell, flatIdx, ordinal):
    self.cell = cell
    self.flatIdx = flatIdx
    self._synapses = set()
    self._ordinal = ordinal


  def __eq__(self, other):
    """ Compare by owning cell and age-ordered synapse contents only.
    flatIdx is deliberately ignored: it is not designed to be stable across
    serialization round trips, and sets may enumerate in any order.
    """
    if self.cell != other.cell:
      return False
    byAge = lambda syn: syn._ordinal
    return (sorted(self._synapses, key=byAge) ==
            sorted(other._synapses, key=byAge))
class Synapse(object):
  """
  Minimal identifying information for a single synapse.

  :param segment: (Object) Segment object that the synapse is synapsed to.
  :param presynapticCell: (int) Index of the presynaptic cell.
  :param permanence: (float) Permanence of the synapse, 0.0 to 1.0.
  :param ordinal: (long) Creation counter used to sort synapses; the order
      must be consistent across implementations so tie-breaking is
      deterministic when finding the min permanence synapse.
  """

  __slots__ = ["segment", "presynapticCell", "permanence", "_ordinal"]

  def __init__(self, segment, presynapticCell, permanence, ordinal):
    self.segment = segment
    self.presynapticCell = presynapticCell
    self.permanence = permanence
    self._ordinal = ordinal


  def __eq__(self, other):
    """ Compare by owning cell, source cell, and permanence; permanences are
    considered equal within the EPSILON floating-point tolerance.
    """
    if self.segment.cell != other.segment.cell:
      return False
    if self.presynapticCell != other.presynapticCell:
      return False
    return abs(self.permanence - other.permanence) < EPSILON
class CellData(object):
  # Per-cell bookkeeping used internally by Connections: just the list of
  # segments that live on the cell.

  __slots__ = ["_segments"]

  def __init__(self):
    self._segments = []


  def __eq__(self, other):
    return self._segments == other._segments


  def __ne__(self, other):
    return not self == other
def binSearch(arr, val):
  """
  Locate ``val`` in the sorted list ``arr`` using binary search.

  :param arr: (list) a sorted list of integers to search
  :param val: (int) a integer to search for in the sorted array
  :returns: (int) the index of the element if it is found and -1 otherwise.
  """
  pos = bisect_left(arr, val)
  found = pos < len(arr) and arr[pos] == val
  return pos if found else -1
class Connections(Serializable):
  """
  Class to hold data representing the connectivity of a collection of cells.

  :param numCells: (int) Number of cells in collection.
  """

  def __init__(self, numCells):
    # Save member variables
    self.numCells = numCells

    # Per-cell segment lists.
    self._cells = [CellData() for _ in xrange(numCells)]

    # Reverse index: presynaptic cell -> set of synapses sourced from it.
    self._synapsesForPresynapticCell = defaultdict(set)

    # Flat segment index bookkeeping. Destroyed segments donate their flatIdx
    # back through _freeFlatIdxs so the flat list stays compact.
    self._segmentForFlatIdx = []
    self._numSynapses = 0
    self._freeFlatIdxs = []
    self._nextFlatIdx = 0

    # Whenever creating a new Synapse or Segment, give it a unique ordinal.
    # These can be used to sort synapses or segments by age.
    self._nextSynapseOrdinal = long(0)
    self._nextSegmentOrdinal = long(0)


  def segmentsForCell(self, cell):
    """
    Returns the segments that belong to a cell.

    :param cell: (int) Cell index
    :returns: (list) Segment objects representing segments on the given cell.
    """
    return self._cells[cell]._segments


  def synapsesForSegment(self, segment):
    """
    Returns the synapses on a segment.

    :param segment: (int) Segment index
    :returns: (set) Synapse objects representing synapses on the given segment.
    """
    return segment._synapses


  def dataForSynapse(self, synapse):
    """
    Returns the data for a synapse.

    .. note:: This method exists to match the interface of the C++ Connections.
       This allows tests and tools to inspect the connections using a common
       interface.

    :param synapse: (:class:`Synapse`)
    :returns: Synapse data
    """
    return synapse


  def dataForSegment(self, segment):
    """
    Returns the data for a segment.

    .. note:: This method exists to match the interface of the C++ Connections.
       This allows tests and tools to inspect the connections using a common
       interface.

    :param segment (:class:`Segment`)
    :returns: segment data
    """
    return segment


  def getSegment(self, cell, idx):
    """
    Returns a :class:`Segment` object of the specified segment using data from
    the ``self._cells`` array.

    :param cell: (int) cell index
    :param idx: (int) segment index on a cell
    :returns: (:class:`Segment`) Segment object with index idx on the specified cell
    """
    return self._cells[cell]._segments[idx]


  def segmentForFlatIdx(self, flatIdx):
    """
    Get the segment with the specified flatIdx.

    :param flatIdx: (int) The segment's flattened list index.
    :returns: (:class:`Segment`)
    """
    return self._segmentForFlatIdx[flatIdx]


  def segmentFlatListLength(self):
    """
    Get the needed length for a list to hold a value for every segment's
    flatIdx.

    :returns: (int) Required list length
    """
    return self._nextFlatIdx


  def synapsesForPresynapticCell(self, presynapticCell):
    """
    Returns the synapses for the source cell that they synapse on.

    :param presynapticCell: (int) Source cell index
    :returns: (set) :class:`Synapse` objects
    """
    return self._synapsesForPresynapticCell[presynapticCell]


  def createSegment(self, cell):
    """
    Adds a new segment on a cell.

    :param cell: (int) Cell index
    :returns: (int) New segment index
    """
    cellData = self._cells[cell]

    # Reuse a recycled flatIdx when one is available; otherwise grow the
    # flat list by one slot.
    if len(self._freeFlatIdxs) > 0:
      flatIdx = self._freeFlatIdxs.pop()
    else:
      flatIdx = self._nextFlatIdx
      self._segmentForFlatIdx.append(None)
      self._nextFlatIdx += 1

    ordinal = self._nextSegmentOrdinal
    self._nextSegmentOrdinal += 1

    segment = Segment(cell, flatIdx, ordinal)
    cellData._segments.append(segment)
    self._segmentForFlatIdx[flatIdx] = segment

    return segment


  def destroySegment(self, segment):
    """
    Destroys a segment.

    :param segment: (:class:`Segment`) representing the segment to be destroyed.
    """
    # Remove the synapses from all data structures outside this Segment.
    for synapse in segment._synapses:
      self._removeSynapseFromPresynapticMap(synapse)
    self._numSynapses -= len(segment._synapses)

    # Remove the segment from the cell's list.
    segments = self._cells[segment.cell]._segments
    i = segments.index(segment)
    del segments[i]

    # Free the flatIdx and remove the final reference so the Segment can be
    # garbage-collected.
    self._freeFlatIdxs.append(segment.flatIdx)
    self._segmentForFlatIdx[segment.flatIdx] = None


  def createSynapse(self, segment, presynapticCell, permanence):
    """
    Creates a new synapse on a segment.

    :param segment: (:class:`Segment`) Segment object for synapse to be synapsed
           to.
    :param presynapticCell: (int) Source cell index.
    :param permanence: (float) Initial permanence of synapse.
    :returns: (:class:`Synapse`) created synapse
    """
    synapse = Synapse(segment, presynapticCell, permanence,
                      self._nextSynapseOrdinal)
    self._nextSynapseOrdinal += 1
    segment._synapses.add(synapse)

    self._synapsesForPresynapticCell[presynapticCell].add(synapse)

    self._numSynapses += 1

    return synapse


  def _removeSynapseFromPresynapticMap(self, synapse):
    # Drop the synapse from the reverse index, deleting the whole entry when
    # it becomes empty so the defaultdict does not accumulate dead keys.
    inputSynapses = self._synapsesForPresynapticCell[synapse.presynapticCell]

    inputSynapses.remove(synapse)

    if len(inputSynapses) == 0:
      del self._synapsesForPresynapticCell[synapse.presynapticCell]


  def destroySynapse(self, synapse):
    """
    Destroys a synapse.

    :param synapse: (:class:`Synapse`) synapse to destroy
    """
    self._numSynapses -= 1

    self._removeSynapseFromPresynapticMap(synapse)

    synapse.segment._synapses.remove(synapse)


  def updateSynapsePermanence(self, synapse, permanence):
    """
    Updates the permanence for a synapse.

    :param synapse: (class:`Synapse`) to be updated.
    :param permanence: (float) New permanence.
    """
    synapse.permanence = permanence


  def computeActivity(self, activePresynapticCells, connectedPermanence):
    """
    Compute each segment's number of active synapses for a given input.
    In the returned lists, a segment's active synapse count is stored at index
    ``segment.flatIdx``.

    :param activePresynapticCells: (iter) Active cells.
    :param connectedPermanence: (float) Permanence threshold for a synapse to be
           considered connected

    :returns: (tuple) (``numActiveConnectedSynapsesForSegment`` [list],
                       ``numActivePotentialSynapsesForSegment`` [list])
    """
    numActiveConnectedSynapsesForSegment = [0] * self._nextFlatIdx
    numActivePotentialSynapsesForSegment = [0] * self._nextFlatIdx

    # Subtract EPSILON so that permanences equal to connectedPermanence count
    # as connected despite floating point error.
    threshold = connectedPermanence - EPSILON
    for cell in activePresynapticCells:
      for synapse in self._synapsesForPresynapticCell[cell]:
        flatIdx = synapse.segment.flatIdx
        numActivePotentialSynapsesForSegment[flatIdx] += 1
        if synapse.permanence > threshold:
          numActiveConnectedSynapsesForSegment[flatIdx] += 1

    return (numActiveConnectedSynapsesForSegment,
            numActivePotentialSynapsesForSegment)


  def numSegments(self, cell=None):
    """
    Returns the number of segments.

    :param cell: (int) Optional parameter to get the number of segments on a
           cell.
    :returns: (int) Number of segments on all cells if cell is not specified, or
           on a specific specified cell
    """
    if cell is not None:
      return len(self._cells[cell]._segments)
    return self._nextFlatIdx - len(self._freeFlatIdxs)


  def numSynapses(self, segment=None):
    """
    Returns the number of Synapses.

    :param segment: (:class:`Segment`) Optional parameter to get the number of
           synapses on a segment.
    :returns: (int) Number of synapses on all segments if segment is not
           specified, or on a specified segment.
    """
    if segment is not None:
      return len(segment._synapses)
    return self._numSynapses


  def segmentPositionSortKey(self, segment):
    """
    Return a numeric key for sorting this segment. This can be used with the
    python built-in ``sorted()`` function.

    :param segment: (:class:`Segment`) within this :class:`Connections`
           instance.
    :returns: (float) A numeric key for sorting.
    """
    # Primary key is the cell; the fractional part breaks ties by age.
    return segment.cell + (segment._ordinal / float(self._nextSegmentOrdinal))


  def write(self, proto):
    """
    Writes serialized data to proto object.

    :param proto: (DynamicStructBuilder) Proto object
    """
    protoCells = proto.init('cells', self.numCells)

    for i in xrange(self.numCells):
      segments = self._cells[i]._segments
      protoSegments = protoCells[i].init('segments', len(segments))

      for j, segment in enumerate(segments):
        synapses = segment._synapses
        protoSynapses = protoSegments[j].init('synapses', len(synapses))

        # Serialize synapses in creation (ordinal) order so round trips are
        # deterministic.
        for k, synapse in enumerate(sorted(synapses, key=lambda s: s._ordinal)):
          protoSynapses[k].presynapticCell = synapse.presynapticCell
          protoSynapses[k].permanence = synapse.permanence


  @classmethod
  def getSchema(cls):
    return ConnectionsProto


  @classmethod
  def read(cls, proto):
    """
    Reads deserialized data from proto object

    :param proto: (DynamicStructBuilder) Proto object

    :returns: (:class:`Connections`) instance
    """
    #pylint: disable=W0212
    protoCells = proto.cells
    connections = cls(len(protoCells))

    for cellIdx, protoCell in enumerate(protoCells):
      connections._cells[cellIdx] = CellData()
      segments = connections._cells[cellIdx]._segments

      for protoSegment in protoCell.segments:
        segment = Segment(cellIdx, connections._nextFlatIdx,
                          connections._nextSegmentOrdinal)

        segments.append(segment)
        connections._segmentForFlatIdx.append(segment)
        connections._nextFlatIdx += 1
        connections._nextSegmentOrdinal += 1

        synapses = segment._synapses

        for protoSynapse in protoSegment.synapses:
          presynapticCell = protoSynapse.presynapticCell
          synapse = Synapse(segment, presynapticCell, protoSynapse.permanence,
                            ordinal=connections._nextSynapseOrdinal)
          connections._nextSynapseOrdinal += 1
          synapses.add(synapse)
          connections._synapsesForPresynapticCell[presynapticCell].add(synapse)
          connections._numSynapses += 1

    #pylint: enable=W0212
    return connections


  def __eq__(self, other):
    """ Equality operator for Connections instances.
    Checks if two instances are functionally identical

    :param other: (:class:`Connections`) Connections instance to compare to
    """
    #pylint: disable=W0212
    for i in xrange(self.numCells):
      segments = self._cells[i]._segments
      otherSegments = other._cells[i]._segments

      if len(segments) != len(otherSegments):
        return False

      for j in xrange(len(segments)):
        segment = segments[j]
        otherSegment = otherSegments[j]
        synapses = segment._synapses
        otherSynapses = otherSegment._synapses

        if len(synapses) != len(otherSynapses):
          return False

        for synapse in synapses:
          found = False
          for candidate in otherSynapses:
            if synapse == candidate:
              found = True
              break

          if not found:
            return False

    # BUG FIX: the original compared len(self._synapsesForPresynapticCell)
    # with itself, so a size mismatch between the two maps was never detected.
    if (len(self._synapsesForPresynapticCell) !=
        len(other._synapsesForPresynapticCell)):
      return False

    for i in self._synapsesForPresynapticCell.keys():
      # Explicit membership test: indexing other's defaultdict would silently
      # insert an empty entry, mutating `other` during comparison.
      if i not in other._synapsesForPresynapticCell:
        return False
      synapses = self._synapsesForPresynapticCell[i]
      otherSynapses = other._synapsesForPresynapticCell[i]
      if len(synapses) != len(otherSynapses):
        return False

      for synapse in synapses:
        found = False
        for candidate in otherSynapses:
          if synapse == candidate:
            found = True
            break

        if not found:
          return False

    if self._numSynapses != other._numSynapses:
      return False

    #pylint: enable=W0212
    return True


  def __ne__(self, other):
    """
    Non-equality operator for Connections instances.
    Checks if two instances are not functionally identical

    :param other: (:class:`Connections`) Connections instance to compare to
    """
    return not self.__eq__(other)
| 16,786 | Python | .py | 411 | 34.664234 | 84 | 0.692526 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,916 | anomaly_likelihood.py | numenta_nupic-legacy/src/nupic/algorithms/anomaly_likelihood.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module analyzes and estimates the distribution of averaged anomaly scores
from a given model. Given a new anomaly score ``s``, estimates
``P(score >= s)``.
The number ``P(score >= s)`` represents the likelihood of the current state of
predictability. For example, a likelihood of 0.01 or 1% means we see this much
predictability about one out of every 100 records. The number is not as unusual
as it seems. For records that arrive every minute, this means once every hour
and 40 minutes. A likelihood of 0.0001 or 0.01% means we see it once out of
10,000 records, or about once every 7 days.
USAGE
+++++
There are two ways to use the code: using the
:class:`.anomaly_likelihood.AnomalyLikelihood` helper class or using the raw
individual functions :func:`~.anomaly_likelihood.estimateAnomalyLikelihoods` and
:func:`~.anomaly_likelihood.updateAnomalyLikelihoods`.
Low-Level Function Usage
++++++++++++++++++++++++
There are two primary interface routines.
- :func:`~.anomaly_likelihood.estimateAnomalyLikelihoods`: batch routine, called
initially and once in a while
- :func:`~.anomaly_likelihood.updateAnomalyLikelihoods`: online routine, called
for every new data point
Initially:
.. code-block:: python
likelihoods, avgRecordList, estimatorParams = \\
estimateAnomalyLikelihoods(metric_data)
Whenever you get new data:
.. code-block:: python
likelihoods, avgRecordList, estimatorParams = \\
updateAnomalyLikelihoods(data2, estimatorParams)
And again (make sure you use the new estimatorParams returned in the above call
to updateAnomalyLikelihoods!).
.. code-block:: python
likelihoods, avgRecordList, estimatorParams = \\
updateAnomalyLikelihoods(data3, estimatorParams)
Every once in a while update estimator with a lot of recent data.
.. code-block:: python
likelihoods, avgRecordList, estimatorParams = \\
estimateAnomalyLikelihoods(lots_of_metric_data)
PARAMS
++++++
The parameters dict returned by the above functions has the following
structure. Note: the client does not need to know the details of this.
::
{
"distribution": # describes the distribution
{
"name": STRING, # name of the distribution, such as 'normal'
"mean": SCALAR, # mean of the distribution
"variance": SCALAR, # variance of the distribution
# There may also be some keys that are specific to the distribution
},
"historicalLikelihoods": [] # Contains the last windowSize likelihood
# values returned
"movingAverage": # stuff needed to compute a rolling average
# of the anomaly scores
{
"windowSize": SCALAR, # the size of the averaging window
"historicalValues": [], # list with the last windowSize anomaly
# scores
"total": SCALAR, # the total of the values in historicalValues
},
}
"""
import collections
import math
import numbers
import numpy
from nupic.serializable import Serializable
from nupic.utils import MovingAverage
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.algorithms.anomaly_likelihood_capnp import AnomalyLikelihoodProto
class AnomalyLikelihood(Serializable):
  """
  Helper class for running anomaly likelihood computation. To use it simply
  create an instance and then feed it successive anomaly scores:
  .. code-block:: python
    anomalyLikelihood = AnomalyLikelihood()
    while still_have_data:
      # Get anomaly score from model
      # Compute probability that an anomaly has occurred
      anomalyProbability = anomalyLikelihood.anomalyProbability(
          value, anomalyScore, timestamp)
  """
  def __init__(self,
               claLearningPeriod=None,
               learningPeriod=288,
               estimationSamples=100,
               historicWindowSize=8640,
               reestimationPeriod=100):
    """
    NOTE: Anomaly likelihood scores are reported at a flat 0.5 for
    learningPeriod + estimationSamples iterations.
    claLearningPeriod and learningPeriod are specifying the same variable,
    although claLearningPeriod is a deprecated name for it.
    :param learningPeriod: (claLearningPeriod: deprecated) - (int) the number of
      iterations required for the algorithm to learn the basic patterns in the
      dataset and for the anomaly score to 'settle down'. The default is based
      on empirical observations but in reality this could be larger for more
      complex domains. The downside if this is too large is that real anomalies
      might get ignored and not flagged.
    :param estimationSamples: (int) the number of reasonable anomaly scores
      required for the initial estimate of the Gaussian. The default of 100
      records is reasonable - we just need sufficient samples to get a decent
      estimate for the Gaussian. It's unlikely you will need to tune this since
      the Gaussian is re-estimated every 10 iterations by default.
    :param historicWindowSize: (int) size of sliding window of historical
      data points to maintain for periodic reestimation of the Gaussian. Note:
      the default of 8640 is based on a month's worth of history at 5-minute
      intervals.
    :param reestimationPeriod: (int) how often we re-estimate the Gaussian
      distribution. The ideal is to re-estimate every iteration but this is a
      performance hit. In general the system is not very sensitive to this
      number as long as it is small relative to the total number of records
      processed.
    """
    if historicWindowSize < estimationSamples:
      raise ValueError("estimationSamples exceeds historicWindowSize")
    self._iteration = 0
    # Sliding window of (timestamp, value, anomalyScore) records; old entries
    # fall off automatically once historicWindowSize is reached.
    self._historicalScores = collections.deque(maxlen=historicWindowSize)
    # Estimator params dict (see module docstring); None until first estimate.
    self._distribution = None
    if claLearningPeriod != None:
      print("claLearningPeriod is deprecated, use learningPeriod instead.")
      self._learningPeriod = claLearningPeriod
    else:
      self._learningPeriod = learningPeriod
    # Likelihood is pinned at 0.5 for the first probationaryPeriod iterations.
    self._probationaryPeriod = self._learningPeriod + estimationSamples
    self._reestimationPeriod = reestimationPeriod
  def __eq__(self, o):
    # Field-by-field comparison of all internal state (private-member access
    # on the other instance is deliberate, hence the pylint suppression).
    # pylint: disable=W0212
    return (isinstance(o, AnomalyLikelihood) and
            self._iteration == o._iteration and
            self._historicalScores == o._historicalScores and
            self._distribution == o._distribution and
            self._probationaryPeriod == o._probationaryPeriod and
            self._learningPeriod == o._learningPeriod and
            self._reestimationPeriod == o._reestimationPeriod)
    # pylint: enable=W0212
  def __str__(self):
    return ("AnomalyLikelihood: %s %s %s %s %s %s" % (
      self._iteration,
      self._historicalScores,
      self._distribution,
      self._probationaryPeriod,
      self._learningPeriod,
      self._reestimationPeriod) )
  @staticmethod
  def computeLogLikelihood(likelihood):
    """
    Compute a log scale representation of the likelihood value. Since the
    likelihood computations return low probabilities that often go into four 9's
    or five 9's, a log value is more useful for visualization, thresholding,
    etc.
    """
    # The log formula is:
    #     Math.log(1.0000000001 - likelihood) / Math.log(1.0 - 0.9999999999)
    # The denominator is precomputed: log(1e-10) == -23.02585084720009.
    return math.log(1.0000000001 - likelihood) / -23.02585084720009
  @staticmethod
  def _calcSkipRecords(numIngested, windowSize, learningPeriod):
    """Return the value of skipRecords for passing to estimateAnomalyLikelihoods
    If `windowSize` is very large (bigger than the amount of data) then this
    could just return `learningPeriod`. But when some values have fallen out of
    the historical sliding window of anomaly records, then we have to take those
    into account as well so we return the `learningPeriod` minus the number
    shifted out.
    :param numIngested - (int) number of data points that have been added to the
      sliding window of historical data points.
    :param windowSize - (int) size of sliding window of historical data points.
    :param learningPeriod - (int) the number of iterations required for the
      algorithm to learn the basic patterns in the dataset and for the anomaly
      score to 'settle down'.
    """
    numShiftedOut = max(0, numIngested - windowSize)
    return min(numIngested, max(0, learningPeriod - numShiftedOut))
  @classmethod
  def getSchema(cls):
    # Capnp schema class used by Serializable for (de)serialization.
    return AnomalyLikelihoodProto
  @classmethod
  def read(cls, proto):
    """ capnp deserialization method for the anomaly likelihood object
    :param proto: (Object) capnp proto object specified in
                          nupic.regions.anomaly_likelihood.capnp
    :returns: (Object) the deserialized AnomalyLikelihood object
    """
    # pylint: disable=W0212
    # Bypass __init__: all state is restored directly from the proto.
    anomalyLikelihood = object.__new__(cls)
    anomalyLikelihood._iteration = proto.iteration
    anomalyLikelihood._historicalScores = collections.deque(
      maxlen=proto.historicWindowSize)
    # Original timestamps are not serialized; the index i stands in for them.
    for i, score in enumerate(proto.historicalScores):
      anomalyLikelihood._historicalScores.append((i, score.value,
                                                  score.anomalyScore))
    if proto.distribution.name: # is "" when there is no distribution.
      anomalyLikelihood._distribution = dict()
      anomalyLikelihood._distribution['distribution'] = dict()
      anomalyLikelihood._distribution['distribution']["name"] = proto.distribution.name
      anomalyLikelihood._distribution['distribution']["mean"] = proto.distribution.mean
      anomalyLikelihood._distribution['distribution']["variance"] = proto.distribution.variance
      anomalyLikelihood._distribution['distribution']["stdev"] = proto.distribution.stdev
      anomalyLikelihood._distribution["movingAverage"] = {}
      anomalyLikelihood._distribution["movingAverage"]["windowSize"] = proto.distribution.movingAverage.windowSize
      anomalyLikelihood._distribution["movingAverage"]["historicalValues"] = []
      for value in proto.distribution.movingAverage.historicalValues:
        anomalyLikelihood._distribution["movingAverage"]["historicalValues"].append(value)
      anomalyLikelihood._distribution["movingAverage"]["total"] = proto.distribution.movingAverage.total
      anomalyLikelihood._distribution["historicalLikelihoods"] = []
      for likelihood in proto.distribution.historicalLikelihoods:
        anomalyLikelihood._distribution["historicalLikelihoods"].append(likelihood)
    else:
      anomalyLikelihood._distribution = None
    anomalyLikelihood._probationaryPeriod = proto.probationaryPeriod
    anomalyLikelihood._learningPeriod = proto.learningPeriod
    anomalyLikelihood._reestimationPeriod = proto.reestimationPeriod
    # pylint: enable=W0212
    return anomalyLikelihood
  def write(self, proto):
    """ capnp serialization method for the anomaly likelihood object
    :param proto: (Object) capnp proto object specified in
                          nupic.regions.anomaly_likelihood.capnp
    """
    proto.iteration = self._iteration
    pHistScores = proto.init('historicalScores', len(self._historicalScores))
    for i, score in enumerate(list(self._historicalScores)):
      # Timestamps (first tuple element) are intentionally dropped; read()
      # reconstructs them as sequential indices.
      _, value, anomalyScore = score
      record = pHistScores[i]
      record.value = float(value)
      record.anomalyScore = float(anomalyScore)
    if self._distribution:
      proto.distribution.name = self._distribution["distribution"]["name"]
      proto.distribution.mean = float(self._distribution["distribution"]["mean"])
      proto.distribution.variance = float(self._distribution["distribution"]["variance"])
      proto.distribution.stdev = float(self._distribution["distribution"]["stdev"])
      proto.distribution.movingAverage.windowSize = float(self._distribution["movingAverage"]["windowSize"])
      historicalValues = self._distribution["movingAverage"]["historicalValues"]
      pHistValues = proto.distribution.movingAverage.init(
        "historicalValues", len(historicalValues))
      for i, value in enumerate(historicalValues):
        pHistValues[i] = float(value)
      #proto.distribution.movingAverage.historicalValues = self._distribution["movingAverage"]["historicalValues"]
      proto.distribution.movingAverage.total = float(self._distribution["movingAverage"]["total"])
      historicalLikelihoods = self._distribution["historicalLikelihoods"]
      pHistLikelihoods = proto.distribution.init("historicalLikelihoods",
                                                 len(historicalLikelihoods))
      for i, likelihood in enumerate(historicalLikelihoods):
        pHistLikelihoods[i] = float(likelihood)
    proto.probationaryPeriod = self._probationaryPeriod
    proto.learningPeriod = self._learningPeriod
    proto.reestimationPeriod = self._reestimationPeriod
    proto.historicWindowSize = self._historicalScores.maxlen
  def anomalyProbability(self, value, anomalyScore, timestamp=None):
    """
    Compute the probability that the current value plus anomaly score represents
    an anomaly given the historical distribution of anomaly scores. The closer
    the number is to 1, the higher the chance it is an anomaly.
    :param value: the current metric ("raw") input value, eg. "orange", or
                  '21.2' (deg. Celsius), ...
    :param anomalyScore: the current anomaly score
    :param timestamp: [optional] timestamp of the occurrence,
                      default (None) results in using iteration step.
    :returns: the anomalyLikelihood for this record.
    """
    if timestamp is None:
      timestamp = self._iteration
    dataPoint = (timestamp, value, anomalyScore)
    # We ignore the first probationaryPeriod data points
    if self._iteration < self._probationaryPeriod:
      likelihood = 0.5
    else:
      # On a rolling basis we re-estimate the distribution
      if ( (self._distribution is None) or
           (self._iteration % self._reestimationPeriod == 0) ):
        numSkipRecords = self._calcSkipRecords(
          numIngested=self._iteration,
          windowSize=self._historicalScores.maxlen,
          learningPeriod=self._learningPeriod)
        _, _, self._distribution = estimateAnomalyLikelihoods(
          self._historicalScores,
          skipRecords=numSkipRecords)
      likelihoods, _, self._distribution = updateAnomalyLikelihoods(
        [dataPoint],
        self._distribution)
      # updateAnomalyLikelihoods returns "likelihood of normal"; invert it so
      # that values near 1 mean "likely anomalous".
      likelihood = 1.0 - likelihoods[0]
    # Before we exit update historical scores and iteration
    self._historicalScores.append(dataPoint)
    self._iteration += 1
    return likelihood
def estimateAnomalyLikelihoods(anomalyScores,
                               averagingWindow=10,
                               skipRecords=0,
                               verbosity=0):
  """
  Given a series of anomaly scores, compute the likelihood for each score. This
  function should be called once on a bunch of historical anomaly scores for an
  initial estimate of the distribution. It should be called again every so often
  (say every 50 records) to update the estimate.
  :param anomalyScores: a list of records. Each record is a list with the
                        following three elements: [timestamp, value, score]
                        Example::
                          [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
                        For best results, the list should be between 1000
                        and 10,000 records
  :param averagingWindow: integer number of records to average over
  :param skipRecords: integer specifying number of records to skip when
                      estimating distributions. If skip records are >=
                      len(anomalyScores), a very broad distribution is returned
                      that makes everything pretty likely.
  :param verbosity: integer controlling extent of printouts for debugging
                      0 = none
                      1 = occasional information
                      2 = print every record
  :returns: 3-tuple consisting of:
            - likelihoods
              numpy array of likelihoods, one for each aggregated point
            - avgRecordList
              list of averaged input records
            - params
              a small JSON dict that contains the state of the estimator
  """
  if verbosity > 1:
    print("In estimateAnomalyLikelihoods.")
    print("Number of anomaly scores:", len(anomalyScores))
    print("Skip records=", skipRecords)
    print("First 20:", anomalyScores[0:min(20, len(anomalyScores))])
  if len(anomalyScores) == 0:
    raise ValueError("Must have at least one anomalyScore")
  # Compute averaged anomaly scores
  aggRecordList, historicalValues, total = _anomalyScoreMovingAverage(
    anomalyScores,
    windowSize = averagingWindow,
    verbosity = verbosity)
  # r[2] is the averaged anomaly score of each aggregated record.
  s = [r[2] for r in aggRecordList]
  dataValues = numpy.array(s)
  # Estimate the distribution of anomaly scores based on aggregated records
  if len(aggRecordList) <= skipRecords:
    distributionParams = nullDistribution(verbosity = verbosity)
  else:
    distributionParams = estimateNormal(dataValues[skipRecords:])
    # HACK ALERT! The HTMPredictionModel currently does not handle constant
    # metric values very well (time of day encoder changes sometimes lead to
    # unstable SDR's even though the metric is constant). Until this is
    # resolved, we explicitly detect and handle completely flat metric values by
    # reporting them as not anomalous.
    # r[1] is the raw metric value of each aggregated record.
    s = [r[1] for r in aggRecordList]
    # Only do this if the values are numeric
    if all([isinstance(r[1], numbers.Number) for r in aggRecordList]):
      metricValues = numpy.array(s)
      metricDistribution = estimateNormal(metricValues[skipRecords:],
                                          performLowerBoundCheck=False)
      # Near-zero variance means the metric is (essentially) flat.
      if metricDistribution["variance"] < 1.5e-5:
        distributionParams = nullDistribution(verbosity = verbosity)
  # Estimate likelihoods based on this distribution
  likelihoods = numpy.array(dataValues, dtype=float)
  for i, s in enumerate(dataValues):
    likelihoods[i] = tailProbability(s, distributionParams)
  # Filter likelihood values
  filteredLikelihoods = numpy.array(
    _filterLikelihoods(likelihoods) )
  params = {
    "distribution": distributionParams,
    "movingAverage": {
      "historicalValues": historicalValues,
      "total": total,
      "windowSize": averagingWindow,
    },
    "historicalLikelihoods":
      list(likelihoods[-min(averagingWindow, len(likelihoods)):]),
  }
  if verbosity > 1:
    print("Discovered params=")
    print(params)
    print("Number of likelihoods:", len(likelihoods))
    print("First 20 likelihoods:", (
      filteredLikelihoods[0:min(20, len(filteredLikelihoods))] ))
    print("leaving estimateAnomalyLikelihoods")
  return (filteredLikelihoods, aggRecordList, params)
def updateAnomalyLikelihoods(anomalyScores,
                             params,
                             verbosity=0):
  """
  Compute updated probabilities for anomalyScores using the given params.
  :param anomalyScores: a list of records. Each record is a list with the
                        following three elements: [timestamp, value, score]
                        Example::
                          [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
  :param params: the JSON dict returned by estimateAnomalyLikelihoods
  :param verbosity: integer controlling extent of printouts for debugging
  :type verbosity: int
  :raises ValueError: if anomalyScores is empty or params is malformed
  :returns: 3-tuple consisting of:
            - likelihoods
              numpy array of likelihoods, one for each aggregated point
            - avgRecordList
              list of averaged input records
            - params
              an updated JSON object containing the state of this metric.
  """
  if verbosity > 3:
    print("In updateAnomalyLikelihoods.")
    print("Number of anomaly scores:", len(anomalyScores))
    print("First 20:", anomalyScores[0:min(20, len(anomalyScores))])
    print("Params:", params)
  if len(anomalyScores) == 0:
    raise ValueError("Must have at least one anomalyScore")
  if not isValidEstimatorParams(params):
    raise ValueError("'params' is not a valid params structure")
  # For backward compatibility.
  if "historicalLikelihoods" not in params:
    params["historicalLikelihoods"] = [1.0]
  # Compute moving averages of these new scores using the previous values
  # as well as likelihood for these scores using the old estimator
  historicalValues = params["movingAverage"]["historicalValues"]
  total = params["movingAverage"]["total"]
  windowSize = params["movingAverage"]["windowSize"]
  aggRecordList = numpy.zeros(len(anomalyScores), dtype=float)
  likelihoods = numpy.zeros(len(anomalyScores), dtype=float)
  for i, v in enumerate(anomalyScores):
    # v[2] is the raw anomaly score of the record.
    newAverage, historicalValues, total = (
      MovingAverage.compute(historicalValues, total, v[2], windowSize)
    )
    aggRecordList[i] = newAverage
    likelihoods[i] = tailProbability(newAverage, params["distribution"])
  # Filter the likelihood values. First we prepend the historical likelihoods
  # to the current set. Then we filter the values. We peel off the likelihoods
  # to return and the last windowSize values to store for later.
  likelihoods2 = params["historicalLikelihoods"] + list(likelihoods)
  filteredLikelihoods = _filterLikelihoods(likelihoods2)
  # Overwrite in place so the returned array has the filtered values.
  likelihoods[:] = filteredLikelihoods[-len(likelihoods):]
  historicalLikelihoods = likelihoods2[-min(windowSize, len(likelihoods2)):]
  # Update the estimator
  newParams = {
    "distribution": params["distribution"],
    "movingAverage": {
      "historicalValues": historicalValues,
      "total": total,
      "windowSize": windowSize,
    },
    "historicalLikelihoods": historicalLikelihoods,
  }
  assert len(newParams["historicalLikelihoods"]) <= windowSize
  if verbosity > 3:
    print("Number of likelihoods:", len(likelihoods))
    print("First 20 likelihoods:", likelihoods[0:min(20, len(likelihoods))])
    print("Leaving updateAnomalyLikelihoods.")
  return (likelihoods, aggRecordList, newParams)
def _filterLikelihoods(likelihoods,
redThreshold=0.99999, yellowThreshold=0.999):
"""
Filter the list of raw (pre-filtered) likelihoods so that we only preserve
sharp increases in likelihood. 'likelihoods' can be a numpy array of floats or
a list of floats.
:returns: A new list of floats likelihoods containing the filtered values.
"""
redThreshold = 1.0 - redThreshold
yellowThreshold = 1.0 - yellowThreshold
# The first value is untouched
filteredLikelihoods = [likelihoods[0]]
for i, v in enumerate(likelihoods[1:]):
if v <= redThreshold:
# Value is in the redzone
if likelihoods[i] > redThreshold:
# Previous value is not in redzone, so leave as-is
filteredLikelihoods.append(v)
else:
filteredLikelihoods.append(yellowThreshold)
else:
# Value is below the redzone, so leave as-is
filteredLikelihoods.append(v)
return filteredLikelihoods
def _anomalyScoreMovingAverage(anomalyScores,
                               windowSize=10,
                               verbosity=0,
                              ):
  """
  Given a list of anomaly scores return a list of averaged records.
  anomalyScores is assumed to be a list of records of the form:
                [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
  Each record in the returned list list contains:
                [datetime, value, averagedScore]
  *Note:* we only average the anomaly score.
  :returns: 3-tuple of (averagedRecordList, historicalValues, total) where the
            latter two capture the moving-average state for later updates.
  """
  historicalValues = []
  total = 0.0
  averagedRecordList = [] # Aggregated records
  for record in anomalyScores:
    # Skip (but log) records without correct number of entries
    if not isinstance(record, (list, tuple)) or len(record) != 3:
      if verbosity >= 1:
        print("Malformed record:", record)
      continue
    # record[2] is the anomaly score; timestamp and value pass through.
    avg, historicalValues, total = (
      MovingAverage.compute(historicalValues, total, record[2], windowSize)
    )
    averagedRecordList.append( [record[0], record[1], avg] )
    if verbosity > 2:
      print("Aggregating input record:", record)
      print("Result:", [record[0], record[1], avg])
  return averagedRecordList, historicalValues, total
def estimateNormal(sampleData, performLowerBoundCheck=True):
  """
  Estimate the parameters of a normal distribution from ``sampleData``.
  :param sampleData: Numpy array of samples.
  :param performLowerBoundCheck: (bool) when True, clamp suspiciously small
      means and variances to safe minimum values.
  :returns: dict with "name", "mean", "variance" and "stdev" describing the
      fitted normal distribution.
  """
  distribution = {
    "name": "normal",
    "mean": numpy.mean(sampleData),
    "variance": numpy.var(sampleData),
  }
  if performLowerBoundCheck:
    # Handle the edge case of almost no deviations and super low anomaly
    # scores: with a near-zero mean, the slightest blip of anomaly score can
    # make the likelihood jump straight to red.
    if distribution["mean"] < 0.03:
      distribution["mean"] = 0.03
    # Catch-all for super low variance (numerical precision issues).
    if distribution["variance"] < 0.0003:
      distribution["variance"] = 0.0003
  # Derive the standard deviation; guard against a zero/negative variance.
  if distribution["variance"] > 0:
    distribution["stdev"] = math.sqrt(distribution["variance"])
  else:
    distribution["stdev"] = 0
  return distribution
def nullDistribution(verbosity=0):
  """
  Build a "know nothing" distribution: so broad that any anomaly score
  between 0 and 1 is considered quite likely.
  :param verbosity: integer controlling extent of printouts for debugging
  :type verbosity: int
  :returns: dict describing a very wide normal distribution.
  """
  if verbosity > 0:
    print("Returning nullDistribution")
  return dict(name="normal", mean=0.5, variance=1e6, stdev=1e3)
def tailProbability(x, distributionParams):
  """
  Q-function of the normal distribution described by ``distributionParams``:
  the probability of drawing a sample at least as far from the mean as ``x``.
  For values above the mean this is P(sample > x); for values below the mean
  it is P(sample < x).
  :param x: sample value
  :param distributionParams: dict with 'mean' and 'stdev' of the distribution
  :raises RuntimeError: when 'mean' or 'stdev' is missing
  """
  if "mean" not in distributionParams or "stdev" not in distributionParams:
    raise RuntimeError("Insufficient parameters to specify the distribution.")
  mean = distributionParams["mean"]
  if x < mean:
    # The Gaussian is symmetric around its mean, so reflect x onto the upper
    # tail and reuse the same computation.
    return tailProbability(2 * mean - x, distributionParams)
  # Q(z) via the complementary error function; see
  # http://www.gaussianwaves.com/2012/07/q-function-and-error-functions
  z = (x - mean) / distributionParams["stdev"]
  return 0.5 * math.erfc(z/1.4142)
def isValidEstimatorParams(p):
  """
  :returns: ``True`` if ``p`` is a valid estimator params as might be returned
    by ``estimateAnomalyLikelihoods()`` or ``updateAnomalyLikelihoods``,
    ``False`` otherwise. Just does some basic validation.
  """
  # Must be a dict with both top-level sections present.
  if not isinstance(p, dict):
    return False
  if "distribution" not in p or "movingAverage" not in p:
    return False
  # The distribution section must carry all four normal-distribution fields.
  dist = p["distribution"]
  return all(key in dist for key in ("mean", "name", "variance", "stdev"))
| 28,597 | Python | .py | 601 | 41.02995 | 114 | 0.707906 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,917 | utils.py | numenta_nupic-legacy/src/nupic/algorithms/utils.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
def importAndRunFunction(
  path,
  moduleName,
  funcName,
  **keywords
  ):
  """
  Run a named function specified by a filesystem path, module name
  and function name.
  Returns the value returned by the imported function.
  Use this when access is needed to code that has
  not been added to a package accessible from the ordinary Python
  path. Encapsulates the multiple lines usually needed to
  safely manipulate and restore the Python path.
  Parameters
  ----------
  path: filesystem path
    Path to the directory where the desired module is stored.
    This will be used to temporarily augment the Python path.
  moduleName: basestring
    Name of the module, without trailing extension, where the desired
    function is stored. This module should be in the directory specified
    with path.
  funcName: basestring
    Name of the function to import and call.
  keywords:
    Keyword arguments to be passed to the imported function.
  """
  import sys
  originalPath = sys.path
  sys.path = [path] + sys.path
  try:
    # Resolve the function while the augmented path is active.
    func = getattr(__import__(moduleName, fromlist=[funcName]), funcName)
  finally:
    # try/finally restores the interpreter path on both the success and the
    # failure paths (the original duplicated the restore in two branches).
    sys.path = originalPath
  # Call outside the guarded region: the import machinery no longer needs
  # the augmented path once the function object is in hand.
  return func(**keywords)
def getLockedHandle(runtimeElement, expression):
  """
  Evaluate ``expression`` on ``runtimeElement`` via its interpret() method,
  wrapping the result in a call to nupic.bindings.research.lockHandle().
  """
  prefix = ('__import__("nupic.bindings.research", '
            'fromlist=["lockHandle"]).lockHandle( ')
  wrappedExpression = prefix + expression + ' )'
  return runtimeElement.interpret(wrappedExpression)
def transferCoincidences(network, fromElementName, toElementName):
  """
  Gets the coincidence matrix from one element and sets it on
  another element
  (using locked handles, a la nupic.bindings.research.lockHandle).
  TODO: Generalize to more node types, parameter name pairs, etc.
  Does not work across processes.
  """
  # Grab a locked handle to the source element's coincidence matrix ...
  coincidenceHandle = getLockedHandle(
    runtimeElement=network.getElement(fromElementName),
    # TODO: Re-purpose for use with nodes other than PMXClassifierNode.
    expression="self._cd._W"
    )
  # ... and hand it to the destination element as a parameter.
  network.getElement(toElementName).setParameter("coincidencesAbove",
                                                 coincidenceHandle)
  # print network.getElement(toElementName).interpret(
  #     "self._inferenceEngines[0]._coincidences")
####################################################################
# Support code for matching named algorithms with code. #
####################################################################
class DynamicImport(object):
  """
  Callable factory: lazily imports ``className`` from ``moduleName`` and
  invokes it with the keyword arguments supplied at call time.
  """
  def __init__(self, moduleName, className):
    self.moduleName = moduleName
    self.className = className
  def __call__(self, **keywords):
    # Import on every call so the target module need not exist at
    # construction time.
    module = __import__(self.moduleName, fromlist=[self.className])
    return getattr(module, self.className)(**keywords)
class DynamicGroupingFunction(object):
  """
  Callable factory that lazily imports ``funcName`` from ``moduleName`` and
  wraps it, together with re-mapped learning parameters and grouping keyword
  arguments, in a GroupingFunction.
  """
  def __init__(self,
      moduleName,
      funcName,
      learningKeys=None,
      ):
    self.moduleName = moduleName
    self.funcName = funcName
    # Avoid a shared mutable default: each instance gets its own list.
    self.learningKeys = learningKeys if learningKeys is not None else []
  def __call__(self,
      learning,
      **keywords
      ):
    module = __import__(self.moduleName, fromlist=[self.funcName])
    function = getattr(module, self.funcName)
    # Re-map names.
    if isinstance(self.learningKeys, dict): # Safer check?
      # A dict maps incoming learning-arg names to the names the target
      # function expects. BUGFIX: use .items() instead of the Python-2-only
      # .iteritems() so this works on both Python 2 and Python 3.
      remapped = dict((k, learning[j]) for j, k in self.learningKeys.items()
                      if j in learning)
    else: # Just collect arguments.
      remapped = dict((j, learning[j]) for j in self.learningKeys
                      if j in learning)
    return GroupingFunction(function, learning=remapped, grouping=keywords)
class GroupingFunction(object):
  """
  Bind a grouping function together with its pre-validated learning and
  grouping keyword arguments, so it can later be applied to a model.
  """
  def __init__(self, function, learning, grouping):
    self.function = function
    combined = dict(grouping)
    for key in learning:
      # Learning parameters must not silently shadow grouping parameters.
      assert key not in combined
    combined.update(learning)
    self.args = combined
  def group(self, model):
    """Invoke the wrapped function on ``model`` with the stored arguments."""
    return self.function(model=model, **self.args)
####################################################################
# Printing and visualization. #
####################################################################
def nz(x):
  # Collect the indices of x's nonzero entries into a NodeSetStream and
  # return the resulting set. NOTE(review): requires the nupic.network
  # bindings; x is presumably a numpy array (it must expose .nonzero()).
  from nupic.network import NodeSetStream
  y = NodeSetStream()
  for i in x.nonzero()[0]: y.insert(i)
  return y.getSet()
def printStatesWithTitles(ts):
  """
  Pretty-print a list of (title, state vector) pairs: one row per state,
  titles right-aligned, with each row's maximum value appended.
  """
  # Temporarily widen numpy's print width so each state fits on one line.
  lw = numpy.get_printoptions()["linewidth"]
  numpy.set_printoptions(linewidth=100000)
  titles = [t for t, s in ts]
  # Right-align all titles to the width of the longest one.
  ml = max(len(t) for t in titles)
  titles = [((" " * (ml-len(t))) + t) for t in titles]
  combined = numpy.vstack([s for t, s in ts])
  maxes = combined.max(1)
  s = str(combined)
  # Restore the caller's numpy print options before emitting output.
  numpy.set_printoptions(linewidth=lw)
  # NOTE: Python 2 print statement -- this module predates Python 3 support.
  print "\n".join(("%s %s %f" % (t, l, m)) for t, l, m in
                  zip(titles, s.splitlines(), maxes))
def _viewTAM(tam, nsp):
  # Debug helper: print a (thresholded) view of a temporal adjacency matrix.
  from nupic.support.learning import printTAM
  if 0:
    # Disabled branch: print the raw matrix without thresholding.
    printTAM(tam, nsp=nsp, precision=2)
  else:
    # Print a copy thresholded at 10% of the matrix maximum to de-clutter
    # the output. NOTE(review): tam.max() appears to return a tuple whose
    # third element is the maximum value -- confirm against the TAM binding.
    tamView = tam.__class__()
    tamView.copy(tam)
    tamView.threshold(0.1 * tam.max()[2])
    printTAM(tamView, childBoundary=nsp, precision=1)
| 6,347 | Python | .py | 164 | 34.908537 | 80 | 0.671552 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,918 | __init__.py | numenta_nupic-legacy/src/nupic/algorithms/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
The nupic.algorithms package contains Python implementations of Numenta
algorithms that are used inside Numenta-supplied regions.
"""
from nupic.bindings.math import NearestNeighbor
from nupic.bindings.algorithms import svm_01, svm_dense
| 1,220 | Python | .py | 26 | 45.846154 | 72 | 0.701342 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,919 | sdr_classifier_diff.py | numenta_nupic-legacy/src/nupic/algorithms/sdr_classifier_diff.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""SDR classifier diff tool.
This class can be used just like versions of the SDR classifier but internally
creates instances of each SDR classifier. Each record is fed to both
classifiers and the results are checked for differences.
"""
import numbers
from nupic.algorithms.sdr_classifier import SDRClassifier
from nupic.bindings.algorithms import SDRClassifier as SDRClassifierCpp
CALLS_PER_SERIALIZE = 100
class SDRClassifierDiff(object):
  """Classifier-like object that diffs the output from different classifiers.

  Instances of each version of the SDR classifier are created and each call to
  compute is passed to each version of the classifier. The results are diffed
  to make sure the there are no differences.

  Optionally, the classifiers can be serialized and deserialized after a
  specified number of calls to compute to ensure that serialization does not
  cause discrepencies between the results.

  TODO: Check internal state as well.
  TODO: Provide option to write output to a file.
  TODO: Provide record differences without throwing an exception.
  """

  __VERSION__ = 'SDRClassifierDiffV1'

  def __init__(self, steps=(1,), alpha=0.001, actValueAlpha=0.3, verbosity=0,
               callsPerSerialize=CALLS_PER_SERIALIZE):
    # One instance of each implementation; both see every record.
    self._sdrClassifier = SDRClassifier(steps, alpha, actValueAlpha, verbosity)
    self._sdrClassifierCpp = SDRClassifierCpp(steps, alpha, actValueAlpha,
                                              verbosity)
    # Number of compute() calls so far; drives the serialization cadence.
    self._calls = 0
    self._callsPerSerialize = callsPerSerialize

  @staticmethod
  def _serializationRoundTrip(classifier):
    """Serialize 'classifier' to bytes and read it back, returning the copy."""
    schema = classifier.getSchema()
    proto = schema.new_message()
    classifier.write(proto)
    proto = schema.from_bytes(proto.to_bytes())
    return classifier.__class__.read(proto)

  def compute(self, recordNum, patternNZ, classification, learn, infer):
    """Feed one record to both classifiers and assert their outputs agree.

    Returns the Python implementation's result. Raises AssertionError on any
    discrepancy between the two implementations.
    """
    pyResult = self._sdrClassifier.compute(recordNum, patternNZ,
                                           classification, learn, infer)
    cppResult = self._sdrClassifierCpp.compute(recordNum, patternNZ,
                                               classification, learn, infer)
    self._calls += 1
    # Periodically round-trip both classifiers through serialization so that
    # serialization bugs also show up as diffs.
    if self._calls % self._callsPerSerialize == 0:
      self._sdrClassifier = self._serializationRoundTrip(self._sdrClassifier)
      self._sdrClassifierCpp = self._serializationRoundTrip(
          self._sdrClassifierCpp)
    # The two implementations must agree on container type and key set.
    assert type(pyResult) == type(cppResult)
    assert set(pyResult.keys()) == set(cppResult.keys()), "diff detected: " \
        "py result=%s, C++ result=%s" % (pyResult, cppResult)
    # ... and on every value; real-valued outputs get a small float tolerance.
    for k, l in pyResult.iteritems():
      assert type(l) == type(cppResult[k])
      for i in xrange(len(l)):
        if isinstance(classification['actValue'], numbers.Real):
          assert abs(float(l[i]) - float(cppResult[k][i])) < 0.0000001, (
              'Python SDRClassifier has value %f and C++ SDRClassifierCpp has '
              'value %f.' % (l[i], cppResult[k][i]))
        else:
          assert l[i] == cppResult[k][i], (
              'Python SDRClassifier has value %s and C++ SDRClassifierCpp has '
              'value %s.' % (str(l[i]), str(cppResult[k][i])))
    return pyResult
| 4,393 | Python | .py | 85 | 45.588235 | 79 | 0.685168 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,920 | fdrutilities.py | numenta_nupic-legacy/src/nupic/algorithms/fdrutilities.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
from numpy import *
import random
import copy
import itertools
random.seed(42)
numpy.random.seed(42)
from nupic.bindings.math import (SM32, SparseBinaryMatrix)
def setRandomSeed(seed):
  """Seed both the stdlib and numpy RNGs with the same value.

  Helpful to make unit tests repeatable.
  """
  numpy.random.seed(seed)
  random.seed(seed)
def addNoise(input, noise=0.1, doForeground=True, doBackground=True):
  """
  Return a noisy copy of the given binary input.

  Parameters:
  -----------------------------------------------
  input:          the binary array to add noise to
  noise:          probability of disturbing any given bit
  doForeground:   If true, turn off some of the 1 bits in the input
  doBackground:   If true, turn on some of the 0 bits in the input
  """
  if doForeground and doBackground:
    # Flip bits in both directions: XOR a random mask of density 'noise'
    # into the input via abs(input - mask).
    flipMask = numpy.random.random(input.shape) < noise
    return numpy.abs(input - flipMask)
  if doForeground:
    # Only knock out some of the on bits.
    return numpy.logical_and(input, numpy.random.random(input.shape) > noise)
  if doBackground:
    # Only turn on some of the off bits.
    return numpy.logical_or(input, numpy.random.random(input.shape) < noise)
  # Neither kind of noise requested: hand back the input unchanged.
  return input
def generateCoincMatrix(nCoinc=10, length=500, activity=50):
  """
  Generate a coincidence matrix. This is used to generate random inputs to the
  temporal learner and to compare the predicted output against.
  It generates a matrix of nCoinc rows, each row has length 'length' and has
  a total of 'activity' bits on.
  Parameters:
  -----------------------------------------------
  nCoinc: the number of rows to generate
  length: the length of each row
  activity: the number of ones to put into each row.
  retval: an SM32 sparse matrix of shape (nCoinc, length) whose rows each
          contain exactly 'activity' entries of value 1.0.
  """
  coincMatrix0 = SM32(int(nCoinc), int(length))
  # All nonzeros share the same weight of 1.0.
  theOnes = numpy.array([1.0] * activity, dtype=numpy.float32)
  for rowIdx in xrange(nCoinc):
    # Pick 'activity' distinct column positions for this row; setRowFromSparse
    # wants the indices sorted.
    coinc = numpy.array(random.sample(xrange(length),
              activity), dtype=numpy.uint32)
    coinc.sort()
    coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)
  # This is the right code to use, it's faster, but it derails the unit
  # testing of the pooling for now.
  # NOTE(review): the matrix below is built and then discarded -- only
  # coincMatrix0 is returned. It may still matter for reproducibility if
  # initializeWithFixedNNZR consumes random state; confirm before removing.
  coincMatrix = SM32(int(nCoinc), int(length))
  coincMatrix.initializeWithFixedNNZR(activity)
  return coincMatrix0
def generateVectors(numVectors=100, length=500, activity=50):
  """
  Generate a list of random sparse distributed vectors. This is used to generate
  training vectors to the spatial or temporal learner and to compare the predicted
  output against.
  It generates a list of 'numVectors' elements, each element has length 'length'
  and has a total of 'activity' bits on.
  Parameters:
  -----------------------------------------------
  numVectors: the number of vectors to generate
  length: the length of each row
  activity: the number of ones to put into each row.
  retval: list of int32 numpy arrays, each with exactly 'activity' ones.
  """
  vectors = []
  # Scratch buffer reused across iterations; a copy is appended each time.
  coinc = numpy.zeros(length, dtype='int32')
  indexList = range(length)
  # range (not the py2-only xrange) behaves identically here and keeps the
  # function forward compatible.
  for _ in range(numVectors):
    coinc[:] = 0
    # Choose 'activity' distinct positions to turn on.
    coinc[random.sample(indexList, activity)] = 1
    vectors.append(coinc.copy())
  return vectors
def generateSimpleSequences(nCoinc=10, seqLength=[5,6,7], nSeq=100):
  """
  Generate a set of simple sequences. The elements of the sequences will be
  integers from 0 to 'nCoinc'-1. The length of each sequence will be
  randomly chosen from the 'seqLength' list.
  Parameters:
  -----------------------------------------------
  nCoinc: the number of elements available to use in the sequences
  seqLength: a list of possible sequence lengths. The length of each
                     sequence will be randomly chosen from here.
  nSeq: The number of sequences to generate
  retval: a list of sequences. Each sequence is itself a list
                     containing the coincidence indices for that sequence.
  """
  coincList = range(nCoinc)
  # Loop invariant, hoisted: when every possible length fits within nCoinc we
  # can sample without replacement; otherwise elements must be allowed to
  # repeat within a sequence.
  sampleWithoutReplacement = max(seqLength) <= nCoinc
  seqList = []
  for _ in range(nSeq):
    if sampleWithoutReplacement:
      seqList.append(random.sample(coincList, random.choice(seqLength)))
    else:
      # Use 'seqLen' for the chosen length -- the original rebound the
      # builtin 'len' here, which shadowed it for the rest of the loop body.
      seqLen = random.choice(seqLength)
      seqList.append([random.choice(coincList) for _ in range(seqLen)])
  return seqList
def generateHubSequences(nCoinc=10, hubs = [2,6], seqLength=[5,6,7], nSeq=100):
  """
  Generate a set of hub sequences: sequences that always carry one of the
  'hubs' elements in their middle position. All other positions are filled
  with non-hub elements drawn (without replacement) from 0..'nCoinc'-1.
  Parameters:
  -----------------------------------------------
  nCoinc: the number of elements available to use in the sequences
  hubs: which of the elements will be used as hubs.
  seqLength: a list of possible sequence lengths. The length of each
                     sequence will be randomly chosen from here.
  nSeq: The number of sequences to generate
  retval: a list of sequences. Each sequence is itself a list
                     containing the coincidence indices for that sequence.
  """
  # Pool of non-hub elements; hubs only ever appear at the middle slot.
  coincList = [c for c in range(nCoinc) if c not in hubs]
  seqList = []
  for _ in range(nSeq):
    bodyLength = random.choice(seqLength) - 1
    seq = random.sample(coincList, bodyLength)
    # Drop a randomly chosen hub into the middle of the sequence.
    seq.insert(bodyLength//2, random.choice(hubs))
    seqList.append(seq)
  return seqList
def genTestSeqsForLookback(nPatterns=10, patternLen=500, patternActivity=50,
seqLength=[5,6,7], nSequences=50):
"""
Generate two sets of sequences. The first set of sequences is used to train
the sequence learner till it fills up capacity. The second set is then used
to further train the system to test its generalization capability using the
one step look back idea. The second set of sequences are generated by modifying
the first set
Parameters:
-----------------------------------------------
nPatterns: the number of patterns to use in the sequences.
patternLen: The number of elements in each pattern
patternActivity: The number of elements that should be active in
each pattern
seqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nSequences: The number of simple sequences in the first set
retval: (seqList1, seqList2, patterns)
seqList1, seqList2: a list of sequences. Each sequence is itself a list
containing the input pattern indices for that sequence.
patterns: the input patterns used in the seqList.
"""
# Create the input patterns
patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,
activity=patternActivity)
#patterns = generateSimpleCoincMatrix(nCoinc=nPatterns, length=patternLen,
# activity=patternActivity)
similarity = []
for i in xrange(nPatterns):
similarity.append(patterns.rightVecProd(patterns.getRow(i)))
similarity = numpy.array(similarity, dtype='int32')
print similarity
# Create the raw sequences
seqList1 = generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,
nSeq=nSequences)
#The second set of sequences are obtained by replacing just the first
#element in each sequence.
seqList2 = copy.deepcopy(seqList1)
for i in range(0,len(seqList2)):
seqList2[i][0] = random.randint(0,nPatterns-1)
#return ([range(6),[5,4,1,3,4]],[[7,1,2,3,4,5]],patterns)
return (seqList1, seqList2, patterns)
def generateSimpleCoincMatrix(nCoinc=10, length=500, activity=50):
  """
  Generate a non overlapping coincidence matrix. This is used to generate random
  inputs to the temporal learner and to compare the predicted output against.
  Each of the nCoinc rows has length 'length' and contains a block of
  'activity' consecutive ones; the blocks of different rows never overlap.
  Parameters:
  -----------------------------------------------
  nCoinc: the number of rows to generate
  length: the length of each row
  activity: the number of ones to put into each row.
  """
  assert nCoinc * activity <= length, \
      "can't generate non-overlapping coincidences"
  coincMatrix = SM32(0, length)
  row = numpy.zeros(length, dtype='int32')
  for i in range(nCoinc):
    # Row i occupies its own disjoint slice [i*activity, (i+1)*activity).
    row[:] = 0
    row[i * activity:(i + 1) * activity] = 1
    coincMatrix.addRow(row)
  return coincMatrix
def generateSequences(nPatterns=10, patternLen=500, patternActivity=50,
                      hubs=[2,6], seqLength=[5,6,7],
                      nSimpleSequences=50, nHubSequences=50):
  """
  Generate a set of simple and hub sequences. A simple sequence contains
  a randomly chosen set of elements from 0 to 'nCoinc-1'. A hub sequence
  always contains a hub element in the middle of it.
  Parameters:
  -----------------------------------------------
  nPatterns: the number of patterns to use in the sequences.
  patternLen: The number of elements in each pattern
  patternActivity: The number of elements that should be active in
                             each pattern
  hubs: which of the elements will be used as hubs.
  seqLength: a list of possible sequence lengths. The length of each
                             sequence will be randomly chosen from here.
  nSimpleSequences: The number of simple sequences to generate
  nHubSequences: The number of hub sequences to generate
  retval: (seqList, patterns)
          seqList: a list of sequences. Each sequence is itself a list
                             containing the input pattern indices for that sequence.
          patterns: the input patterns used in the seqList.
  """
  # Random input patterns, one per matrix row.
  patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,
                                 activity=patternActivity)
  # Simple sequences first, then hub sequences, concatenated in that order.
  simpleSeqs = generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,
                                       nSeq=nSimpleSequences)
  hubSeqs = generateHubSequences(nCoinc=nPatterns, hubs=hubs,
                                 seqLength=seqLength, nSeq=nHubSequences)
  return (simpleSeqs + hubSeqs, patterns)
def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7],
                        nL1SimpleSequences=50, nL1HubSequences=50,
                        l1Pooling=4, perfectStability=False, spHysteresisFactor=1.0,
                        patternLen=500, patternActivity=50):
  """
  Generate the simulated output from a spatial pooler that's sitting
  on top of another spatial pooler / temporal memory pair. The average on-time
  of the outputs from the simulated TM is given by the l1Pooling argument.
  In this routine, L1 refers to the first spatial and temporal memory and L2
  refers to the spatial pooler above that.
  Parameters:
  -----------------------------------------------
  nL1Patterns: the number of patterns to use in the L1 sequences.
  l1Hubs: which of the elements will be used as hubs.
  l1SeqLength: a list of possible sequence lengths. The length of each
                             sequence will be randomly chosen from here.
  nL1SimpleSequences: The number of simple sequences to generate for L1
  nL1HubSequences: The number of hub sequences to generate for L1
  l1Pooling: The number of time steps to pool over in the L1 temporal
                             pooler
  perfectStability: If true, then the input patterns represented by the
                             sequences generated will have perfect stability over
                             l1Pooling time steps. This is the best case ideal input
                             to a TM. In actual situations, with an actual SP
                             providing input, the stability will always be less than
                             this.
  spHystereisFactor: The hysteresisFactor to use in the L2 spatial pooler.
                             Only used when perfectStability is False
  patternLen: The number of elements in each pattern output by L2
  patternActivity: The number of elements that should be active in
                             each pattern
  @retval: (seqList, patterns)
          seqList: a list of sequences output from L2. Each sequence is
                             itself a list containing the input pattern indices for that
                             sequence.
          patterns: the input patterns used in the L2 seqList.
  """
  # First, generate the L1 sequences
  l1SeqList = generateSimpleSequences(nCoinc=nL1Patterns, seqLength=l1SeqLength,
                                    nSeq=nL1SimpleSequences) + \
              generateHubSequences(nCoinc=nL1Patterns, hubs=l1Hubs,
                                    seqLength=l1SeqLength, nSeq=nL1HubSequences)
  # Generate the L2 SP output from those
  # NOTE(review): generateSlowSPOutput is defined elsewhere in this module;
  # presumably it emits one dense vector per time step, with all-zero vectors
  # acting as sequence resets (that is how they are treated below) -- confirm.
  spOutput = generateSlowSPOutput(seqListBelow = l1SeqList,
                poolingTimeBelow=l1Pooling, outputWidth=patternLen,
                activity=patternActivity, perfectStability=perfectStability,
                spHysteresisFactor=spHysteresisFactor)
  # Map the spOutput patterns into indices into a pattern matrix which we
  # generate now.
  outSeq = None
  outSeqList = []
  outPatterns = SM32(0, patternLen)
  for pattern in spOutput:
    # If we have a reset vector start a new sequence
    if pattern.sum() == 0:
      if outSeq is not None:
        outSeqList.append(outSeq)
      outSeq = []
      continue
    # See if this vector matches a pattern we've already seen before
    patternIdx = None
    if outPatterns.nRows() > 0:
      # Find most matching 1's.
      matches = outPatterns.rightVecSumAtNZ(pattern)
      outCoinc = matches.argmax().astype('uint32')
      # See if its number of 1's is the same in the pattern and in the
      # coincidence row. If so, it is an exact match
      numOnes = pattern.sum()
      if matches[outCoinc] == numOnes \
          and outPatterns.getRow(int(outCoinc)).sum() == numOnes:
        patternIdx = outCoinc
    # If no match, add this pattern to our matrix
    if patternIdx is None:
      outPatterns.addRow(pattern)
      patternIdx = outPatterns.nRows() - 1
    # Store the pattern index into the sequence
    outSeq.append(patternIdx)
  # Put in last finished sequence
  if outSeq is not None:
    outSeqList.append(outSeq)
  # Return with the seqList and patterns matrix
  return (outSeqList, outPatterns)
def vectorsFromSeqList(seqList, patternMatrix):
  """
  Expand sequences of pattern indices into a dense boolean array of patterns.
  Parameters:
  -----------------------------------------------
  seqList: list of sequences; each sequence is a list of row indices
                         into patternMatrix
  patternMatrix: a SparseMatrix contaning the possible patterns used in
                         the sequence.
  retval: 2-D bool array with one row per sequence element, in
                         sequence order.
  """
  # Total number of time steps across all sequences.
  nRows = sum(len(seq) for seq in seqList)
  vectors = numpy.zeros((nRows, patternMatrix.shape[1]), dtype='bool')
  rowIdx = 0
  for seq in seqList:
    for coinc in numpy.array(seq, dtype='uint32'):
      vectors[rowIdx] = patternMatrix.getRow(int(coinc))
      rowIdx += 1
  return vectors
###############################################################################
# The following three functions are used in tests to compare two different
# TM instances.
def sameTMParams(tp1, tp2):
"""Given two TM instances, see if any parameters are different."""
result = True
for param in ["numberOfCols", "cellsPerColumn", "initialPerm", "connectedPerm",
"minThreshold", "newSynapseCount", "permanenceInc", "permanenceDec",
"permanenceMax", "globalDecay", "activationThreshold",
"doPooling", "segUpdateValidDuration",
"burnIn", "pamLength", "maxAge"]:
if getattr(tp1, param) != getattr(tp2,param):
print param,"is different"
print getattr(tp1, param), "vs", getattr(tp2,param)
result = False
return result
def sameSynapse(syn, synapses):
  """Check whether 'syn' appears in the list 'synapses'.

  A synapse is represented as [col, cell, permanence]. A synapse matches if
  col and cell are identical and the permanence value is within 0.001.
  Returns True on a match, False otherwise.
  """
  col, cell, perm = syn[0], syn[1], syn[2]
  return any((s[0] == col) and (s[1] == cell) and (abs(s[2] - perm) <= 0.001)
             for s in synapses)
def sameSegment(seg1, seg2):
"""Return True if seg1 and seg2 are identical, ignoring order of synapses"""
result = True
# check sequence segment, total activations etc. In case any are floats,
# check that they are within 0.001.
for field in [1, 2, 3, 4, 5, 6]:
if abs(seg1[0][field] - seg2[0][field]) > 0.001:
result = False
# Compare number of synapses
if len(seg1[1:]) != len(seg2[1:]):
result = False
# Now compare synapses, ignoring order of synapses
for syn in seg2[1:]:
if syn[2] <= 0:
print "A synapse with zero permanence encountered"
result = False
if result == True:
for syn in seg1[1:]:
if syn[2] <= 0:
print "A synapse with zero permanence encountered"
result = False
res = sameSynapse(syn, seg2[1:])
if res == False:
result = False
return result
def tmDiff(tm1, tm2, verbosity = 0, relaxSegmentTests =True):
  """
  Given two TM instances, list the difference between them and returns False
  if there is a difference. This function checks the major parameters. If this
  passes (and checkLearn is true) it checks the number of segments on
  each cell. If this passes, checks each synapse on each segment.
  When comparing C++ and Py, the segments are usually in different orders in the
  cells. tmDiff ignores segment order when comparing TM's.
  Parameters:
  -----------------------------------------------
  tm1, tm2: the two TM instances to compare
  verbosity: >= 1 dumps differing cells; > 1 announces a match
  relaxSegmentTests: when True, skip the expensive per-segment comparison
  retval: True if the TMs match, False otherwise (differences are
                     printed as they are found)
  """
  # First check basic parameters. If we fail here, don't continue
  if sameTMParams(tm1, tm2) == False:
    print "Two TM's have different parameters"
    return False
  result = True
  # Compare states at t first, they usually diverge before the structure of the
  # cells starts diverging
  if (tm1.activeState['t'] != tm2.activeState['t']).any():
    print 'Active states diverge', numpy.where(tm1.activeState['t'] != tm2.activeState['t'])
    result = False
  if (tm1.predictedState['t'] - tm2.predictedState['t']).any():
    print 'Predicted states diverge', numpy.where(tm1.predictedState['t'] != tm2.predictedState['t'])
    result = False
  # TODO: check confidence at T (confT)
  # Now check some high level learned parameters.
  if tm1.getNumSegments() != tm2.getNumSegments():
    print "Number of segments are different", tm1.getNumSegments(), tm2.getNumSegments()
    result = False
  if tm1.getNumSynapses() != tm2.getNumSynapses():
    print "Number of synapses are different", tm1.getNumSynapses(), tm2.getNumSynapses()
    tm1.printCells()
    tm2.printCells()
    result = False
  # Check that each cell has the same number of segments and synapses
  for c in xrange(tm1.numberOfCols):
    for i in xrange(tm2.cellsPerColumn):
      if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):
        print "Num segments different in cell:",c,i,
        print tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i)
        result = False
  # If the above tests pass, then check each segment and report differences
  # Note that segments in tm1 can be in a different order than tm2. Here we
  # make sure that, for each segment in tm1, there is an identical segment
  # in tm2.
  if result == True and not relaxSegmentTests:
    for c in xrange(tm1.numberOfCols):
      for i in xrange(tm2.cellsPerColumn):
        nSegs = tm1.getNumSegmentsInCell(c, i)
        for segIdx in xrange(nSegs):
          tm1seg = tm1.getSegmentOnCell(c, i, segIdx)
          # Loop through all segments in tm2seg and see if any of them match tm1seg
          res = False
          for tm2segIdx in xrange(nSegs):
            tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)
            if sameSegment(tm1seg, tm2seg) == True:
              res = True
              break
          if res == False:
            print "\nSegments are different for cell:",c,i
            if verbosity >= 1:
              print "C++"
              tm1.printCell(c, i)
              print "Py"
              tm2.printCell(c, i)
            result = False
  if result == True and (verbosity > 1):
    print "TM's match"
  return result
def tmDiff2(tm1, tm2, verbosity = 0, relaxSegmentTests =True,
            checkLearn = True, checkStates = True):
  """
  Given two TM instances, list the difference between them and returns False
  if there is a difference. This function checks the major parameters. If this
  passes (and checkLearn is true) it checks the number of segments on each cell.
  If this passes, checks each synapse on each segment.
  When comparing C++ and Py, the segments are usually in different orders in the
  cells. tmDiff ignores segment order when comparing TM's.
  If checkLearn is True, will check learn states as well as all the segments
  If checkStates is True, will check the various state arrays
  """
  # First check basic parameters. If we fail here, don't continue
  if sameTMParams(tm1, tm2) == False:
    print "Two TM's have different parameters"
    return False
  # Labels used in diagnostic output to tell the two instances apart.
  tm1Label = "<tm_1 (%s)>" % tm1.__class__.__name__
  tm2Label = "<tm_2 (%s)>" % tm2.__class__.__name__
  result = True
  if checkStates:
    # Compare states at t first, they usually diverge before the structure of the
    # cells starts diverging
    if (tm1.infActiveState['t'] != tm2.infActiveState['t']).any():
      print 'Active states diverged', numpy.where(tm1.infActiveState['t'] != tm2.infActiveState['t'])
      result = False
    if (tm1.infPredictedState['t'] - tm2.infPredictedState['t']).any():
      print 'Predicted states diverged', numpy.where(tm1.infPredictedState['t'] != tm2.infPredictedState['t'])
      result = False
    if checkLearn and (tm1.lrnActiveState['t'] - tm2.lrnActiveState['t']).any():
      print 'lrnActiveState[t] diverged', numpy.where(tm1.lrnActiveState['t'] != tm2.lrnActiveState['t'])
      result = False
    if checkLearn and (tm1.lrnPredictedState['t'] - tm2.lrnPredictedState['t']).any():
      print 'lrnPredictedState[t] diverged', numpy.where(tm1.lrnPredictedState['t'] != tm2.lrnPredictedState['t'])
      result = False
  if checkLearn and abs(tm1.getAvgLearnedSeqLength() - tm2.getAvgLearnedSeqLength()) > 0.01:
    print "Average learned sequence lengths differ: ",
    print tm1.getAvgLearnedSeqLength(), " vs ", tm2.getAvgLearnedSeqLength()
    result = False
  # TODO: check confidence at T (confT)
  # Now check some high level learned parameters.
  if tm1.getNumSegments() != tm2.getNumSegments():
    print "Number of segments are different", tm1.getNumSegments(), tm2.getNumSegments()
    result = False
  if tm1.getNumSynapses() != tm2.getNumSynapses():
    print "Number of synapses are different", tm1.getNumSynapses(), tm2.getNumSynapses()
    if verbosity >= 3:
      print "%s: " % tm1Label,
      tm1.printCells()
      print "\n%s : " % tm2Label,
      tm2.printCells()
    # NOTE(review): a synapse-count mismatch is deliberately not treated as
    # fatal here (the line below is commented out) -- confirm intent.
    #result = False
  # Check that each cell has the same number of segments and synapses
  for c in xrange(tm1.numberOfCols):
    for i in xrange(tm2.cellsPerColumn):
      if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):
        print "Num segments different in cell:",c,i,
        print tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i)
        result = False
  # If the above tests pass, then check each segment and report differences
  # Note that segments in tm1 can be in a different order than tm2. Here we
  # make sure that, for each segment in tm1, there is an identical segment
  # in tm2.
  if result == True and not relaxSegmentTests and checkLearn:
    for c in xrange(tm1.numberOfCols):
      for i in xrange(tm2.cellsPerColumn):
        nSegs = tm1.getNumSegmentsInCell(c, i)
        for segIdx in xrange(nSegs):
          tm1seg = tm1.getSegmentOnCell(c, i, segIdx)
          # Loop through all segments in tm2seg and see if any of them match tm1seg
          res = False
          for tm2segIdx in xrange(nSegs):
            tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)
            if sameSegment(tm1seg, tm2seg) == True:
              res = True
              break
          if res == False:
            print "\nSegments are different for cell:",c,i
            result = False
            if verbosity >= 0:
              print "%s : " % tm1Label,
              tm1.printCell(c, i)
              print "\n%s : " % tm2Label,
              tm2.printCell(c, i)
  if result == True and (verbosity > 1):
    print "TM's match"
  return result
def spDiff(SP1,SP2):
  """
  Function that compares two spatial pooler instances. Compares the
  static variables between the two poolers to make sure that they are equivalent.
  Parameters
  -----------------------------------------
  SP1 first spatial pooler to be compared
  SP2 second spatial pooler to be compared
  To establish equality, this function does the following:
  1.Compares the connected synapse matrices for each coincidence
  2.Compare the potential synapse matrices for each coincidence
  3.Compare the permanence matrices for each coincidence
  4.Compare the firing boosts between the two poolers.
  5.Compare the duty cycles before and after inhibition for both poolers
  Returns True when all checks pass, False (after printing the first
  mismatch found) otherwise.
  NOTE(review): reads the poolers' private _master* / _dutyCycle* attributes
  directly, so it is coupled to that internal representation.
  """
  if(len(SP1._masterConnectedM)!=len(SP2._masterConnectedM)):
    print "Connected synapse matrices are different sizes"
    return False
  if(len(SP1._masterPotentialM)!=len(SP2._masterPotentialM)):
    print "Potential synapse matrices are different sizes"
    return False
  if(len(SP1._masterPermanenceM)!=len(SP2._masterPermanenceM)):
    print "Permanence matrices are different sizes"
    return False
  #iterate over cells
  for i in range(0,len(SP1._masterConnectedM)):
    #grab the Coincidence Matrices and compare them
    connected1 = SP1._masterConnectedM[i]
    connected2 = SP2._masterConnectedM[i]
    if(connected1!=connected2):
      print "Connected Matrices for cell %d different" % (i)
      return False
    #grab permanence Matrices and compare them
    permanences1 = SP1._masterPermanenceM[i];
    permanences2 = SP2._masterPermanenceM[i];
    if(permanences1!=permanences2):
      print "Permanence Matrices for cell %d different" % (i)
      return False
    #grab the potential connection Matrices and compare them
    potential1 = SP1._masterPotentialM[i];
    potential2 = SP2._masterPotentialM[i];
    if(potential1!=potential2):
      print "Potential Matrices for cell %d different" % (i)
      return False
  #Check firing boosts
  if(not numpy.array_equal(SP1._firingBoostFactors,SP2._firingBoostFactors)):
    print "Firing boost factors are different between spatial poolers"
    return False
  #Check duty cycles after inhibiton
  if(not numpy.array_equal(SP1._dutyCycleAfterInh,SP2._dutyCycleAfterInh)):
    print "Duty cycles after inhibition are different between spatial poolers"
    return False
  #Check duty cycles before inhibition
  if(not numpy.array_equal(SP1._dutyCycleBeforeInh,SP2._dutyCycleBeforeInh)):
    print "Duty cycles before inhibition are different between spatial poolers"
    return False
  print("Spatial Poolers are equivalent")
  return True
def removeSeqStarts(vectors, resets, numSteps=1):
  """
  Return a copy of 'vectors' with the first 'numSteps' rows of every
  sequence removed.
  Parameters:
  -----------------------------------------------
  vectors: the data vectors. Row 0 contains the outputs from time
                     step 0, row 1 from time step 1, etc.
  resets: the reset signal. This is a vector of booleans
                     the same length as the number of rows in 'vectors'. It
                     has a 1 where a sequence started and a 0 otherwise.
  numSteps: Number of samples to remove from the start of each sequence
  retval: copy of vectors with the sequence starts removed; the
                     original 'vectors' object when numSteps is 0.
  """
  if numSteps == 0:
    return vectors
  # Row indices where sequences begin.
  starts = resets.nonzero()[0]
  # Mark the first numSteps rows of each sequence for deletion.
  doomed = starts
  for offset in range(1, numSteps):
    doomed = numpy.hstack((doomed, starts + offset))
  return numpy.delete(vectors, doomed, axis=0)
def _accumulateFrequencyCounts(values, freqCounts=None):
"""
Accumulate a list of values 'values' into the frequency counts 'freqCounts',
and return the updated frequency counts
For example, if values contained the following: [1,1,3,5,1,3,5], and the initial
freqCounts was None, then the return value would be:
[0,3,0,2,0,2]
which corresponds to how many of each value we saw in the input, i.e. there
were 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's.
If freqCounts is not None, the values will be added to the existing counts and
the length of the frequency Counts will be automatically extended as necessary
Parameters:
-----------------------------------------------
values: The values to accumulate into the frequency counts
freqCounts: Accumulated frequency counts so far, or none
"""
# How big does our freqCounts vector need to be?
values = numpy.array(values)
numEntries = values.max() + 1
if freqCounts is not None:
numEntries = max(numEntries, freqCounts.size)
# Where do we accumulate the results?
if freqCounts is not None:
if freqCounts.size != numEntries:
newCounts = numpy.zeros(numEntries, dtype='int32')
newCounts[0:freqCounts.size] = freqCounts
else:
newCounts = freqCounts
else:
newCounts = numpy.zeros(numEntries, dtype='int32')
# Accumulate the new values
for v in values:
newCounts[v] += 1
return newCounts
def _listOfOnTimesInVec(vector):
"""
Returns 3 things for a vector:
* the total on time
* the number of runs
* a list of the durations of each run.
Parameters:
-----------------------------------------------
input stream: 11100000001100000000011111100000
return value: (11, 3, [3, 2, 6])
"""
# init counters
durations = []
numOnTimes = 0
totalOnTime = 0
# Find where the nonzeros are
nonzeros = numpy.array(vector).nonzero()[0]
# Nothing to do if vector is empty
if len(nonzeros) == 0:
return (0, 0, [])
# Special case of only 1 on bit
if len(nonzeros) == 1:
return (1, 1, [1])
# Count the consecutive non-zeros
prev = nonzeros[0]
onTime = 1
endIdx = nonzeros[-1]
for idx in nonzeros[1:]:
if idx != prev+1:
totalOnTime += onTime
numOnTimes += 1
durations.append(onTime)
onTime = 1
else:
onTime += 1
prev = idx
# Add in the last one
totalOnTime += onTime
numOnTimes += 1
durations.append(onTime)
return (totalOnTime, numOnTimes, durations)
def _fillInOnTimes(vector, durations):
"""
Helper function used by averageOnTimePerTimestep. 'durations' is a vector
which must be the same len as vector. For each "on" in vector, it fills in
the corresponding element of duration with the duration of that "on" signal
up until that time
Parameters:
-----------------------------------------------
vector: vector of output values over time
durations: vector same length as 'vector', initialized to 0's.
This is filled in with the durations of each 'on" signal.
Example:
vector: 11100000001100000000011111100000
durations: 12300000001200000000012345600000
"""
# Find where the nonzeros are
nonzeros = numpy.array(vector).nonzero()[0]
# Nothing to do if vector is empty
if len(nonzeros) == 0:
return
# Special case of only 1 on bit
if len(nonzeros) == 1:
durations[nonzeros[0]] = 1
return
# Count the consecutive non-zeros
prev = nonzeros[0]
onTime = 1
onStartIdx = prev
endIdx = nonzeros[-1]
for idx in nonzeros[1:]:
if idx != prev+1:
# Fill in the durations
durations[onStartIdx:onStartIdx+onTime] = range(1,onTime+1)
onTime = 1
onStartIdx = idx
else:
onTime += 1
prev = idx
# Fill in the last one
durations[onStartIdx:onStartIdx+onTime] = range(1,onTime+1)
def averageOnTimePerTimestep(vectors, numSamples=None):
  """
  Computes the average on-time of the outputs that are on at each time step,
  and then averages this over all time steps.

  This metric is resilient to the number of outputs that are on at each time
  step. That is, if time step 0 has many more outputs on than time step 100,
  it won't skew the results. This is particularly useful when measuring the
  average on-time of things like the temporal memory output where you might
  have many columns bursting at the start of a sequence - you don't want those
  start of sequence bursts to over-influence the calculated average on-time.

  Parameters:
  -----------------------------------------------
  vectors:      the vectors for which the onTime is calculated. Row 0
                contains the outputs from time step 0, row 1 from time step
                1, etc.
  numSamples:   the number of elements for which on-time is calculated.
                If not specified, then all elements are looked at.

  Returns (scalar average on-time over all time steps,
           list containing frequency counts of each encountered on-time)
  """

  # Special case given a 1 dimensional vector: it represents a single column
  if vectors.ndim == 1:
    vectors.shape = (-1, 1)
  numElements = len(vectors[0])

  # How many samples will we look at? If a sample size was requested, pick
  # that many columns at random (with replacement).
  # NOTE: a leftover "import pdb; pdb.set_trace()" debug breakpoint was
  # removed from this branch.
  if numSamples is not None:
    countOn = numpy.random.randint(0, numElements, numSamples)
    vectors = vectors[:, countOn]

  # Fill in each non-zero of vectors with the on-time that that output was
  # on for.
  durations = numpy.zeros(vectors.shape, dtype='int32')
  for col in xrange(vectors.shape[1]):
    _fillInOnTimes(vectors[:, col], durations[:, col])

  # Compute the average on time for each time step. Clip the divisor at 1 so
  # time steps with no active outputs don't divide by zero (their duration
  # sum is 0, so their average stays 0). numpy.maximum is used instead of
  # clip(..., max=numpy.inf, out=sums), which fails on integer arrays under
  # modern numpy because the float result can't be cast back into the int
  # output array.
  sums = numpy.maximum(vectors.sum(axis=1), 1)
  avgDurations = durations.sum(axis=1, dtype='float64') / sums
  avgOnTime = avgDurations.sum() / (avgDurations > 0).sum()

  # Generate the frequency counts for each duration
  freqCounts = _accumulateFrequencyCounts(avgDurations)
  return (avgOnTime, freqCounts)
def averageOnTime(vectors, numSamples=None):
  """
  Returns the average on-time, averaged over all on-time runs.

  Parameters:
  -----------------------------------------------
  vectors:      the vectors for which the onTime is calculated. Row 0
                contains the outputs from time step 0, row 1 from time step
                1, etc.
  numSamples:   the number of elements for which on-time is calculated.
                If not specified, then all elements are looked at.

  Returns:  (scalar average on-time of all outputs,
             list containing frequency counts of each encountered on-time)
  """

  # A 1 dimensional vector represents a single column.
  if vectors.ndim == 1:
    vectors.shape = (-1, 1)
  numElements = vectors.shape[1]

  # Choose which columns to examine: all of them, or a random sample (with
  # replacement).
  if numSamples is None:
    sampleIndices = range(numElements)
  else:
    sampleIndices = numpy.random.randint(0, numElements, numSamples)

  # Accumulate run statistics over the chosen columns.
  totalOnTime = 0.0
  totalRuns = 0
  freqCounts = None
  for colIdx in sampleIndices:
    (onTime, numRuns, runLengths) = _listOfOnTimesInVec(vectors[:, colIdx])
    if onTime != 0.0:
      totalOnTime += onTime
      totalRuns += numRuns
      freqCounts = _accumulateFrequencyCounts(runLengths, freqCounts)

  # Average on-time across all runs that were seen.
  if totalRuns > 0:
    return (totalOnTime / totalRuns, freqCounts)
  else:
    return (0.0, freqCounts)
def plotOutputsOverTime(vectors, buVectors=None, title='On-times'):
  """
  Generate a figure that shows each output over time. Time goes left to
  right, and each output is plotted on a different line, allowing you to see
  the overlap in the outputs, when they turn on/off, etc.

  Parameters:
  ------------------------------------------------------------
  vectors:          the vectors to plot
  buVectors:        These are normally specified when plotting the pooling
                    outputs of the temporal memory over time. The 'buVectors'
                    are the sequence outputs and the 'vectors' are the
                    pooling outputs. The buVector (sequence) outputs will be
                    drawn in a darker color than the vector (pooling) outputs
                    to distinguish where the cell is outputting due to
                    pooling vs. sequence memory.
  title:            title for the plot
  """
  # Produce the plot. pylab is imported lazily so the rest of this module
  # can be used without matplotlib installed, as long as the plotting
  # helpers aren't called.
  import pylab
  pylab.ion()
  pylab.figure()

  # Plot with outputs as rows and time running left to right.
  imData = vectors.transpose()
  if buVectors is not None:
    # Mark the sequence (bottom-up) outputs with the value 2 so they render
    # darker than the pooling outputs (value 1) in the reversed gray map.
    assert(buVectors.shape == vectors.shape)
    imData = imData.copy()
    imData[buVectors.transpose().astype('bool')] = 2
  pylab.imshow(imData, aspect='auto', cmap=pylab.cm.gray_r,
               interpolation='nearest')
  pylab.title(title)
def plotHistogram(freqCounts, title='On-Times Histogram', xLabel='On-Time'):
  """
  This is usually used to display a histogram of the on-times encountered
  in a particular output.

  The freqCounts is a vector containing the frequency counts of each on-time
  (starting at an on-time of 0 and going to an on-time = len(freqCounts)-1)

  The freqCounts are typically generated from the averageOnTimePerTimestep
  or averageOnTime methods of this module.

  Parameters:
  -----------------------------------------------
  freqCounts:   The frequency counts to plot
  title:        Title of the plot
  xLabel:       Label for the histogram's X axis
  """
  # pylab is imported lazily so this module works without matplotlib unless
  # plotting is actually requested.
  import pylab
  pylab.ion()
  pylab.figure()
  # Shift each bar left by half a unit so it is centered on its integer
  # on-time value.
  pylab.bar(numpy.arange(len(freqCounts)) - 0.5, freqCounts)
  pylab.title(title)
  pylab.xlabel(xLabel)
def populationStability(vectors, numSamples=None):
  """
  Returns the stability for the population averaged over multiple time steps.

  At each time step, count the fraction of the active elements which are
  stable from the previous step, then average those fractions.

  Parameters:
  -----------------------------------------------
  vectors:      the vectors for which the stability is calculated
  numSamples    the number of time steps where stability is counted
  """
  # ----------------------------------------------------------------------
  # Pick which consecutive time-step pairs to compare: all of them, or a
  # random sample (with replacement).
  numVectors = len(vectors)
  if numSamples is None:
    numSamples = numVectors - 1
    samplePositions = range(numVectors - 1)
  else:
    samplePositions = numpy.random.randint(0, numVectors - 1, numSamples)

  # Sum the fraction of elements that stayed active across each sampled pair.
  stabilitySum = 0.0
  for pos in samplePositions:
    matchInfo = checkMatch(vectors[pos], vectors[pos + 1], sparse=False)
    # Ignore reset vectors (all 0's)
    if matchInfo[1] != 0:
      stabilitySum += float(matchInfo[0]) / matchInfo[1]

  return stabilitySum / numSamples
def percentOutputsStableOverNTimeSteps(vectors, numSamples=None):
  """
  Returns the percent of the outputs that remain completely stable over
  N time steps.

  For each sliding window of numSamples time steps, compute what fraction of
  the outputs that are active at the start of the window remain active for
  the entire window, then average that fraction over all windows.

  Parameters:
  -----------------------------------------------
  vectors:      the vectors for which the stability is calculated. Row 0
                contains the outputs from time step 0, row 1 from time step
                1, etc.
  numSamples:   the width of the sliding window, in time steps. Despite the
                keyword form, a value must be supplied.
  """
  # ----------------------------------------------------------------------
  # Calculate the stability over each sliding window.
  totalSamples = len(vectors)
  windowSize = numSamples

  # Process each window
  numWindows = 0
  pctStable = 0.0

  for wStart in range(0, totalSamples - windowSize + 1):
    # Count how many elements are active for the entire window.
    window = vectors[wStart:wStart + windowSize]
    outputSums = window.sum(axis=0)
    stableOutputs = (outputSums == windowSize).sum()

    # Fraction of the initially-active outputs that stayed active.
    # NOTE: a stray Python 2 debug print of this value was removed here.
    samplePctStable = float(stableOutputs) / window[0].sum()
    pctStable += samplePctStable
    numWindows += 1

  # Return percent average over all possible windows
  return float(pctStable) / numWindows
def computeSaturationLevels(outputs, outputsShape, sparseForm=False):
  """
  Compute the saturation for a continuous level. This breaks the level into
  multiple regions and computes the saturation level for each region.

  Parameters:
  --------------------------------------------
  outputs:      output of the level. If sparseForm is True, this is a list of
                the non-zeros. If sparseForm is False, it is the dense
                representation
  outputsShape: The shape of the outputs of the level (height, width)
  sparseForm:   whether 'outputs' is given in sparse form (see above)
  retval:       (sat, innerSat):
                sat:      list of the saturation levels of each non-empty
                          region of the level (each 0 -> 1.0)
                innerSat: list of the saturation level of each non-empty
                          region that is not near an edge (each 0 -> 1.0)
  """
  # Get the outputs into a SparseBinaryMatrix. SM32 is not imported in this
  # view -- presumably it comes from nupic.bindings.math at module level;
  # confirm.
  if not sparseForm:
    outputs = outputs.reshape(outputsShape)
    spOut = SM32(outputs)
  else:
    if len(outputs) > 0:
      # Every sparse index must fit inside the declared 2-D shape.
      assert (outputs.max() < outputsShape[0] * outputsShape[1])
    spOut = SM32(1, outputsShape[0] * outputsShape[1])
    spOut.setRowFromSparse(0, outputs, [1]*len(outputs))
    spOut.reshape(outputsShape[0], outputsShape[1])

  # Get the activity in each local region using the nNonZerosPerBox method
  # This method takes a list of the end row indices and a list of the end
  # column indices.
  # We will use regions that are 15x15, which give us about a 1/225 (.4%)
  # resolution on saturation.
  regionSize = 15
  rows = xrange(regionSize+1, outputsShape[0]+1, regionSize)
  cols = xrange(regionSize+1, outputsShape[1]+1, regionSize)
  regionSums = spOut.nNonZerosPerBox(rows, cols)

  # Get all the nonzeros out - those are our saturation sums.
  # NOTE(review): values is presumably a numpy array here (the in-place /=
  # below scales all sums at once) -- confirm what SM32.tolist() returns.
  (locations, values) = regionSums.tolist()
  values /= float(regionSize * regionSize)
  sat = list(values)

  # Now, to compute which are the inner regions, we will only take the ones
  # that are surrounded by activity above, below, left and right
  innerSat = []
  locationSet = set(locations)
  for (location, value) in itertools.izip(locations, values):
    (row, col) = location
    # Keep only regions whose four neighbors are also non-empty, i.e. the
    # region is not on the boundary of the active area.
    if (row-1,col) in locationSet and (row, col-1) in locationSet \
      and (row+1, col) in locationSet and (row, col+1) in locationSet:
      innerSat.append(value)

  return (sat, innerSat)
def checkMatch(input, prediction, sparse=True, verbosity=0):
  """
  Compares the actual input with the predicted input and returns results

  Parameters:
  -----------------------------------------------
  input:          The actual input
  prediction:     the predicted input
  sparse:         If true, they are in sparse form (list of
                  active indices)
  verbosity:      If > 0, print debugging messages

  retval (foundInInput, totalActiveInInput, missingFromInput,
          totalActiveInPrediction)
    foundInInput:       The number of predicted active elements that were
                        found in the actual input
    totalActiveInInput: The total number of active elements in the input.
    missingFromInput:   The number of predicted active elements that were not
                        found in the actual input
    totalActiveInPrediction: The total number of active elements in the
                        prediction
  """
  if sparse:
    activeElementsInInput = set(input)
    activeElementsInPrediction = set(prediction)
  else:
    activeElementsInInput = set(input.nonzero()[0])
    activeElementsInPrediction = set(prediction.nonzero()[0])

  totalActiveInPrediction = len(activeElementsInPrediction)
  totalActiveInInput = len(activeElementsInInput)

  foundInInput = len(activeElementsInPrediction.intersection(activeElementsInInput))
  missingFromInput = len(activeElementsInPrediction.difference(activeElementsInInput))
  missingFromPrediction = len(activeElementsInInput.difference(activeElementsInPrediction))

  if verbosity >= 1:
    # Build the whole message as a single string so this debug output works
    # under both Python 2 and Python 3 (the original used Python 2 print
    # statements, which are a syntax error on Python 3).
    print("preds. found in input: %d out of %d"
          "; preds. missing from input: %d out of %d"
          "; unexpected active in input: %d out of %d"
          % (foundInInput, totalActiveInPrediction,
             missingFromInput, totalActiveInPrediction,
             missingFromPrediction, totalActiveInInput))

  return (foundInInput, totalActiveInInput, missingFromInput,
          totalActiveInPrediction)
def predictionExtent(inputs, resets, outputs, minOverlapPct=100.0):
  """
  Computes the predictive ability of a temporal memory (TM). This routine
  returns a value which is the average number of time steps of prediction
  provided by the TM. It accepts as input the inputs, outputs, and resets
  provided to the TM as well as a 'minOverlapPct' used to evaluate whether
  or not a prediction is a good enough match to the actual input.

  The 'outputs' are the pooling outputs of the TM. This routine treats each
  output as a "manifold" that includes the active columns that should be
  present in the next N inputs. It then looks at each successive input and
  sees if its active columns are within the manifold. For each output
  sample, it computes how many time steps it can go forward on the input
  before the input overlap with the manifold is less than 'minOverlapPct'.
  It returns the average number of time steps calculated for each output.

  Parameters:
  -----------------------------------------------
  inputs:         The inputs to the TM. Row 0 contains the inputs from time
                  step 0, row 1 from time step 1, etc.
  resets:         The reset input to the TM. Element 0 contains the reset
                  from time step 0, element 1 from time step 1, etc.
  outputs:        The pooling outputs from the TM. Row 0 contains the
                  outputs from time step 0, row 1 from time step 1, etc.
  minOverlapPct:  How much each input's columns must overlap with the
                  pooling output's columns to be considered a valid
                  prediction.

  retval:         (Average number of time steps of prediction over all
                   output samples,
                   Average number of time steps of prediction when we
                   aren't cut short by the end of the sequence,
                   List containing frequency counts of each encountered
                   prediction time)
  """
  # Frequency counts: element k is how many times we successfully predicted
  # k steps in advance.
  predCounts = None

  # Total steps of prediction over all samples, and the sample count.
  predTotal = 0
  nSamples = len(outputs)

  # Totals restricted to samples whose prediction runs aren't cut short by
  # the end of the sequence.
  predTotalNotLimited = 0
  nSamplesNotLimited = 0

  # Compute how many cells/column we have.
  nCols = len(inputs[0])
  nCellsPerCol = len(outputs[0]) // nCols

  # Evaluate the prediction for each output sample.
  for sampleIdx in xrange(nSamples):
    # Collapse cells down to columns: the active columns for this output
    # form the prediction "manifold".
    activeCols = outputs[sampleIdx].reshape(nCols, nCellsPerCol).max(axis=1)

    # Walk forward until the sequence ends, a reset occurs, or the input no
    # longer overlaps the manifold strongly enough.
    steps = 0
    while True:
      probeIdx = sampleIdx + steps + 1
      if probeIdx >= nSamples or resets[probeIdx] != 0:
        break
      overlap = numpy.logical_and(inputs[probeIdx], activeCols)
      overlapPct = 100.0 * float(overlap.sum()) / inputs[probeIdx].sum()
      if overlapPct < minOverlapPct:
        break
      steps += 1

    # Accumulate into our totals.
    predCounts = _accumulateFrequencyCounts([steps], predCounts)
    predTotal += steps

    # Include this sample in the "NotLimited" totals if its prediction run
    # wasn't cut short by running off the end of the sequence.
    if resets[sampleIdx] or \
        ((sampleIdx + steps + 1 < nSamples) and
         (not resets[sampleIdx + steps + 1])):
      predTotalNotLimited += steps
      nSamplesNotLimited += 1

  # Return results
  return (float(predTotal) / nSamples,
          float(predTotalNotLimited) / nSamplesNotLimited,
          predCounts)
def getCentreAndSpreadOffsets(spaceShape,
                              spreadShape,
                              stepSize=1):
  """
  Generates centre offsets and spread offsets for block-mode based training
  regimes - star, cross, block.

  Parameters:
  -----------------------------------------------
  spaceShape:   The (height, width) of the 2-D space to explore. This
                sets the number of center-points.
  spreadShape:  The shape (height, width) of the area around each
                center-point to explore.
  stepSize:     The step size. How big each step is, in pixels. This
                controls *both* the spacing of the center-points within the
                block and the points we explore around each center-point
  retval:       (centreOffsets, spreadOffsets)
  """
  # Local import: 'cross' yields the cartesian product of the given
  # sequences (project-internal helper).
  from nupic.math.cross import cross
  # =====================================================================
  # Init data structures
  # What is the range on the X and Y offsets of the center points?
  shape = spaceShape
  # If the shape is (1,1), special case of just 1 center point
  if shape[0] == 1 and shape[1] == 1:
    centerOffsets = [(0,0)]
  else:
    # Offsets are centred around 0 and scaled by stepSize.
    xMin = -1 * (shape[1] // 2)
    xMax = xMin + shape[1] - 1
    xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize)

    yMin = -1 * (shape[0] // 2)
    yMax = yMin + shape[0] - 1
    yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize)

    centerOffsets = list(cross(yPositions, xPositions))

  numCenterOffsets = len(centerOffsets)
  # NOTE(review): this print looks like verbose/debug output left enabled
  # unconditionally (Python 2 print statement) -- confirm it is intended.
  print "centerOffsets:", centerOffsets

  # What is the range on the X and Y offsets of the spread points?
  shape = spreadShape
  # If the shape is (1,1), special case of no spreading around each center
  # point
  if shape[0] == 1 and shape[1] == 1:
    spreadOffsets = [(0,0)]
  else:
    xMin = -1 * (shape[1] // 2)
    xMax = xMin + shape[1] - 1
    xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize)

    yMin = -1 * (shape[0] // 2)
    yMax = yMin + shape[0] - 1
    yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize)

    spreadOffsets = list(cross(yPositions, xPositions))

    # Put the (0,0) entry first
    spreadOffsets.remove((0,0))
    spreadOffsets.insert(0, (0,0))

  numSpreadOffsets = len(spreadOffsets)
  # NOTE(review): same unconditional debug print as above.
  print "spreadOffsets:", spreadOffsets

  return centerOffsets, spreadOffsets
def makeCloneMap(columnsShape, outputCloningWidth, outputCloningHeight=-1):
  """Make a two-dimensional clone map mapping columns to clone master.

  This makes a map that is (numColumnsHigh, numColumnsWide) big that can
  be used to figure out which clone master to use for each column.  Here are
  a few sample calls

  >>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3)
  (array([[0, 1, 2, 0, 1, 2, 0, 1],
         [3, 4, 5, 3, 4, 5, 3, 4],
         [6, 7, 8, 6, 7, 8, 6, 7],
         [0, 1, 2, 0, 1, 2, 0, 1],
         [3, 4, 5, 3, 4, 5, 3, 4],
         [6, 7, 8, 6, 7, 8, 6, 7],
         [0, 1, 2, 0, 1, 2, 0, 1]], dtype=uint32), 9)

  >>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3, outputCloningHeight=4)
  (array([[ 0,  1,  2,  0,  1,  2,  0,  1],
         [ 3,  4,  5,  3,  4,  5,  3,  4],
         [ 6,  7,  8,  6,  7,  8,  6,  7],
         [ 9, 10, 11,  9, 10, 11,  9, 10],
         [ 0,  1,  2,  0,  1,  2,  0,  1],
         [ 3,  4,  5,  3,  4,  5,  3,  4],
         [ 6,  7,  8,  6,  7,  8,  6,  7]], dtype=uint32), 12)

  The basic idea with this map is that, if you imagine things stretching off
  to infinity, every instance of a given clone master is seeing the exact
  same thing in all directions. That includes:
  - All neighbors must be the same
  - The "meaning" of the input to each of the instances of the same clone
    master must be the same. If input is pixels and we have translation
    invariance--this is easy. At higher levels where input is the output
    of lower levels, this can be much harder.
  - The "meaning" of the inputs to neighbors of a clone master must be the
    same for each instance of the same clone master.

  The 'outputCloningWidth' is the number of columns you'd have to move
  horizontally (or vertically) before you get back to the same clone that
  you started with.  MUST BE INTEGRAL!  It must also be an integral multiple
  of the 'inputCloningWidth' (the outputCloningWidth of the node below us);
  inputCloningWidth isn't passed here, so that is the caller's
  responsibility to ensure.

  @param  columnsShape The shape (height, width) of the columns.
  @param  outputCloningWidth See docstring above.
  @param  outputCloningHeight If non-negative, can be used to make
          rectangular (instead of square) cloning fields.
  @return cloneMap An array (numColumnsHigh, numColumnsWide) that
          contains the clone index to use for each column.
  @return numDistinctClones The number of distinct clones in the map. This
          is just outputCloningWidth*outputCloningHeight.
  """
  # Square cloning field unless a height was given explicitly.
  if outputCloningHeight < 0:
    outputCloningHeight = outputCloningWidth

  columnsHeight, columnsWidth = columnsShape
  numDistinctMasters = outputCloningWidth * outputCloningHeight

  # The master index tiles periodically: the column index modulo the cloning
  # width picks the position within a tile row, and the row index modulo the
  # cloning height picks the tile row. Computed as a vectorized outer sum
  # instead of a per-cell double loop.
  colPattern = numpy.arange(columnsWidth) % outputCloningWidth
  rowPattern = (numpy.arange(columnsHeight) % outputCloningHeight) \
               * outputCloningWidth
  a = (rowPattern[:, numpy.newaxis] + colPattern[numpy.newaxis, :]) \
      .astype('uint32')

  return a, numDistinctMasters
def numpyStr(array, format='%f', includeIndices=False, includeZeros=True):
  """ Pretty print a numpy matrix using the given format string for each
  value. Return the string representation.

  Parameters:
  ------------------------------------------------------------
  array:          The numpy array to print. This can be either a 1D vector
                  or a 2D matrix
  format:         The format string to use for each value
  includeIndices: If true, include a [row,col] label for each value
  includeZeros:   Can only be set to False if includeIndices is on.
                  If True, include 0 values in the print-out
                  If False, exclude 0 values from the print-out.
  """
  shape = array.shape
  assert (len(shape) <= 2)

  pieces = ['[']
  if len(shape) == 1:
    if includeIndices:
      fmt = '%d:' + format
      if includeZeros:
        pieces.extend(fmt % (i, v) for i, v in enumerate(array))
      else:
        pieces.extend(fmt % (i, v) for i, v in enumerate(array) if v != 0)
    else:
      pieces.extend(format % v for v in array)
  else:
    (rows, cols) = shape
    # Note: includeZeros is not honored in the 2D case, matching historical
    # behavior.
    fmt = ('%d,%d:' + format) if includeIndices else format
    for r in range(rows):
      if r > 0:
        # Spacer between rows (produces the double space after a newline).
        pieces.append('')
      pieces.append('[')
      if includeIndices:
        pieces.extend(fmt % (r, c, v) for c, v in enumerate(array[r]))
      else:
        pieces.extend(fmt % v for v in array[r])
      pieces.append(']\n' if r < rows - 1 else ']')

  pieces.append(']')
  return ' '.join(pieces)
if __name__=='__main__':
  # Ad-hoc manual test entry point (Python 2 era).
  # NOTE(review): testStability is not defined anywhere in this module's
  # visible code -- presumably it was removed or lives elsewhere; confirm
  # before running this script directly.
  testStability(numOrigVectors=10, length=500, activity=50,morphTime=3)
  # Drops into an interactive shell using the legacy pre-1.0 IPython API
  # (IPython.Shell was removed in modern IPython).
  from IPython.Shell import IPShellEmbed; IPShellEmbed()()
| 60,038 | Python | .py | 1,303 | 39.936301 | 114 | 0.660483 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,921 | temporal_memory.py | numenta_nupic-legacy/src/nupic/algorithms/temporal_memory.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal Memory implementation in Python. See
`numenta.com <https://numenta.com/temporal-memory-algorithm/>`_ for details.
"""
from collections import defaultdict
from nupic.bindings.math import Random
from operator import mul
from nupic.algorithms.connections import Connections, binSearch
from nupic.serializable import Serializable
from nupic.support.group_by import groupby2
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.proto.TemporalMemoryProto_capnp import TemporalMemoryProto
EPSILON = 0.00001 # constant error threshold to check equality of permanences to
# other floats
EPSILON_ROUND = 5 # Used to round floats
class TemporalMemory(Serializable):
"""
Class implementing the Temporal Memory algorithm.
.. note::
``predictedSegmentDecrement``: A good value is just a bit larger than
(the column-level sparsity * permanenceIncrement). So, if column-level
sparsity is 2% and permanenceIncrement is 0.01, this parameter should be
something like 4% * 0.01 = 0.0004).
:param columnDimensions: (list or tuple) Dimensions of the column space.
Default value ``[2048]``.
:param cellsPerColumn: (int) Number of cells per column. Default value ``32``.
:param activationThreshold: (int) If the number of active connected synapses
on a segment is at least this threshold, the segment is said to be
active. Default value ``13``.
:param initialPermanence: (float) Initial permanence of a new synapse. Default
value ``0.21``.
:param connectedPermanence: (float) If the permanence value for a synapse is
greater than this value, it is said to be connected. Default value
``0.5``.
:param minThreshold: (int) If the number of potential synapses active on a
segment is at least this threshold, it is said to be "matching" and
is eligible for learning. Default value ``10``.
:param maxNewSynapseCount: (int) The maximum number of synapses added to a
segment during learning. Default value ``20``.
:param permanenceIncrement: (float) Amount by which permanences of synapses
are incremented during learning. Default value ``0.1``.
:param permanenceDecrement: (float) Amount by which permanences of synapses
are decremented during learning. Default value ``0.1``.
:param predictedSegmentDecrement: (float) Amount by which segments are
punished for incorrect predictions. Default value ``0.0``.
:param seed: (int) Seed for the random number generator. Default value ``42``.
:param maxSegmentsPerCell: (int) The maximum number of segments per cell.
Default value ``255``.
:param maxSynapsesPerSegment: (int) The maximum number of synapses per
segment. Default value ``255``.
"""
  def __init__(self,
               columnDimensions=(2048,),
               cellsPerColumn=32,
               activationThreshold=13,
               initialPermanence=0.21,
               connectedPermanence=0.50,
               minThreshold=10,
               maxNewSynapseCount=20,
               permanenceIncrement=0.10,
               permanenceDecrement=0.10,
               predictedSegmentDecrement=0.0,
               maxSegmentsPerCell=255,
               maxSynapsesPerSegment=255,
               seed=42,
               **kwargs):
    """
    Create a new TemporalMemory instance. See the class docstring for a
    description of each parameter. Extra keyword arguments are accepted (and
    ignored) for compatibility with callers that pass a shared parameter
    dict.
    """
    # Error checking
    if not len(columnDimensions):
      raise ValueError("Number of column dimensions must be greater than 0")

    if cellsPerColumn <= 0:
      raise ValueError("Number of cells per column must be greater than 0")

    # A segment can't be required to have more matching (potential) synapses
    # than active connected ones.
    if minThreshold > activationThreshold:
      raise ValueError(
          "The min threshold can't be greater than the activation threshold")

    # TODO: Validate all parameters (and add validation tests)

    # Save member variables
    self.columnDimensions = columnDimensions
    self.cellsPerColumn = cellsPerColumn
    self.activationThreshold = activationThreshold
    self.initialPermanence = initialPermanence
    self.connectedPermanence = connectedPermanence
    self.minThreshold = minThreshold
    self.maxNewSynapseCount = maxNewSynapseCount
    self.permanenceIncrement = permanenceIncrement
    self.permanenceDecrement = permanenceDecrement
    self.predictedSegmentDecrement = predictedSegmentDecrement
    self.maxSegmentsPerCell = maxSegmentsPerCell
    self.maxSynapsesPerSegment = maxSynapsesPerSegment

    # Initialize member variables
    # Synapse/segment store, sized to one entry per cell.
    self.connections = self.connectionsFactory(self.numberOfCells())
    # Seeded generator so runs are reproducible.
    self._random = Random(seed)
    # Per-time-step state: cells and segments from the current step.
    self.activeCells = []
    self.winnerCells = []
    self.activeSegments = []
    self.matchingSegments = []
    # Per-segment synapse-activity counts, parallel to the segment lists
    # above.
    self.numActiveConnectedSynapsesForSegment = []
    self.numActivePotentialSynapsesForSegment = []

    # Iteration counter and per-segment last-use record -- presumably used
    # for least-recently-used segment cleanup when maxSegmentsPerCell is
    # exceeded (the consuming code is outside this view; confirm).
    self.iteration = 0
    self.lastUsedIterationForSegment = []
  @staticmethod
  def connectionsFactory(*args, **kwargs):
    """
    Create a :class:`~nupic.algorithms.connections.Connections` instance.
    :class:`TemporalMemory` subclasses may override this method to choose a
    different :class:`~nupic.algorithms.connections.Connections` implementation,
    or to augment the instance otherwise returned by the default
    :class:`~nupic.algorithms.connections.Connections` implementation.

    See :class:`~nupic.algorithms.connections.Connections` for constructor
    signature and usage.

    :returns: :class:`~nupic.algorithms.connections.Connections` instance
    """
    # Thin factory hook: all arguments are forwarded unchanged to the
    # Connections constructor.
    return Connections(*args, **kwargs)
# ==============================
# Main methods
# ==============================
def compute(self, activeColumns, learn=True):
"""
Perform one time step of the Temporal Memory algorithm.
This method calls :meth:`activateCells`, then calls
:meth:`activateDendrites`. Using :class:`TemporalMemory` via its
:meth:`compute` method ensures that you'll always be able to call
:meth:`getPredictiveCells` to get predictions for the next time step.
:param activeColumns: (iter) Indices of active columns.
:param learn: (bool) Whether or not learning is enabled.
"""
self.activateCells(sorted(activeColumns), learn)
self.activateDendrites(learn)
def activateCells(self, activeColumns, learn=True):
  """
  Calculate the active cells, using the current active columns and dendrite
  segments. Grow and reinforce synapses.

  :param activeColumns: (iter) A sorted list of active column indices.
  :param learn: (bool) If true, reinforce / punish / grow synapses.

  **Pseudocode:**

  ::

    for each column
      if column is active and has active distal dendrite segments
        call activatePredictedColumn
      if column is active and doesn't have active distal dendrite segments
        call burstColumn
      if column is inactive and has matching distal dendrite segments
        call punishPredictedColumn
  """
  prevActiveCells = self.activeCells
  prevWinnerCells = self.winnerCells
  self.activeCells = []
  self.winnerCells = []

  segToCol = lambda segment: int(segment.cell / self.cellsPerColumn)
  identity = lambda x: x

  for columnData in groupby2(activeColumns, identity,
                             self.activeSegments, segToCol,
                             self.matchingSegments, segToCol):
    # FIX: the second element used to be unpacked into a variable named
    # `activeColumns`, shadowing this method's parameter. Renamed to
    # `activeColumnsEntry` (behavior unchanged; groupby2 already holds a
    # reference to the original iterable). It is non-None iff this column
    # is one of the active columns.
    (column,
     activeColumnsEntry,
     columnActiveSegments,
     columnMatchingSegments) = columnData
    if activeColumnsEntry is not None:
      if columnActiveSegments is not None:
        # Correctly predicted column: activate just the predicted cells.
        cellsToAdd = self.activatePredictedColumn(column,
                                                  columnActiveSegments,
                                                  columnMatchingSegments,
                                                  prevActiveCells,
                                                  prevWinnerCells,
                                                  learn)

        self.activeCells += cellsToAdd
        self.winnerCells += cellsToAdd
      else:
        # Unpredicted active column: burst all of its cells.
        (cellsToAdd,
         winnerCell) = self.burstColumn(column,
                                        columnMatchingSegments,
                                        prevActiveCells,
                                        prevWinnerCells,
                                        learn)

        self.activeCells += cellsToAdd
        self.winnerCells.append(winnerCell)
    else:
      # Inactive column with matching segments: punish the segments that
      # incorrectly predicted it.
      if learn:
        self.punishPredictedColumn(column,
                                   columnActiveSegments,
                                   columnMatchingSegments,
                                   prevActiveCells,
                                   prevWinnerCells)
def activateDendrites(self, learn=True):
  """
  Calculate dendrite segment activity, using the current active cells.

  :param learn: (bool) If true, segment activations will be recorded. This
         information is used during segment cleanup.

  **Pseudocode:**

  ::

    for each distal dendrite segment with activity >= activationThreshold
      mark the segment as active
    for each distal dendrite segment with unconnected activity >= minThreshold
      mark the segment as matching
  """
  (numActiveConnected,
   numActivePotential) = self.connections.computeActivity(
       self.activeCells, self.connectedPermanence)

  segmentForFlatIdx = self.connections.segmentForFlatIdx
  sortKey = self.connections.segmentPositionSortKey

  activeFlatIdxs = [flatIdx
                    for flatIdx, count in enumerate(numActiveConnected)
                    if count >= self.activationThreshold]
  matchingFlatIdxs = [flatIdx
                      for flatIdx, count in enumerate(numActivePotential)
                      if count >= self.minThreshold]

  # Sort segments into a deterministic position order so later per-column
  # grouping and iteration are stable.
  self.activeSegments = sorted(
      (segmentForFlatIdx(flatIdx) for flatIdx in activeFlatIdxs), key=sortKey)
  self.matchingSegments = sorted(
      (segmentForFlatIdx(flatIdx) for flatIdx in matchingFlatIdxs),
      key=sortKey)
  self.numActiveConnectedSynapsesForSegment = numActiveConnected
  self.numActivePotentialSynapsesForSegment = numActivePotential

  if learn:
    # Record usage so that stale segments can be reclaimed (LRU) later.
    for segment in self.activeSegments:
      self.lastUsedIterationForSegment[segment.flatIdx] = self.iteration
    self.iteration += 1
def reset(self):
  """
  Indicates the start of a new sequence. Clears any predictions and makes sure
  synapses don't grow to the currently active cells in the next time step.
  """
  # Only per-timestep state is dropped; learned state (connections,
  # iteration counter, per-segment bookkeeping) is intentionally preserved.
  self.activeCells = []
  self.winnerCells = []
  self.activeSegments = []
  self.matchingSegments = []
# ==============================
# Extension points
# These methods are designed to be overridden.
# ==============================
def activatePredictedColumn(self, column, columnActiveSegments,
                            columnMatchingSegments, prevActiveCells,
                            prevWinnerCells, learn):
  """
  Determines which cells in a predicted column should be added to winner cells
  list, and learns on the segments that correctly predicted this column.

  :param column: (int) Index of bursting column.
  :param columnActiveSegments: (iter) Active segments in this column.
  :param columnMatchingSegments: (iter) Matching segments in this column.
  :param prevActiveCells: (list) Active cells in ``t-1``.
  :param prevWinnerCells: (list) Winner cells in ``t-1``.
  :param learn: (bool) If true, grow and reinforce synapses.

  :returns: (list) A list of predicted cells that will be added to
            active cells and winner cells.
  """
  # Thin delegator: hand the relevant model state and parameters to the
  # pure classmethod implementation.
  return self._activatePredictedColumn(
    self.connections,
    self._random,
    columnActiveSegments,
    prevActiveCells,
    prevWinnerCells,
    self.numActivePotentialSynapsesForSegment,
    self.maxNewSynapseCount,
    self.initialPermanence,
    self.permanenceIncrement,
    self.permanenceDecrement,
    self.maxSynapsesPerSegment,
    learn)
def burstColumn(self, column, columnMatchingSegments, prevActiveCells,
                prevWinnerCells, learn):
  """
  Activates all of the cells in an unpredicted active column, chooses a winner
  cell, and, if learning is turned on, learns on one segment, growing a new
  segment if necessary.

  :param column: (int) Index of bursting column.
  :param columnMatchingSegments: (iter) Matching segments in this column, or
         None if there aren't any.
  :param prevActiveCells: (list) Active cells in ``t-1``.
  :param prevWinnerCells: (list) Winner cells in ``t-1``.
  :param learn: (bool) Whether or not learning is enabled.

  :returns: (tuple) Contains (``cells`` [iter], ``winnerCell`` [int])
  """
  # All cells in the column burst; compute the cell range lazily.
  firstCell = self.cellsPerColumn * column
  cellsForColumn = xrange(firstCell, firstCell + self.cellsPerColumn)

  return self._burstColumn(
    self.connections, self._random, self.lastUsedIterationForSegment, column,
    columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn,
    self.numActivePotentialSynapsesForSegment, self.iteration,
    self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement,
    self.permanenceDecrement, self.maxSegmentsPerCell,
    self.maxSynapsesPerSegment, learn)
def punishPredictedColumn(self, column, columnActiveSegments,
                          columnMatchingSegments, prevActiveCells,
                          prevWinnerCells):
  """
  Punishes the Segments that incorrectly predicted a column to be active.

  :param column: (int) Index of bursting column.
  :param columnActiveSegments: (iter) Active segments for this column, or None
         if there aren't any.
  :param columnMatchingSegments: (iter) Matching segments for this column, or
         None if there aren't any.
  :param prevActiveCells: (list) Active cells in ``t-1``.
  :param prevWinnerCells: (list) Winner cells in ``t-1``.
  """
  # Only the matching segments and previous active cells are needed to
  # punish; the other arguments exist for subclass extension points.
  self._punishPredictedColumn(
    self.connections,
    columnMatchingSegments,
    prevActiveCells,
    self.predictedSegmentDecrement)
def createSegment(self, cell):
  """
  Create a :class:`~nupic.algorithms.connections.Segment` on the specified
  cell. This method calls
  :meth:`~nupic.algorithms.connections.Connections.createSegment` on the
  underlying :class:`~nupic.algorithms.connections.Connections`, and it does
  some extra bookkeeping. Unit tests should call this method, and not
  :meth:`~nupic.algorithms.connections.Connections.createSegment`.

  :param cell: (int) Index of cell to create a segment on.

  :returns: (:class:`~nupic.algorithms.connections.Segment`) The created
            segment.
  """
  # Delegate to the classmethod so the per-segment LRU bookkeeping stays in
  # one place.
  return self._createSegment(self.connections,
                             self.lastUsedIterationForSegment,
                             cell,
                             self.iteration,
                             self.maxSegmentsPerCell)
# ==============================
# Helper methods
#
# These class methods use the following parameter ordering convention:
#
# 1. Output / mutated params
# 2. Traditional parameters to the method, i.e. the ones that would still
# exist if this were in instance method.
# 3. Model state (not mutated)
# 4. Model parameters (including "learn")
# ==============================
@classmethod
def _activatePredictedColumn(cls, connections, random, columnActiveSegments,
                             prevActiveCells, prevWinnerCells,
                             numActivePotentialSynapsesForSegment,
                             maxNewSynapseCount, initialPermanence,
                             permanenceIncrement, permanenceDecrement,
                             maxSynapsesPerSegment, learn):
  """
  :param connections: (Object)
  Connections for the TM. Gets mutated.

  :param random: (Object)
  Random number generator. Gets mutated.

  :param columnActiveSegments: (iter)
  Active segments in this column.

  :param prevActiveCells: (list)
  Active cells in `t-1`.

  :param prevWinnerCells: (list)
  Winner cells in `t-1`.

  :param numActivePotentialSynapsesForSegment: (list)
  Number of active potential synapses per segment, indexed by the segment's
  flatIdx.

  :param maxNewSynapseCount: (int)
  The maximum number of synapses added to a segment during learning

  :param initialPermanence: (float)
  Initial permanence of a new synapse.

  :param permanenceIncrement: (float)
  Amount by which permanences of synapses are incremented during learning.

  :param permanenceDecrement: (float)
  Amount by which permanences of synapses are decremented during learning.

  :param maxSynapsesPerSegment: (int)
  The maximum number of synapses per segment.

  :param learn: (bool)
  If true, grow and reinforce synapses.

  :returns: cellsToAdd (list)
  A list of predicted cells that will be added to active cells and winner
  cells.

  Pseudocode:
  for each cell in the column that has an active distal dendrite segment
    mark the cell as active
    mark the cell as a winner cell
    (learning) for each active distal dendrite segment
      strengthen active synapses
      weaken inactive synapses
      grow synapses to previous winner cells
  """
  cellsToAdd = []
  # Segments appear grouped by cell (columnActiveSegments comes from the
  # position-sorted active segment list), so comparing against the previous
  # segment's cell deduplicates cells with several active segments.
  previousCell = None
  for segment in columnActiveSegments:
    if segment.cell != previousCell:
      cellsToAdd.append(segment.cell)
      previousCell = segment.cell

    if learn:
      # Reinforce this correctly-predicting segment ...
      cls._adaptSegment(connections, segment, prevActiveCells,
                        permanenceIncrement, permanenceDecrement)

      # ... and top it up to maxNewSynapseCount active potential synapses.
      active = numActivePotentialSynapsesForSegment[segment.flatIdx]
      nGrowDesired = maxNewSynapseCount - active

      if nGrowDesired > 0:
        cls._growSynapses(connections, random, segment, nGrowDesired,
                          prevWinnerCells, initialPermanence,
                          maxSynapsesPerSegment)

  return cellsToAdd
@classmethod
def _burstColumn(cls, connections, random, lastUsedIterationForSegment,
                 column, columnMatchingSegments, prevActiveCells,
                 prevWinnerCells, cellsForColumn,
                 numActivePotentialSynapsesForSegment, iteration,
                 maxNewSynapseCount, initialPermanence, permanenceIncrement,
                 permanenceDecrement, maxSegmentsPerCell,
                 maxSynapsesPerSegment, learn):
  """
  :param connections: (Object)
  Connections for the TM. Gets mutated.

  :param random: (Object)
  Random number generator. Gets mutated.

  :param lastUsedIterationForSegment: (list)
  Last used iteration for each segment, indexed by the segment's flatIdx.
  Gets mutated.

  :param column: (int)
  Index of bursting column.

  :param columnMatchingSegments: (iter)
  Matching segments in this column.

  :param prevActiveCells: (list)
  Active cells in `t-1`.

  :param prevWinnerCells: (list)
  Winner cells in `t-1`.

  :param cellsForColumn: (sequence)
  Range of cell indices on which to operate.

  :param numActivePotentialSynapsesForSegment: (list)
  Number of active potential synapses per segment, indexed by the segment's
  flatIdx.

  :param iteration: (int)
  The current timestep.

  :param maxNewSynapseCount: (int)
  The maximum number of synapses added to a segment during learning.

  :param initialPermanence: (float)
  Initial permanence of a new synapse.

  :param permanenceIncrement: (float)
  Amount by which permanences of synapses are incremented during learning.

  :param permanenceDecrement: (float)
  Amount by which permanences of synapses are decremented during learning.

  :param maxSegmentsPerCell: (int)
  The maximum number of segments per cell.

  :param maxSynapsesPerSegment: (int)
  The maximum number of synapses per segment.

  :param learn: (bool)
  Whether or not learning is enabled.

  :returns: (tuple) Contains:
                    `cells`         (iter),
                    `winnerCell`    (int),

  Pseudocode:
  mark all cells as active
  if there are any matching distal dendrite segments
    find the most active matching segment
    mark its cell as a winner cell
    (learning)
      grow and reinforce synapses to previous winner cells
  else
    find the cell with the least segments, mark it as a winner cell
    (learning)
      (optimization) if there are prev winner cells
        add a segment to this winner cell
        grow synapses to previous winner cells
  """
  if columnMatchingSegments is not None:
    # The winner is the cell owning the best (most active) matching segment.
    numActive = lambda s: numActivePotentialSynapsesForSegment[s.flatIdx]
    bestMatchingSegment = max(columnMatchingSegments, key=numActive)
    winnerCell = bestMatchingSegment.cell

    if learn:
      # Reinforce the best matching segment and grow it toward
      # maxNewSynapseCount active potential synapses.
      cls._adaptSegment(connections, bestMatchingSegment, prevActiveCells,
                        permanenceIncrement, permanenceDecrement)

      nGrowDesired = maxNewSynapseCount - numActive(bestMatchingSegment)

      if nGrowDesired > 0:
        cls._growSynapses(connections, random, bestMatchingSegment,
                          nGrowDesired, prevWinnerCells, initialPermanence,
                          maxSynapsesPerSegment)
  else:
    # No matching segments: pick the least-used cell (ties broken randomly).
    winnerCell = cls._leastUsedCell(random, cellsForColumn, connections)
    if learn:
      # Optimization: skip segment creation when there is nothing to grow to.
      nGrowExact = min(maxNewSynapseCount, len(prevWinnerCells))
      if nGrowExact > 0:
        segment = cls._createSegment(connections,
                                     lastUsedIterationForSegment, winnerCell,
                                     iteration, maxSegmentsPerCell)
        cls._growSynapses(connections, random, segment, nGrowExact,
                          prevWinnerCells, initialPermanence,
                          maxSynapsesPerSegment)

  return cellsForColumn, winnerCell
@classmethod
def _punishPredictedColumn(cls, connections, columnMatchingSegments,
                           prevActiveCells, predictedSegmentDecrement):
  """
  Punish segments that incorrectly predicted this (inactive) column by
  decrementing their active synapses.

  :param connections: (Object)
  Connections for the TM. Gets mutated.

  :param columnMatchingSegments: (iter)
  Matching segments for this column.

  :param prevActiveCells: (list)
  Active cells in `t-1`.

  :param predictedSegmentDecrement: (float)
  Amount by which segments are punished for incorrect predictions.

  Pseudocode:
  for each matching segment in the column
    weaken active synapses
  """
  # Guard clause: nothing to do when punishment is disabled or there are no
  # matching segments.
  if columnMatchingSegments is None or not predictedSegmentDecrement > 0.0:
    return

  for matchingSegment in columnMatchingSegments:
    # Negative increment weakens the previously-active synapses; inactive
    # synapses are left untouched (decrement 0.0).
    cls._adaptSegment(connections, matchingSegment, prevActiveCells,
                      -predictedSegmentDecrement, 0.0)
@classmethod
def _createSegment(cls, connections, lastUsedIterationForSegment, cell,
                   iteration, maxSegmentsPerCell):
  """
  Create a segment on the connections, enforcing the maxSegmentsPerCell
  parameter.

  :param connections: (Object) Connections instance; gets mutated.
  :param lastUsedIterationForSegment: (list) Per-flatIdx last-used iteration
         bookkeeping; gets mutated.
  :param cell: (int) Cell to create the segment on.
  :param iteration: (int) Current timestep, recorded as the new segment's
         last-used iteration.
  :param maxSegmentsPerCell: (int) Upper bound on segments per cell.
  :returns: the created segment.
  """
  # Make room: evict least-recently-used segments while the cell is full.
  while connections.numSegments(cell) >= maxSegmentsPerCell:
    lruSegment = min(
      connections.segmentsForCell(cell),
      key=lambda candidate: lastUsedIterationForSegment[candidate.flatIdx])
    connections.destroySegment(lruSegment)

  segment = connections.createSegment(cell)

  # Record the creation iteration, handling both a brand-new flatIdx (append)
  # and a recycled one (overwrite in place).
  flatIdx = segment.flatIdx
  numTracked = len(lastUsedIterationForSegment)
  if flatIdx == numTracked:
    lastUsedIterationForSegment.append(iteration)
  elif flatIdx < numTracked:
    # A flatIdx was recycled.
    lastUsedIterationForSegment[flatIdx] = iteration
  else:
    raise AssertionError(
      "All segments should be created with the TM createSegment method.")

  return segment
@classmethod
def _destroyMinPermanenceSynapses(cls, connections, random, segment,
                                  nDestroy, excludeCells):
  """
  Destroy nDestroy synapses on the specified segment, but don't destroy
  synapses to the "excludeCells".

  :param connections: (Object) Connections instance; gets mutated.
  :param random: (Object) Random number generator (unused here; kept for
         signature compatibility with callers).
  :param segment: Segment to destroy synapses on.
  :param nDestroy: (int) Number of synapses to destroy.
  :param excludeCells: Presynaptic cells whose synapses must be kept.
  """
  # Sort candidates by creation order so that, among synapses whose
  # permanences are within EPSILON of each other, the oldest one loses.
  destroyCandidates = sorted(
    (candidate for candidate in connections.synapsesForSegment(segment)
     if candidate.presynapticCell not in excludeCells),
    key=lambda candidate: candidate._ordinal)

  remaining = nDestroy
  while remaining > 0 and destroyCandidates:
    # Linear scan for the weakest candidate; the strict "< min - EPSILON"
    # comparison treats near-equal permanences as ties won by the earlier
    # (older) synapse.
    weakestSynapse = None
    weakestPermanence = float("inf")
    for candidate in destroyCandidates:
      if candidate.permanence < weakestPermanence - EPSILON:
        weakestSynapse = candidate
        weakestPermanence = candidate.permanence

    connections.destroySynapse(weakestSynapse)
    destroyCandidates.remove(weakestSynapse)
    remaining -= 1
@classmethod
def _leastUsedCell(cls, random, cells, connections):
  """
  Gets the cell with the smallest number of segments.
  Break ties randomly.

  :param random: (Object)
  Random number generator. Gets mutated.

  :param cells: (list)
  Indices of cells.

  :param connections: (Object)
  Connections instance for the TM.

  :returns: (int) Cell index.
  """
  # Single pass: keep every cell tied for the current minimum.
  tiedCells = []
  fewestSegments = float("inf")
  for cell in cells:
    segmentCount = connections.numSegments(cell)
    if segmentCount < fewestSegments:
      fewestSegments = segmentCount
      tiedCells = [cell]
    elif segmentCount == fewestSegments:
      tiedCells.append(cell)

  # Break ties with the model's RNG so results are reproducible per seed.
  return tiedCells[random.getUInt32(len(tiedCells))]
@classmethod
def _growSynapses(cls, connections, random, segment, nDesiredNewSynapes,
                  prevWinnerCells, initialPermanence, maxSynapsesPerSegment):
  """
  Creates nDesiredNewSynapes synapses on the segment passed in if
  possible, choosing random cells from the previous winner cells that are
  not already on the segment.

  :param connections:        (Object) Connections instance for the tm
  :param random:             (Object) TM object used to generate random
                                      numbers
  :param segment:            (int)    Segment to grow synapses on.
  :param nDesiredNewSynapes: (int)    Desired number of synapses to grow
  :param prevWinnerCells:    (list)   Winner cells in `t-1`
  :param initialPermanence:  (float)  Initial permanence of a new synapse.
  :param maxSynapsesPerSegment: (int) Hard cap on synapses per segment,
                                      enforced before growing.
  """
  candidates = list(prevWinnerCells)

  # Remove candidates that already have a synapse on this segment.
  # NOTE(review): binSearch assumes `candidates` is sorted; prevWinnerCells
  # appears to be built in ascending cell order by activateCells -- confirm
  # for any subclass that overrides that method.
  for synapse in connections.synapsesForSegment(segment):
    i = binSearch(candidates, synapse.presynapticCell)
    if i != -1:
      del candidates[i]

  nActual = min(nDesiredNewSynapes, len(candidates))

  # Check if we're going to surpass the maximum number of synapses.
  overrun = connections.numSynapses(segment) + nActual - maxSynapsesPerSegment
  if overrun > 0:
    # Free up room, but never destroy synapses to previous winner cells.
    cls._destroyMinPermanenceSynapses(connections, random, segment, overrun,
                                      prevWinnerCells)

  # Recalculate in case we weren't able to destroy as many synapses as needed.
  nActual = min(nActual,
                maxSynapsesPerSegment - connections.numSynapses(segment))

  # Pick candidates uniformly at random; deleting the chosen candidate keeps
  # the draws distinct.
  for _ in range(nActual):
    i = random.getUInt32(len(candidates))
    connections.createSynapse(segment, candidates[i], initialPermanence)
    del candidates[i]
@classmethod
def _adaptSegment(cls, connections, segment, prevActiveCells,
                  permanenceIncrement, permanenceDecrement):
  """
  Updates synapses on segment.
  Strengthens active synapses; weakens inactive synapses.

  :param connections:          (Object) Connections instance for the tm
  :param segment:              (int)    Segment to adapt
  :param prevActiveCells:      (list)   Active cells in `t-1`
  :param permanenceIncrement:  (float)  Amount to increment active synapses
  :param permanenceDecrement:  (float)  Amount to decrement inactive synapses
  """
  # Collect doomed synapses first: destroying while iterating would mutate
  # the collection being traversed.
  synapsesToDestroy = []

  for synapse in connections.synapsesForSegment(segment):
    wasActive = binSearch(prevActiveCells, synapse.presynapticCell) != -1
    delta = permanenceIncrement if wasActive else -permanenceDecrement

    # Keep permanence within [0, 1].
    permanence = min(1.0, max(0.0, synapse.permanence + delta))

    if permanence < EPSILON:
      # Effectively zero: schedule for destruction instead of storing.
      synapsesToDestroy.append(synapse)
    else:
      connections.updateSynapsePermanence(synapse, permanence)

  for synapse in synapsesToDestroy:
    connections.destroySynapse(synapse)

  # A segment with no synapses left can never activate; remove it.
  if connections.numSynapses(segment) == 0:
    connections.destroySegment(segment)
def columnForCell(self, cell):
  """
  Returns the index of the column that a cell belongs to.

  :param cell: (int) Cell index

  :returns: (int) Column index
  """
  self._validateCell(cell)
  # Cells are numbered column-major: cells [c*cellsPerColumn,
  # (c+1)*cellsPerColumn) belong to column c.
  column = int(cell / self.cellsPerColumn)
  return column
def cellsForColumn(self, column):
  """
  Returns the indices of cells that belong to a column.

  :param column: (int) Column index

  :returns: (list) Cell indices
  """
  self._validateColumn(column)
  firstCell = self.cellsPerColumn * column
  return range(firstCell, firstCell + self.cellsPerColumn)
def numberOfColumns(self):
  """
  Returns the number of columns in this layer.

  :returns: (int) Number of columns
  """
  # Product of all column dimensions (1 for an empty dimension tuple).
  numColumns = 1
  for dimension in self.columnDimensions:
    numColumns *= dimension
  return numColumns
def numberOfCells(self):
  """
  Returns the number of cells in this layer.

  :returns: (int) Number of cells
  """
  totalCells = self.numberOfColumns() * self.cellsPerColumn
  return totalCells
def mapCellsToColumns(self, cells):
  """
  Maps cells to the columns they belong to.

  :param cells: (set) Cells

  :returns: (dict) Mapping from columns to their cells in `cells`
  """
  # defaultdict(set) so callers can safely index columns with no cells.
  cellsForColumns = defaultdict(set)
  for cell in cells:
    cellsForColumns[self.columnForCell(cell)].add(cell)
  return cellsForColumns
def getActiveCells(self):
  """
  Returns the indices of the active cells.

  :returns: (list) Indices of active cells.
  """
  return [self.getCellIndex(cell) for cell in self.activeCells]
def getPredictiveCells(self):
  """ Returns the indices of the predictive cells.

  :returns: (list) Indices of predictive cells.
  """
  predictiveCells = []
  # Segments with the same cell appear consecutively in the position-sorted
  # activeSegments list, so comparing against the last appended cell is
  # sufficient to deduplicate.
  for segment in self.activeSegments:
    if not predictiveCells or predictiveCells[-1] != segment.cell:
      predictiveCells.append(segment.cell)
  return predictiveCells
def getWinnerCells(self):
  """
  Returns the indices of the winner cells.

  :returns: (list) Indices of winner cells.
  """
  return [self.getCellIndex(cell) for cell in self.winnerCells]
def getActiveSegments(self):
  """
  Returns the segments marked active by the last :meth:`activateDendrites`.

  :returns: (list) Active segments
  """
  return self.activeSegments

def getMatchingSegments(self):
  """
  Returns the segments marked matching by the last :meth:`activateDendrites`.

  :returns: (list) Matching segments
  """
  return self.matchingSegments

def getCellsPerColumn(self):
  """
  Returns the number of cells per column.

  :returns: (int) The number of cells per column.
  """
  return self.cellsPerColumn

def getColumnDimensions(self):
  """
  Returns the dimensions of the columns in the region.

  :returns: (tuple) Column dimensions
  """
  return self.columnDimensions

def getActivationThreshold(self):
  """
  Returns the activation threshold (minimum connected active synapses for a
  segment to be active).

  :returns: (int) The activation threshold.
  """
  return self.activationThreshold

def setActivationThreshold(self, activationThreshold):
  """
  Sets the activation threshold.

  :param activationThreshold: (int) activation threshold.
  """
  self.activationThreshold = activationThreshold

def getInitialPermanence(self):
  """
  Returns the initial permanence assigned to newly grown synapses.

  :returns: (float) The initial permanence.
  """
  return self.initialPermanence

def setInitialPermanence(self, initialPermanence):
  """
  Sets the initial permanence.

  :param initialPermanence: (float) The initial permanence.
  """
  self.initialPermanence = initialPermanence

def getMinThreshold(self):
  """
  Returns the min threshold (minimum potential active synapses for a segment
  to be matching).

  :returns: (int) The min threshold.
  """
  return self.minThreshold

def setMinThreshold(self, minThreshold):
  """
  Sets the min threshold.

  :param minThreshold: (int) min threshold.
  """
  self.minThreshold = minThreshold

def getMaxNewSynapseCount(self):
  """
  Returns the max new synapse count (maximum synapses grown per segment per
  learning step).

  :returns: (int) The max new synapse count.
  """
  return self.maxNewSynapseCount

def setMaxNewSynapseCount(self, maxNewSynapseCount):
  """
  Sets the max new synapse count.

  :param maxNewSynapseCount: (int) Max new synapse count.
  """
  self.maxNewSynapseCount = maxNewSynapseCount

def getPermanenceIncrement(self):
  """
  Returns the permanence increment applied to active synapses while learning.

  :returns: (float) The permanence increment.
  """
  return self.permanenceIncrement

def setPermanenceIncrement(self, permanenceIncrement):
  """
  Sets the permanence increment.

  :param permanenceIncrement: (float) The permanence increment.
  """
  self.permanenceIncrement = permanenceIncrement

def getPermanenceDecrement(self):
  """
  Returns the permanence decrement applied to inactive synapses while
  learning.

  :returns: (float) The permanence decrement.
  """
  return self.permanenceDecrement

def setPermanenceDecrement(self, permanenceDecrement):
  """
  Sets the permanence decrement.

  :param permanenceDecrement: (float) The permanence decrement.
  """
  self.permanenceDecrement = permanenceDecrement

def getPredictedSegmentDecrement(self):
  """
  Returns the decrement used to punish segments that predicted incorrectly.

  :returns: (float) The predicted segment decrement.
  """
  return self.predictedSegmentDecrement

def setPredictedSegmentDecrement(self, predictedSegmentDecrement):
  """
  Sets the predicted segment decrement.

  :param predictedSegmentDecrement: (float) The predicted segment decrement.
  """
  self.predictedSegmentDecrement = predictedSegmentDecrement

def getConnectedPermanence(self):
  """
  Returns the permanence at or above which a synapse counts as connected.

  :returns: (float) The connected permanence.
  """
  return self.connectedPermanence

def setConnectedPermanence(self, connectedPermanence):
  """
  Sets the connected permanence.

  :param connectedPermanence: (float) The connected permanence.
  """
  self.connectedPermanence = connectedPermanence

def getMaxSegmentsPerCell(self):
  """
  Returns the maximum number of segments per cell

  :returns: (int) max number of segments per cell
  """
  return self.maxSegmentsPerCell

def getMaxSynapsesPerSegment(self):
  """
  Returns the maximum number of synapses per segment.

  :returns: (int) max number of synapses per segment
  """
  return self.maxSynapsesPerSegment
def write(self, proto):
  """
  Writes serialized data to proto object.

  :param proto: (DynamicStructBuilder) Proto object
  """
  # capnp fails to save a tuple. Let's force columnDimensions to list.
  proto.columnDimensions = list(self.columnDimensions)
  proto.cellsPerColumn = self.cellsPerColumn
  proto.activationThreshold = self.activationThreshold
  # Permanence-like floats are rounded so serialized values compare equal
  # within EPSILON across round trips.
  proto.initialPermanence = round(self.initialPermanence, EPSILON_ROUND)
  proto.connectedPermanence = round(self.connectedPermanence, EPSILON_ROUND)
  proto.minThreshold = self.minThreshold
  proto.maxNewSynapseCount = self.maxNewSynapseCount
  proto.permanenceIncrement = round(self.permanenceIncrement, EPSILON_ROUND)
  proto.permanenceDecrement = round(self.permanenceDecrement, EPSILON_ROUND)
  proto.predictedSegmentDecrement = self.predictedSegmentDecrement

  proto.maxSegmentsPerCell = self.maxSegmentsPerCell
  proto.maxSynapsesPerSegment = self.maxSynapsesPerSegment

  self.connections.write(proto.connections)
  self._random.write(proto.random)

  proto.activeCells = list(self.activeCells)
  proto.winnerCells = list(self.winnerCells)

  # Segments are serialized as (cell, index-on-cell) pairs, which survive
  # round trips, unlike flatIdx values.
  protoActiveSegments = proto.init("activeSegments", len(self.activeSegments))
  for i, segment in enumerate(self.activeSegments):
    protoActiveSegments[i].cell = segment.cell
    idx = self.connections.segmentsForCell(segment.cell).index(segment)
    protoActiveSegments[i].idxOnCell = idx

  protoMatchingSegments = proto.init("matchingSegments",
                                     len(self.matchingSegments))
  for i, segment in enumerate(self.matchingSegments):
    protoMatchingSegments[i].cell = segment.cell
    idx = self.connections.segmentsForCell(segment.cell).index(segment)
    protoMatchingSegments[i].idxOnCell = idx

  protoNumActivePotential = proto.init(
    "numActivePotentialSynapsesForSegment",
    len(self.numActivePotentialSynapsesForSegment))
  for i, numActivePotentialSynapses in enumerate(
      self.numActivePotentialSynapsesForSegment):
    # Skip flatIdxs whose segment has been destroyed.
    segment = self.connections.segmentForFlatIdx(i)
    if segment is not None:
      protoNumActivePotential[i].cell = segment.cell
      idx = self.connections.segmentsForCell(segment.cell).index(segment)
      protoNumActivePotential[i].idxOnCell = idx
      protoNumActivePotential[i].number = numActivePotentialSynapses

  proto.iteration = self.iteration

  # BUG FIX: size this proto list by lastUsedIterationForSegment -- the list
  # actually being serialized -- rather than by
  # numActivePotentialSynapsesForSegment. The two lists can differ in length
  # when segments were created since the last activateDendrites() call, in
  # which case the old code wrote past the end of the proto list.
  protoLastUsedIteration = proto.init(
    "lastUsedIterationForSegment",
    len(self.lastUsedIterationForSegment))
  for i, lastUsed in enumerate(self.lastUsedIterationForSegment):
    segment = self.connections.segmentForFlatIdx(i)
    if segment is not None:
      protoLastUsedIteration[i].cell = segment.cell
      idx = self.connections.segmentsForCell(segment.cell).index(segment)
      protoLastUsedIteration[i].idxOnCell = idx
      protoLastUsedIteration[i].number = lastUsed
@classmethod
def getSchema(cls):
  """
  Returns the capnp proto schema used by :meth:`write` and :meth:`read`.

  :returns: (Object) TemporalMemoryProto capnp schema
  """
  return TemporalMemoryProto
@classmethod
def read(cls, proto):
  """
  Reads deserialized data from proto object.

  :param proto: (DynamicStructBuilder) Proto object

  :returns: (:class:TemporalMemory) TemporalMemory instance
  """
  # Bypass __init__; every attribute is restored from the proto below.
  tm = object.__new__(cls)

  # capnp fails to save a tuple, so proto.columnDimensions was forced to
  # serialize as a list. We prefer a tuple, however, because columnDimensions
  # should be regarded as immutable.
  tm.columnDimensions = tuple(proto.columnDimensions)
  tm.cellsPerColumn = int(proto.cellsPerColumn)
  tm.activationThreshold = int(proto.activationThreshold)
  # Rounding mirrors write(), keeping values stable across round trips.
  tm.initialPermanence = round(proto.initialPermanence, EPSILON_ROUND)
  tm.connectedPermanence = round(proto.connectedPermanence, EPSILON_ROUND)
  tm.minThreshold = int(proto.minThreshold)
  tm.maxNewSynapseCount = int(proto.maxNewSynapseCount)
  tm.permanenceIncrement = round(proto.permanenceIncrement, EPSILON_ROUND)
  tm.permanenceDecrement = round(proto.permanenceDecrement, EPSILON_ROUND)
  tm.predictedSegmentDecrement = round(proto.predictedSegmentDecrement,
                                       EPSILON_ROUND)

  tm.maxSegmentsPerCell = int(proto.maxSegmentsPerCell)
  tm.maxSynapsesPerSegment = int(proto.maxSynapsesPerSegment)

  tm.connections = Connections.read(proto.connections)
  #pylint: disable=W0212
  tm._random = Random()
  tm._random.read(proto.random)
  #pylint: enable=W0212

  tm.activeCells = [int(x) for x in proto.activeCells]
  tm.winnerCells = [int(x) for x in proto.winnerCells]

  # Per-flatIdx bookkeeping lists are rebuilt at the connections' current
  # flat-list length and filled in from the serialized (cell, idxOnCell)
  # segment references below.
  flatListLength = tm.connections.segmentFlatListLength()
  tm.numActiveConnectedSynapsesForSegment = [0] * flatListLength
  tm.numActivePotentialSynapsesForSegment = [0] * flatListLength
  tm.lastUsedIterationForSegment = [0] * flatListLength

  tm.activeSegments = []
  tm.matchingSegments = []

  for protoSegment in proto.activeSegments:
    tm.activeSegments.append(
      tm.connections.getSegment(protoSegment.cell,
                                protoSegment.idxOnCell))

  for protoSegment in proto.matchingSegments:
    tm.matchingSegments.append(
      tm.connections.getSegment(protoSegment.cell,
                                protoSegment.idxOnCell))

  for protoSegment in proto.numActivePotentialSynapsesForSegment:
    segment = tm.connections.getSegment(protoSegment.cell,
                                        protoSegment.idxOnCell)
    tm.numActivePotentialSynapsesForSegment[segment.flatIdx] = (
      int(protoSegment.number))

  # Python 2 `long`: iteration counts can exceed the int range.
  tm.iteration = long(proto.iteration)

  for protoSegment in proto.lastUsedIterationForSegment:
    segment = tm.connections.getSegment(protoSegment.cell,
                                        protoSegment.idxOnCell)
    tm.lastUsedIterationForSegment[segment.flatIdx] = (
      long(protoSegment.number))

  return tm
def __eq__(self, other):
  """
  Equality operator for TemporalMemory instances.
  Checks if two instances are functionally identical
  (might have different internal state).

  :param other: (TemporalMemory) TemporalMemory instance to compare to
  """
  # Attributes compared with plain equality.
  exactAttrs = ("columnDimensions", "cellsPerColumn", "activationThreshold",
                "minThreshold", "maxNewSynapseCount", "connections",
                "activeCells", "winnerCells", "matchingSegments",
                "activeSegments")
  # Float attributes compared within EPSILON tolerance.
  approxAttrs = ("initialPermanence", "connectedPermanence",
                 "permanenceIncrement", "permanenceDecrement",
                 "predictedSegmentDecrement")

  for name in exactAttrs:
    if getattr(self, name) != getattr(other, name):
      return False

  for name in approxAttrs:
    if abs(getattr(self, name) - getattr(other, name)) > EPSILON:
      return False

  return True
def __ne__(self, other):
  """
  Non-equality operator for TemporalMemory instances.
  Checks if two instances are not functionally identical
  (might have different internal state).

  :param other: (TemporalMemory) TemporalMemory instance to compare to
  """
  # Defined in terms of __eq__ so the two can never disagree.
  return not (self == other)
def _validateColumn(self, column):
  """
  Raises an error if column index is invalid.

  :param column: (int) Column index
  """
  if not (0 <= column < self.numberOfColumns()):
    raise IndexError("Invalid column")
def _validateCell(self, cell):
  """
  Raises an error if cell index is invalid.

  :param cell: (int) Cell index
  """
  if not (0 <= cell < self.numberOfCells()):
    raise IndexError("Invalid cell")
@classmethod
def getCellIndices(cls, cells):
  """
  Returns the indices of the cells passed in.

  :param cells: (list) cells to find the indices of
  """
  return [cls.getCellIndex(cell) for cell in cells]
@staticmethod
def getCellIndex(cell):
  """
  Returns the index of the cell.

  :param cell: (int) cell to find the index of
  """
  # Cells are represented directly by their integer index in this
  # implementation, so this is the identity function.
  return cell
| 44,590 | Python | .py | 1,030 | 35.646602 | 81 | 0.696696 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,922 | anomaly.py | numenta_nupic-legacy/src/nupic/algorithms/anomaly.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Anomaly-related algorithms."""
import numpy
from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood
from nupic.utils import MovingAverage
def computeRawAnomalyScore(activeColumns, prevPredictedColumns):
  """Computes the raw anomaly score.

  The raw anomaly score is the fraction of active columns not predicted.

  :param activeColumns: array of active column indices
  :param prevPredictedColumns: array of columns indices predicted in prev step
  :returns: anomaly score 0..1 (float)
  """
  nActiveColumns = len(activeColumns)
  if nActiveColumns > 0:
    # Test whether each element of a 1-D array is also present in a second
    # array. Sum to get the total # of columns that are active and were
    # predicted. numpy.isin is the supported replacement for numpy.in1d,
    # which is deprecated and removed in NumPy 2.0.
    score = numpy.isin(activeColumns, prevPredictedColumns).sum()
    # Get the percent of active columns that were NOT predicted, that is
    # our anomaly score.
    score = (nActiveColumns - score) / float(nActiveColumns)
  else:
    # There are no active columns, so nothing unpredicted happened.
    score = 0.0

  return score
class Anomaly(object):
  """Utility class for generating anomaly scores in different ways.

  :param slidingWindowSize: [optional] - how many elements are summed up;
      enables moving average on final anomaly score; int >= 0

  :param mode: (string) [optional] how to compute anomaly, one of:

      - :const:`nupic.algorithms.anomaly.Anomaly.MODE_PURE`
      - :const:`nupic.algorithms.anomaly.Anomaly.MODE_LIKELIHOOD`
      - :const:`nupic.algorithms.anomaly.Anomaly.MODE_WEIGHTED`

  :param binaryAnomalyThreshold: [optional] if set [0,1] anomaly score
       will be discretized to 1/0 (1 if >= binaryAnomalyThreshold)
       The transformation is applied after moving average is computed.
  """


  # anomaly modes supported
  MODE_PURE = "pure"
  """
  Default mode. The raw anomaly score as computed by
  :func:`~.anomaly_likelihood.computeRawAnomalyScore`
  """
  MODE_LIKELIHOOD = "likelihood"
  """
  Uses the :class:`~.anomaly_likelihood.AnomalyLikelihood` class, which models
  probability of receiving this value and anomalyScore
  """
  MODE_WEIGHTED = "weighted"
  """
  Multiplies the likelihood result with the raw anomaly score that was used to
  generate the likelihood (anomaly * likelihood)
  """

  _supportedModes = (MODE_PURE, MODE_LIKELIHOOD, MODE_WEIGHTED)


  def __init__(self,
               slidingWindowSize=None,
               mode=MODE_PURE,
               binaryAnomalyThreshold=None):
    self._mode = mode
    # Validate the mode before allocating any helpers for it.
    if self._mode not in self._supportedModes:
      raise ValueError("Invalid anomaly mode; only supported modes are: "
                       "Anomaly.MODE_PURE, Anomaly.MODE_LIKELIHOOD, "
                       "Anomaly.MODE_WEIGHTED; you used: %r" % self._mode)

    if slidingWindowSize is not None:
      self._movingAverage = MovingAverage(windowSize=slidingWindowSize)
    else:
      self._movingAverage = None

    # Only the likelihood-based modes need the probabilistic model.
    if (self._mode == Anomaly.MODE_LIKELIHOOD or
        self._mode == Anomaly.MODE_WEIGHTED):
      self._likelihood = AnomalyLikelihood() # probabilistic anomaly
    else:
      self._likelihood = None

    self._binaryThreshold = binaryAnomalyThreshold
    if binaryAnomalyThreshold is not None and (
          not isinstance(binaryAnomalyThreshold, float) or
          binaryAnomalyThreshold >= 1.0 or
          binaryAnomalyThreshold <= 0.0 ):
      raise ValueError("Anomaly: binaryAnomalyThreshold must be from (0,1) "
                       "or None if disabled.")


  def compute(self, activeColumns, predictedColumns,
              inputValue=None, timestamp=None):
    """Compute the anomaly score as the percent of active columns not predicted.

    :param activeColumns: array of active column indices
    :param predictedColumns: array of columns indices predicted in this step
                             (used for anomaly in step T+1)
    :param inputValue: (optional) value of current input to encoders
                                  (eg "cat" for category encoder)
                                  (used in anomaly-likelihood)
    :param timestamp: (optional) date timestamp when the sample occured
                                 (used in anomaly-likelihood)
    :returns: the computed anomaly score; float 0..1
    :raises ValueError: in likelihood or weighted mode when ``inputValue``
                        is not supplied.
    """
    # Start by computing the raw anomaly score.
    anomalyScore = computeRawAnomalyScore(activeColumns, predictedColumns)

    # Compute final anomaly based on selected mode.
    if self._mode == Anomaly.MODE_PURE:
      score = anomalyScore
    elif self._mode == Anomaly.MODE_LIKELIHOOD:
      if inputValue is None:
        raise ValueError("Selected anomaly mode 'Anomaly.MODE_LIKELIHOOD' "
                 "requires 'inputValue' as parameter to compute() method. ")
      probability = self._likelihood.anomalyProbability(
          inputValue, anomalyScore, timestamp)
      # low likelihood -> hi anomaly
      score = 1 - probability
    elif self._mode == Anomaly.MODE_WEIGHTED:
      # Consistency fix: weighted mode also feeds inputValue to the
      # likelihood model, so it needs the same guard as MODE_LIKELIHOOD
      # instead of failing obscurely inside anomalyProbability().
      if inputValue is None:
        raise ValueError("Selected anomaly mode 'Anomaly.MODE_WEIGHTED' "
                         "requires 'inputValue' as parameter to compute() "
                         "method. ")
      probability = self._likelihood.anomalyProbability(
          inputValue, anomalyScore, timestamp)
      score = anomalyScore * (1 - probability)

    # Last, do moving-average if windowSize was specified.
    if self._movingAverage is not None:
      score = self._movingAverage.next(score)

    # apply binary discretization if required
    if self._binaryThreshold is not None:
      score = 1.0 if score >= self._binaryThreshold else 0.0

    return score


  def __str__(self):
    windowSize = 0
    if self._movingAverage is not None:
      windowSize = self._movingAverage.windowSize
    return "Anomaly:\tmode=%s\twindowSize=%r" % (self._mode, windowSize)


  def __eq__(self, other):
    return (isinstance(other, Anomaly) and
            other._mode == self._mode and
            other._binaryThreshold == self._binaryThreshold and
            other._movingAverage == self._movingAverage and
            other._likelihood == self._likelihood)


  def __ne__(self, other):
    # Under Python 2, defining __eq__ alone leaves != at default identity
    # comparison; keep the two operators consistent.
    return not self.__eq__(other)


  def __setstate__(self, state):
    """deserialization; fills in defaults for fields added after older
    instances of this class were pickled."""
    self.__dict__.update(state)

    if not hasattr(self, '_mode'):
      self._mode = Anomaly.MODE_PURE
    if not hasattr(self, '_movingAverage'):
      self._movingAverage = None
    if not hasattr(self, '_binaryThreshold'):
      self._binaryThreshold = None
| 7,246 | Python | .py | 158 | 39.468354 | 80 | 0.687048 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,923 | spatial_pooler.py | numenta_nupic-legacy/src/nupic/algorithms/spatial_pooler.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
try:
import capnp
except ImportError:
capnp = None
import numpy
from nupic.bindings.math import (SM32 as SparseMatrix,
SM_01_32_32 as SparseBinaryMatrix,
GetNTAReal,
Random as NupicRandom)
if capnp:
from nupic.proto.SpatialPoolerProto_capnp import SpatialPoolerProto
from nupic.math import topology
from nupic.serializable import Serializable
realDType = GetNTAReal()
uintType = "uint32"
VERSION = 3
PERMANENCE_EPSILON = 0.000001
EPSILON_ROUND = 5
class InvalidSPParamValueError(ValueError):
  """Raised when a SpatialPooler parameter is given an invalid value."""
class _SparseMatrixCorticalColumnAdapter(object):
  """ Mixin giving sparse-matrix storage a cortical-column-centric API.

  SpatialPooler code operates on column indices while the underlying storage
  is a sparse matrix whose rows represent cortical columns. Mixing the
  matrix vocabulary (rows/columns) with the cortical one is confusing, so
  this adapter exposes column-level operations that simply forward to the
  row-level methods of the sparse-matrix base class.
  """

  def __getitem__(self, columnIndex):
    """ Wraps getRow() such that instances may be indexed by columnIndex."""
    parent = super(_SparseMatrixCorticalColumnAdapter, self)
    return parent.getRow(columnIndex)


  def replace(self, columnIndex, bitmap):
    """ Wraps replaceSparseRow()"""
    parent = super(_SparseMatrixCorticalColumnAdapter, self)
    return parent.replaceSparseRow(columnIndex, bitmap)


  def update(self, columnIndex, vector):
    """ Wraps setRowFromDense()"""
    parent = super(_SparseMatrixCorticalColumnAdapter, self)
    return parent.setRowFromDense(columnIndex, vector)
class CorticalColumns(_SparseMatrixCorticalColumnAdapter, SparseMatrix):
  """ Column-centric view over a float SparseMatrix.

  Use in cases where column connections carry real values, such as
  permanences.
  """
class BinaryCorticalColumns(_SparseMatrixCorticalColumnAdapter,
                            SparseBinaryMatrix):
  """ Column-centric view over a SparseBinaryMatrix.

  Use in cases where column connections are represented as bitmaps rather
  than real values.
  """
class SpatialPooler(Serializable):
"""
This class implements the spatial pooler. It is in charge of handling the
relationships between the columns of a region and the inputs bits. The
primary public interface to this function is the "compute" method, which
takes in an input vector and returns a list of activeColumns columns.
.. code-block:: python
sp = SpatialPooler(...)
for line in file:
inputVector = numpy.array(line)
sp.compute(inputVector)
...
:param inputDimensions: (iter)
A sequence representing the dimensions of the input vector. Format is
(height, width, depth, ...), where each value represents the size of the
dimension. For a topology of one dimension with 100 inputs use 100, or
(100,). For a two dimensional topology of 10x5 use (10,5).
Default ``(32, 32)``.
:param columnDimensions: (iter)
A sequence representing the dimensions of the columns in the region.
Format is (height, width, depth, ...), where each value represents the
size of the dimension. For a topology of one dimension with 2000 columns
use 2000, or (2000,). For a three dimensional topology of 32x64x16 use
(32, 64, 16). Default ``(64, 64)``.
:param potentialRadius: (int)
This parameter determines the extent of the input that each column can
potentially be connected to. This can be thought of as the input bits
that are visible to each column, or a 'receptiveField' of the field of
vision. A large enough value will result in 'global coverage', meaning
that each column can potentially be connected to every input bit. This
parameter defines a square (or hyper
square) area: a column will have a max square potential pool with sides of
length 2 * potentialRadius + 1. Default ``16``.
:param potentialPct: (float)
The percent of the inputs, within a column's potential radius, that a
column can be connected to. If set to 1, the column will be connected
to every input within its potential radius. This parameter is used to
give each column a unique potential pool when a large potentialRadius
causes overlap between the columns. At initialization time we choose
((2*potentialRadius + 1)^(# inputDimensions) * potentialPct) input bits
to comprise the column's potential pool. Default ``0.5``.
:param globalInhibition: (bool)
If true, then during inhibition phase the winning columns are selected
as the most active columns from the region as a whole. Otherwise, the
winning columns are selected with respect to their local neighborhoods.
Using global inhibition boosts performance x60. Default ``False``.
:param localAreaDensity: (float)
The desired density of active columns within a local inhibition area
(the size of which is set by the internally calculated inhibitionRadius,
which is in turn determined from the average size of the connected
potential pools of all columns). The inhibition logic will insure that
at most N columns remain ON within a local inhibition area, where
N = localAreaDensity * (total number of columns in inhibition area).
Default ``-1.0``.
:param numActiveColumnsPerInhArea: (float)
An alternate way to control the density of the active columns. If
numActiveColumnsPerInhArea is specified then localAreaDensity must be
less than 0, and vice versa. When using numActiveColumnsPerInhArea, the
inhibition logic will insure that at most 'numActiveColumnsPerInhArea'
columns remain ON within a local inhibition area (the size of which is
set by the internally calculated inhibitionRadius, which is in turn
determined from the average size of the connected receptive fields of all
columns). When using this method, as columns learn and grow their
effective receptive fields, the inhibitionRadius will grow, and hence the
net density of the active columns will *decrease*. This is in contrast to
the localAreaDensity method, which keeps the density of active columns
the same regardless of the size of their receptive fields. Default ``10.0``.
:param stimulusThreshold: (int)
This is a number specifying the minimum number of synapses that must be
on in order for a columns to turn ON. The purpose of this is to prevent
noise input from activating columns. Specified as a percent of a fully
grown synapse. Default ``0``.
:param synPermInactiveDec: (float)
The amount by which an inactive synapse is decremented in each round.
Specified as a percent of a fully grown synapse. Default ``0.008``.
:param synPermActiveInc: (float)
The amount by which an active synapse is incremented in each round.
Specified as a percent of a fully grown synapse. Default ``0.05``.
:param synPermConnected: (float)
The default connected threshold. Any synapse whose permanence value is
above the connected threshold is a "connected synapse", meaning it can
contribute to the cell's firing. Default ``0.1``.
:param minPctOverlapDutyCycle: (float)
A number between 0 and 1.0, used to set a floor on how often a column
should have at least stimulusThreshold active inputs. Periodically, each
column looks at the overlap duty cycle of all other columns within its
inhibition radius and sets its own internal minimal acceptable duty cycle
to: minPctDutyCycleBeforeInh * max(other columns' duty cycles). On each
iteration, any column whose overlap duty cycle falls below this computed
value will get all of its permanence values boosted up by
synPermActiveInc. Raising all permanences in response to a sub-par duty
cycle before inhibition allows a cell to search for new inputs when
either its previously learned inputs are no longer ever active, or when
the vast majority of them have been "hijacked" by other columns. Default
``0.001``.
:param dutyCyclePeriod: (int)
The period used to calculate duty cycles. Higher values make it take
longer to respond to changes in boost or synPerConnectedCell. Shorter
values make it more unstable and likely to oscillate. Default ``1000``.
:param boostStrength: (float)
A number greater or equal than 0.0, used to control the strength of
boosting. No boosting is applied if it is set to 0. Boosting strength
increases as a function of boostStrength. Boosting encourages columns to
have similar activeDutyCycles as their neighbors, which will lead to more
efficient use of columns. However, too much boosting may also lead to
instability of SP outputs. Default ``0.0``.
:param seed: (int)
Seed for our own pseudo-random number generator. Default ``-1``.
:param spVerbosity: (int)
spVerbosity level: 0, 1, 2, or 3. Default ``0``.
:param wrapAround: (bool)
Determines if inputs at the beginning and end of an input dimension should
be considered neighbors when mapping columns to inputs. Default ``True``.
"""
def __init__(self,
inputDimensions=(32, 32),
columnDimensions=(64, 64),
potentialRadius=16,
potentialPct=0.5,
globalInhibition=False,
localAreaDensity=-1.0,
numActiveColumnsPerInhArea=10.0,
stimulusThreshold=0,
synPermInactiveDec=0.008,
synPermActiveInc=0.05,
synPermConnected=0.10,
minPctOverlapDutyCycle=0.001,
dutyCyclePeriod=1000,
boostStrength=0.0,
seed=-1,
spVerbosity=0,
wrapAround=True
):
if (numActiveColumnsPerInhArea == 0 and
(localAreaDensity == 0 or localAreaDensity > 0.5)):
raise InvalidSPParamValueError("Inhibition parameters are invalid")
columnDimensions = numpy.array(columnDimensions, ndmin=1)
numColumns = columnDimensions.prod()
if not isinstance(numColumns, (int, long)) or numColumns <= 0:
raise InvalidSPParamValueError("Invalid number of columns ({})"
.format(repr(numColumns)))
inputDimensions = numpy.array(inputDimensions, ndmin=1)
numInputs = inputDimensions.prod()
if not isinstance(numInputs, (int, long)) or numInputs <= 0:
raise InvalidSPParamValueError("Invalid number of inputs ({}"
.format(repr(numInputs)))
if inputDimensions.size != columnDimensions.size:
raise InvalidSPParamValueError(
"Input dimensions must match column dimensions")
if boostStrength < 0.0:
raise InvalidSPParamValueError("boostStrength must be >= 0.0")
self._seed(seed)
self._numInputs = int(numInputs)
self._numColumns = int(numColumns)
self._columnDimensions = columnDimensions
self._inputDimensions = inputDimensions
self._potentialRadius = int(min(potentialRadius, numInputs))
self._potentialPct = potentialPct
self._globalInhibition = globalInhibition
self._numActiveColumnsPerInhArea = int(numActiveColumnsPerInhArea)
self._localAreaDensity = localAreaDensity
self._stimulusThreshold = stimulusThreshold
self._synPermInactiveDec = synPermInactiveDec
self._synPermActiveInc = synPermActiveInc
self._synPermBelowStimulusInc = synPermConnected / 10.0
self._synPermConnected = synPermConnected
self._minPctOverlapDutyCycles = minPctOverlapDutyCycle
self._dutyCyclePeriod = dutyCyclePeriod
self._boostStrength = boostStrength
self._spVerbosity = spVerbosity
self._wrapAround = wrapAround
self._synPermMin = 0.0
self._synPermMax = 1.0
self._synPermTrimThreshold = synPermActiveInc / 2.0
self._overlaps = numpy.zeros(self._numColumns, dtype=realDType)
self._boostedOverlaps = numpy.zeros(self._numColumns, dtype=realDType)
if self._synPermTrimThreshold >= self._synPermConnected:
raise InvalidSPParamValueError(
"synPermTrimThreshold ({}) must be less than synPermConnected ({})"
.format(repr(self._synPermTrimThreshold),
repr(self._synPermConnected)))
self._updatePeriod = 50
initConnectedPct = 0.5
self._version = VERSION
self._iterationNum = 0
self._iterationLearnNum = 0
# Store the set of all inputs within each columns potential pool as a
# single adjacency matrix such that matrix rows map to cortical columns,
# and matrix columns map to input buts. If potentialPools[i][j] == 1,
# then input bit 'j' is in column 'i's potential pool. A column can only be
# connected to inputs in its potential pool. Here, BinaryCorticalColumns
# is used to provide cortical column-centric semantics for what is
# otherwise a sparse binary matrix implementation. Sparse binary matrix is
# used as an optimization since a column will only be connected to a small
# fraction of input bits.
self._potentialPools = BinaryCorticalColumns(numInputs)
self._potentialPools.resize(numColumns, numInputs)
# Initialize the permanences for each column. Similar to the
# 'self._potentialPools', the permanences are stored in a matrix whose rows
# represent the cortical columns, and whose columns represent the input
# bits. If self._permanences[i][j] = 0.2, then the synapse connecting
# cortical column 'i' to input bit 'j' has a permanence of 0.2. Here,
# CorticalColumns is used to provide cortical column-centric semantics for
# what is otherwise a sparse matrix implementation. Sparse matrix is used
# as an optimization to improve computation time of alforithms that
# require iterating over the data structure. This permanence matrix is
# only allowed to have non-zero elements where the potential pool is
# non-zero.
self._permanences = CorticalColumns(numColumns, numInputs)
# Initialize a tiny random tie breaker. This is used to determine winning
# columns where the overlaps are identical.
self._tieBreaker = numpy.array([0.01 * self._random.getReal64() for i in
xrange(self._numColumns)],
dtype=realDType)
# 'self._connectedSynapses' is a similar matrix to 'self._permanences'
# (rows represent cortical columns, columns represent input bits) whose
# entries represent whether the cortical column is connected to the input
# bit, i.e. its permanence value is greater than 'synPermConnected'. While
# this information is readily available from the 'self._permanence' matrix,
# it is stored separately for efficiency purposes.
self._connectedSynapses = BinaryCorticalColumns(numInputs)
self._connectedSynapses.resize(numColumns, numInputs)
# Stores the number of connected synapses for each column. This is simply
# a sum of each row of 'self._connectedSynapses'. again, while this
# information is readily available from 'self._connectedSynapses', it is
# stored separately for efficiency purposes.
self._connectedCounts = numpy.zeros(numColumns, dtype=realDType)
# Initialize the set of permanence values for each column. Ensure that
# each column is connected to enough input bits to allow it to be
# activated.
for columnIndex in xrange(numColumns):
potential = self._mapPotential(columnIndex)
self._potentialPools.replace(columnIndex, potential.nonzero()[0])
perm = self._initPermanence(potential, initConnectedPct)
self._updatePermanencesForColumn(perm, columnIndex, raisePerm=True)
self._overlapDutyCycles = numpy.zeros(numColumns, dtype=realDType)
self._activeDutyCycles = numpy.zeros(numColumns, dtype=realDType)
self._minOverlapDutyCycles = numpy.zeros(numColumns,
dtype=realDType)
self._boostFactors = numpy.ones(numColumns, dtype=realDType)
# The inhibition radius determines the size of a column's local
# neighborhood. A cortical column must overcome the overlap score of
# columns in its neighborhood in order to become active. This radius is
# updated every learning round. It grows and shrinks with the average
# number of connected synapses per column.
self._inhibitionRadius = 0
self._updateInhibitionRadius()
if self._spVerbosity > 0:
self.printParameters()
def getColumnDimensions(self):
"""
:returns: (iter) the dimensions of the columns in the region
"""
return self._columnDimensions
def getInputDimensions(self):
"""
:returns: (iter) the dimensions of the input vector
"""
return self._inputDimensions
def getNumColumns(self):
"""
:returns: (int) the total number of columns
"""
return self._numColumns
def getNumInputs(self):
"""
:returns: (int) the total number of inputs.
"""
return self._numInputs
def getPotentialRadius(self):
"""
:returns: (float) the potential radius
"""
return self._potentialRadius
def setPotentialRadius(self, potentialRadius):
"""
:param potentialRadius: (float) value to set
"""
self._potentialRadius = potentialRadius
def getPotentialPct(self):
"""
:returns: (float) the potential percent
"""
return self._potentialPct
def setPotentialPct(self, potentialPct):
"""
:param potentialPct: (float) value to set
"""
self._potentialPct = potentialPct
def getGlobalInhibition(self):
"""
:returns: (bool) whether global inhibition is enabled.
"""
return self._globalInhibition
def setGlobalInhibition(self, globalInhibition):
"""
:param globalInhibition: (bool) value to set.
"""
self._globalInhibition = globalInhibition
def getNumActiveColumnsPerInhArea(self):
"""
:returns: (float) the number of active columns per inhibition area. Returns
a value less than 0 if parameter is unused.
"""
return self._numActiveColumnsPerInhArea
def setNumActiveColumnsPerInhArea(self, numActiveColumnsPerInhArea):
"""
Sets the number of active columns per inhibition area. Invalidates the
``localAreaDensity`` parameter
:param numActiveColumnsPerInhArea: (float) value to set
"""
assert(numActiveColumnsPerInhArea > 0)
self._numActiveColumnsPerInhArea = numActiveColumnsPerInhArea
self._localAreaDensity = 0
def getLocalAreaDensity(self):
"""
:returns: (float) the local area density. Returns a value less than 0 if
parameter is unused.
"""
return self._localAreaDensity
def setLocalAreaDensity(self, localAreaDensity):
"""
Sets the local area density. Invalidates the 'numActiveColumnsPerInhArea'
parameter
:param localAreaDensity: (float) value to set
"""
assert(localAreaDensity > 0 and localAreaDensity <= 1)
self._localAreaDensity = localAreaDensity
self._numActiveColumnsPerInhArea = 0
def getStimulusThreshold(self):
"""
:returns: (int) the stimulus threshold
"""
return self._stimulusThreshold
def setStimulusThreshold(self, stimulusThreshold):
"""
:param stimulusThreshold: (float) value to set.
"""
self._stimulusThreshold = stimulusThreshold
def getInhibitionRadius(self):
"""
:returns: (int) the inhibition radius
"""
return self._inhibitionRadius
def setInhibitionRadius(self, inhibitionRadius):
"""
:param inhibitionRadius: (int) value to set
"""
self._inhibitionRadius = inhibitionRadius
def getDutyCyclePeriod(self):
"""
:returns: (int) the duty cycle period
"""
return self._dutyCyclePeriod
def setDutyCyclePeriod(self, dutyCyclePeriod):
"""
:param dutyCyclePeriod: (int) value to set.
"""
self._dutyCyclePeriod = dutyCyclePeriod
def getBoostStrength(self):
"""
:returns: (float) the maximum boost value used.
"""
return self._boostStrength
def setBoostStrength(self, boostStrength):
"""
Sets the maximum boost value.
:param boostStrength: (float) value to set
"""
self._boostStrength = boostStrength
def getIterationNum(self):
"""
:returns: the iteration number"""
return self._iterationNum
def setIterationNum(self, iterationNum):
"""
:param iterationNum: (int) value to set
"""
self._iterationNum = iterationNum
def getIterationLearnNum(self):
"""
:returns: (int) The number of iterations that have been learned.
"""
return self._iterationLearnNum
def setIterationLearnNum(self, iterationLearnNum):
"""
:param iterationLearnNum: (int) value to set
"""
self._iterationLearnNum = iterationLearnNum
def getSpVerbosity(self):
"""
:returns: (int) the verbosity level, larger is more verbose.
"""
return self._spVerbosity
def setSpVerbosity(self, spVerbosity):
"""
:param spVerbosity: (int) value to set, larger is more verbose.
"""
self._spVerbosity = spVerbosity
def getUpdatePeriod(self):
"""
:returns: (int) The period at which active duty cycles are updated.
"""
return self._updatePeriod
def setUpdatePeriod(self, updatePeriod):
"""
:param updatePeriod: (int) The period at which active duty cycles are
updated.
"""
self._updatePeriod = updatePeriod
def getSynPermTrimThreshold(self):
"""
Sparsity is enforced by trimming out all permanence values below this value.
:returns: (float) the permanence trim threshold
"""
return self._synPermTrimThreshold
def setSynPermTrimThreshold(self, synPermTrimThreshold):
"""
Sparsity is enforced by trimming out all permanence values below this value.
:param synPermTrimThreshold: (float) the permanence trim threshold
"""
self._synPermTrimThreshold = synPermTrimThreshold
def getSynPermActiveInc(self):
"""
:returns: (float) the permanence increment amount for active synapses inputs
"""
return self._synPermActiveInc
def setSynPermActiveInc(self, synPermActiveInc):
"""
Sets the permanence increment amount for active synapses.
:param synPermActiveInc: (float) value to set.
"""
self._synPermActiveInc = synPermActiveInc
def getSynPermInactiveDec(self):
"""
:returns: (float) the permanence decrement amount for inactive synapses.
"""
return self._synPermInactiveDec
def setSynPermInactiveDec(self, synPermInactiveDec):
"""
Sets the permanence decrement amount for inactive synapses.
:param synPermInactiveDec: (float) value to set.
"""
self._synPermInactiveDec = synPermInactiveDec
def getSynPermBelowStimulusInc(self):
"""
:returns: (float) the permanence increment amount for columns that have not
been recently active.
"""
return self._synPermBelowStimulusInc
def setSynPermBelowStimulusInc(self, synPermBelowStimulusInc):
"""
Sets the permanence increment amount for columns that have not been
recently active.
:param synPermBelowStimulusInc: (float) value to set.
"""
self._synPermBelowStimulusInc = synPermBelowStimulusInc
def getSynPermConnected(self):
"""
:returns: (float) the permanence amount that qualifies a synapse as being
connected.
"""
return self._synPermConnected
def setSynPermConnected(self, synPermConnected):
"""
Sets the permanence amount that qualifies a synapse as being
connected.
:param synPermConnected: (float) value to set.
"""
self._synPermConnected = synPermConnected
def getMinPctOverlapDutyCycles(self):
"""
:returns: (float) the minimum tolerated overlaps, given as percent of
neighbors overlap score
"""
return self._minPctOverlapDutyCycles
def setMinPctOverlapDutyCycles(self, minPctOverlapDutyCycles):
"""
Sets the minimum tolerated activity duty cycle, given as percent of
neighbors' activity duty cycle.
:param minPctOverlapDutyCycles: (float) value to set.
"""
self._minPctOverlapDutyCycles = minPctOverlapDutyCycles
def getBoostFactors(self, boostFactors):
"""
Gets the boost factors for all columns. Input list will be overwritten.
:param boostFactors: (list) size must match number of columns.
"""
boostFactors[:] = self._boostFactors[:]
def setBoostFactors(self, boostFactors):
"""
Sets the boost factors for all columns. ``boostFactors`` size must match
the number of columns.
:param boostFactors: (iter) value to set."""
self._boostFactors[:] = boostFactors[:]
def getOverlapDutyCycles(self, overlapDutyCycles):
"""
Gets the overlap duty cycles for all columns. ``overlapDutyCycles``
size must match the number of columns.
:param overlapDutyCycles: (list) will be overwritten.
"""
overlapDutyCycles[:] = self._overlapDutyCycles[:]
def setOverlapDutyCycles(self, overlapDutyCycles):
"""
Sets the overlap duty cycles for all columns. ``overlapDutyCycles``
size must match the number of columns.
:param overlapDutyCycles: (list) value to set.
"""
self._overlapDutyCycles[:] = overlapDutyCycles
def getActiveDutyCycles(self, activeDutyCycles):
"""
Gets the activity duty cycles for all columns. Input list will be
overwritten.
:param activeDutyCycles: (list) size must match number of columns.
"""
activeDutyCycles[:] = self._activeDutyCycles[:]
def setActiveDutyCycles(self, activeDutyCycles):
"""
Sets the activity duty cycles for all columns. ``activeDutyCycles`` size
must match the number of columns.
:param activeDutyCycles: (list) value to set.
"""
self._activeDutyCycles[:] = activeDutyCycles
  def getMinOverlapDutyCycles(self, minOverlapDutyCycles):
    """
    Copies the minimum overlap duty cycles for all columns into the supplied
    list, overwriting its contents. ``minOverlapDutyCycles`` size must match
    the number of columns. (Note: despite the original ":returns:" wording,
    this method returns None; the result is delivered via the out-parameter.)

    :param minOverlapDutyCycles: (list) will be overwritten.
    """
    minOverlapDutyCycles[:] = self._minOverlapDutyCycles[:]
def setMinOverlapDutyCycles(self, minOverlapDutyCycles):
"""
Sets the minimum overlap duty cycles for all columns.
``minOverlapDutyCycles`` size must match the number of columns.
:param minOverlapDutyCycles: (iter) value to set.
"""
self._minOverlapDutyCycles[:] = minOverlapDutyCycles[:]
def getPotential(self, columnIndex, potential):
"""
:param columnIndex: (int) column index to get potential for.
:param potential: (list) will be overwritten with column potentials. Must
match the number of inputs.
"""
assert(columnIndex < self._numColumns)
potential[:] = self._potentialPools[columnIndex]
def setPotential(self, columnIndex, potential):
"""
Sets the potential mapping for a given column. ``potential`` size must match
the number of inputs, and must be greater than ``stimulusThreshold``.
:param columnIndex: (int) column index to set potential for.
:param potential: (list) value to set.
"""
assert(columnIndex < self._numColumns)
potentialSparse = numpy.where(potential > 0)[0]
if len(potentialSparse) < self._stimulusThreshold:
raise Exception("This is likely due to a " +
"value of stimulusThreshold that is too large relative " +
"to the input size.")
self._potentialPools.replace(columnIndex, potentialSparse)
def getPermanence(self, columnIndex, permanence):
"""
Returns the permanence values for a given column. ``permanence`` size
must match the number of inputs.
:param columnIndex: (int) column index to get permanence for.
:param permanence: (list) will be overwritten with permanences.
"""
assert(columnIndex < self._numColumns)
permanence[:] = self._permanences[columnIndex]
def setPermanence(self, columnIndex, permanence):
"""
Sets the permanence values for a given column. ``permanence`` size must
match the number of inputs.
:param columnIndex: (int) column index to set permanence for.
:param permanence: (list) value to set.
"""
assert(columnIndex < self._numColumns)
self._updatePermanencesForColumn(permanence, columnIndex, raisePerm=False)
def getConnectedSynapses(self, columnIndex, connectedSynapses):
"""
:param connectedSynapses: (list) will be overwritten
:returns: (iter) the connected synapses for a given column.
``connectedSynapses`` size must match the number of inputs"""
assert(columnIndex < self._numColumns)
connectedSynapses[:] = self._connectedSynapses[columnIndex]
def getConnectedCounts(self, connectedCounts):
"""
:param connectedCounts: (list) will be overwritten
:returns: (int) the number of connected synapses for all columns.
``connectedCounts`` size must match the number of columns.
"""
connectedCounts[:] = self._connectedCounts[:]
def getOverlaps(self):
"""
:returns: (iter) the overlap score for each column.
"""
return self._overlaps
def getBoostedOverlaps(self):
"""
:returns: (list) the boosted overlap score for each column.
"""
return self._boostedOverlaps
def compute(self, inputVector, learn, activeArray):
"""
This is the primary public method of the SpatialPooler class. This
function takes a input vector and outputs the indices of the active columns.
If 'learn' is set to True, this method also updates the permanences of the
columns.
:param inputVector: A numpy array of 0's and 1's that comprises the input
to the spatial pooler. The array will be treated as a one dimensional
array, therefore the dimensions of the array do not have to match the
exact dimensions specified in the class constructor. In fact, even a
list would suffice. The number of input bits in the vector must,
however, match the number of bits specified by the call to the
constructor. Therefore there must be a '0' or '1' in the array for
every input bit.
:param learn: A boolean value indicating whether learning should be
performed. Learning entails updating the permanence values of the
synapses, and hence modifying the 'state' of the model. Setting
learning to 'off' freezes the SP and has many uses. For example, you
might want to feed in various inputs and examine the resulting SDR's.
:param activeArray: An array whose size is equal to the number of columns.
Before the function returns this array will be populated with 1's at
the indices of the active columns, and 0's everywhere else.
"""
if not isinstance(inputVector, numpy.ndarray):
raise TypeError("Input vector must be a numpy array, not %s" %
str(type(inputVector)))
if inputVector.size != self._numInputs:
raise ValueError(
"Input vector dimensions don't match. Expecting %s but got %s" % (
inputVector.size, self._numInputs))
self._updateBookeepingVars(learn)
inputVector = numpy.array(inputVector, dtype=realDType)
inputVector.reshape(-1)
self._overlaps = self._calculateOverlap(inputVector)
# Apply boosting when learning is on
if learn:
self._boostedOverlaps = self._boostFactors * self._overlaps
else:
self._boostedOverlaps = self._overlaps
# Apply inhibition to determine the winning columns
activeColumns = self._inhibitColumns(self._boostedOverlaps)
if learn:
self._adaptSynapses(inputVector, activeColumns)
self._updateDutyCycles(self._overlaps, activeColumns)
self._bumpUpWeakColumns()
self._updateBoostFactors()
if self._isUpdateRound():
self._updateInhibitionRadius()
self._updateMinDutyCycles()
activeArray.fill(0)
activeArray[activeColumns] = 1
def stripUnlearnedColumns(self, activeArray):
"""
Removes the set of columns who have never been active from the set of
active columns selected in the inhibition round. Such columns cannot
represent learned pattern and are therefore meaningless if only inference
is required. This should not be done when using a random, unlearned SP
since you would end up with no active columns.
:param activeArray: An array whose size is equal to the number of columns.
Any columns marked as active with an activeDutyCycle of 0 have
never been activated before and therefore are not active due to
learning. Any of these (unlearned) columns will be disabled (set to 0).
"""
neverLearned = numpy.where(self._activeDutyCycles == 0)[0]
activeArray[neverLearned] = 0
def _updateMinDutyCycles(self):
"""
Updates the minimum duty cycles defining normal activity for a column. A
column with activity duty cycle below this minimum threshold is boosted.
"""
if self._globalInhibition or self._inhibitionRadius > self._numInputs:
self._updateMinDutyCyclesGlobal()
else:
self._updateMinDutyCyclesLocal()
def _updateMinDutyCyclesGlobal(self):
"""
Updates the minimum duty cycles in a global fashion. Sets the minimum duty
cycles for the overlap all columns to be a percent of the maximum in the
region, specified by minPctOverlapDutyCycle. Functionality it is equivalent
to _updateMinDutyCyclesLocal, but this function exploits the globality of
the computation to perform it in a straightforward, and efficient manner.
"""
self._minOverlapDutyCycles.fill(
self._minPctOverlapDutyCycles * self._overlapDutyCycles.max()
)
def _updateMinDutyCyclesLocal(self):
"""
Updates the minimum duty cycles. The minimum duty cycles are determined
locally. Each column's minimum duty cycles are set to be a percent of the
maximum duty cycles in the column's neighborhood. Unlike
_updateMinDutyCyclesGlobal, here the values can be quite different for
different columns.
"""
for column in xrange(self._numColumns):
neighborhood = self._getColumnNeighborhood(column)
maxActiveDuty = self._activeDutyCycles[neighborhood].max()
maxOverlapDuty = self._overlapDutyCycles[neighborhood].max()
self._minOverlapDutyCycles[column] = (maxOverlapDuty *
self._minPctOverlapDutyCycles)
def _updateDutyCycles(self, overlaps, activeColumns):
"""
Updates the duty cycles for each column. The OVERLAP duty cycle is a moving
average of the number of inputs which overlapped with the each column. The
ACTIVITY duty cycles is a moving average of the frequency of activation for
each column.
Parameters:
----------------------------
:param overlaps:
An array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
:param activeColumns:
An array containing the indices of the active columns,
the sparse set of columns which survived inhibition
"""
overlapArray = numpy.zeros(self._numColumns, dtype=realDType)
activeArray = numpy.zeros(self._numColumns, dtype=realDType)
overlapArray[overlaps > 0] = 1
activeArray[activeColumns] = 1
period = self._dutyCyclePeriod
if (period > self._iterationNum):
period = self._iterationNum
self._overlapDutyCycles = self._updateDutyCyclesHelper(
self._overlapDutyCycles,
overlapArray,
period
)
self._activeDutyCycles = self._updateDutyCyclesHelper(
self._activeDutyCycles,
activeArray,
period
)
  def _updateInhibitionRadius(self):
    """
    Update the inhibition radius. The inhibition radius is a measure of the
    square (or hypersquare) of columns that each a column is "connected to"
    on average. Since columns are are not connected to each other directly, we
    determine this quantity by first figuring out how many *inputs* a column is
    connected to, and then multiplying it by the total number of columns that
    exist for each input. For multiple dimension the aforementioned
    calculations are averaged over all dimensions of inputs and columns. This
    value is meaningless if global inhibition is enabled.
    """
    if self._globalInhibition:
      # With global inhibition the radius is unused for neighborhoods; pin it
      # to the largest column dimension as a conventional placeholder.
      self._inhibitionRadius = int(self._columnDimensions.max())
      return

    # Average connected span (in input space) over all columns ...
    avgConnectedSpan = numpy.average(
                          [self._avgConnectedSpanForColumnND(i)
                          for i in xrange(self._numColumns)]
                        )
    # ... scaled into column space via the columns-per-input ratio.
    columnsPerInput = self._avgColumnsPerInput()
    diameter = avgConnectedSpan * columnsPerInput
    # Convert diameter to radius, clamp to at least 1, round to nearest int.
    radius = (diameter - 1) / 2.0
    radius = max(1.0, radius)
    self._inhibitionRadius = int(radius + 0.5)
def _avgColumnsPerInput(self):
"""
The average number of columns per input, taking into account the topology
of the inputs and columns. This value is used to calculate the inhibition
radius. This function supports an arbitrary number of dimensions. If the
number of column dimensions does not match the number of input dimensions,
we treat the missing, or phantom dimensions as 'ones'.
"""
#TODO: extend to support different number of dimensions for inputs and
# columns
numDim = max(self._columnDimensions.size, self._inputDimensions.size)
colDim = numpy.ones(numDim)
colDim[:self._columnDimensions.size] = self._columnDimensions
inputDim = numpy.ones(numDim)
inputDim[:self._inputDimensions.size] = self._inputDimensions
columnsPerInput = colDim.astype(realDType) / inputDim
return numpy.average(columnsPerInput)
def _avgConnectedSpanForColumn1D(self, columnIndex):
"""
The range of connected synapses for column. This is used to
calculate the inhibition radius. This variation of the function only
supports a 1 dimensional column topology.
Parameters:
----------------------------
:param columnIndex: The index identifying a column in the permanence,
potential and connectivity matrices
"""
assert(self._inputDimensions.size == 1)
connected = self._connectedSynapses[columnIndex].nonzero()[0]
if connected.size == 0:
return 0
else:
return max(connected) - min(connected) + 1
def _avgConnectedSpanForColumn2D(self, columnIndex):
"""
The range of connectedSynapses per column, averaged for each dimension.
This value is used to calculate the inhibition radius. This variation of
the function only supports a 2 dimensional column topology.
Parameters:
----------------------------
:param columnIndex: The index identifying a column in the permanence,
potential and connectivity matrices
"""
assert(self._inputDimensions.size == 2)
connected = self._connectedSynapses[columnIndex]
(rows, cols) = connected.reshape(self._inputDimensions).nonzero()
if rows.size == 0 and cols.size == 0:
return 0
rowSpan = rows.max() - rows.min() + 1
colSpan = cols.max() - cols.min() + 1
return numpy.average([rowSpan, colSpan])
def _avgConnectedSpanForColumnND(self, columnIndex):
"""
The range of connectedSynapses per column, averaged for each dimension.
This value is used to calculate the inhibition radius. This variation of
the function supports arbitrary column dimensions.
Parameters:
----------------------------
:param index: The index identifying a column in the permanence, potential
and connectivity matrices.
"""
dimensions = self._inputDimensions
connected = self._connectedSynapses[columnIndex].nonzero()[0]
if connected.size == 0:
return 0
maxCoord = numpy.empty(self._inputDimensions.size)
minCoord = numpy.empty(self._inputDimensions.size)
maxCoord.fill(-1)
minCoord.fill(max(self._inputDimensions))
for i in connected:
maxCoord = numpy.maximum(maxCoord, numpy.unravel_index(i, dimensions))
minCoord = numpy.minimum(minCoord, numpy.unravel_index(i, dimensions))
return numpy.average(maxCoord - minCoord + 1)
def _adaptSynapses(self, inputVector, activeColumns):
"""
The primary method in charge of learning. Adapts the permanence values of
the synapses based on the input vector, and the chosen columns after
inhibition round. Permanence values are increased for synapses connected to
input bits that are turned on, and decreased for synapses connected to
inputs bits that are turned off.
Parameters:
----------------------------
:param inputVector:
A numpy array of 0's and 1's that comprises the input to
the spatial pooler. There exists an entry in the array
for every input bit.
:param activeColumns:
An array containing the indices of the columns that
survived inhibition.
"""
inputIndices = numpy.where(inputVector > 0)[0]
permChanges = numpy.zeros(self._numInputs, dtype=realDType)
permChanges.fill(-1 * self._synPermInactiveDec)
permChanges[inputIndices] = self._synPermActiveInc
for columnIndex in activeColumns:
perm = self._permanences[columnIndex]
maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
perm[maskPotential] += permChanges[maskPotential]
self._updatePermanencesForColumn(perm, columnIndex, raisePerm=True)
def _bumpUpWeakColumns(self):
"""
This method increases the permanence values of synapses of columns whose
activity level has been too low. Such columns are identified by having an
overlap duty cycle that drops too much below those of their peers. The
permanence values for such columns are increased.
"""
weakColumns = numpy.where(self._overlapDutyCycles
< self._minOverlapDutyCycles)[0]
for columnIndex in weakColumns:
perm = self._permanences[columnIndex].astype(realDType)
maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
perm[maskPotential] += self._synPermBelowStimulusInc
self._updatePermanencesForColumn(perm, columnIndex, raisePerm=False)
def _raisePermanenceToThreshold(self, perm, mask):
"""
This method ensures that each column has enough connections to input bits
to allow it to become active. Since a column must have at least
'self._stimulusThreshold' overlaps in order to be considered during the
inhibition phase, columns without such minimal number of connections, even
if all the input bits they are connected to turn on, have no chance of
obtaining the minimum threshold. For such columns, the permanence values
are increased until the minimum number of connections are formed.
Parameters:
----------------------------
:param perm: An array of permanence values for a column. The array is
"dense", i.e. it contains an entry for each input bit, even
if the permanence value is 0.
:param mask: the indices of the columns whose permanences need to be
raised.
"""
if len(mask) < self._stimulusThreshold:
raise Exception("This is likely due to a " +
"value of stimulusThreshold that is too large relative " +
"to the input size. [len(mask) < self._stimulusThreshold]")
numpy.clip(perm, self._synPermMin, self._synPermMax, out=perm)
while True:
numConnected = numpy.nonzero(
perm > self._synPermConnected - PERMANENCE_EPSILON)[0].size
if numConnected >= self._stimulusThreshold:
return
perm[mask] += self._synPermBelowStimulusInc
def _updatePermanencesForColumn(self, perm, columnIndex, raisePerm=True):
"""
This method updates the permanence matrix with a column's new permanence
values. The column is identified by its index, which reflects the row in
the matrix, and the permanence is given in 'dense' form, i.e. a full
array containing all the zeros as well as the non-zero values. It is in
charge of implementing 'clipping' - ensuring that the permanence values are
always between 0 and 1 - and 'trimming' - enforcing sparsity by zeroing out
all permanence values below '_synPermTrimThreshold'. It also maintains
the consistency between 'self._permanences' (the matrix storing the
permanence values), 'self._connectedSynapses', (the matrix storing the bits
each column is connected to), and 'self._connectedCounts' (an array storing
the number of input bits each column is connected to). Every method wishing
to modify the permanence matrix should do so through this method.
Parameters:
----------------------------
:param perm: An array of permanence values for a column. The array is
"dense", i.e. it contains an entry for each input bit, even
if the permanence value is 0.
:param index: The index identifying a column in the permanence, potential
and connectivity matrices
:param raisePerm: A boolean value indicating whether the permanence values
should be raised until a minimum number are synapses are in
a connected state. Should be set to 'false' when a direct
assignment is required.
"""
maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
if raisePerm:
self._raisePermanenceToThreshold(perm, maskPotential)
perm[perm < self._synPermTrimThreshold] = 0
numpy.clip(perm, self._synPermMin, self._synPermMax, out=perm)
newConnected = numpy.where(perm >=
self._synPermConnected - PERMANENCE_EPSILON)[0]
self._permanences.update(columnIndex, perm)
self._connectedSynapses.replace(columnIndex, newConnected)
self._connectedCounts[columnIndex] = newConnected.size
def _initPermConnected(self):
"""
Returns a randomly generated permanence value for a synapses that is
initialized in a connected state. The basic idea here is to initialize
permanence values very close to synPermConnected so that a small number of
learning steps could make it disconnected or connected.
Note: experimentation was done a long time ago on the best way to initialize
permanence values, but the history for this particular scheme has been lost.
"""
p = self._synPermConnected + (
self._synPermMax - self._synPermConnected)*self._random.getReal64()
# Ensure we don't have too much unnecessary precision. A full 64 bits of
# precision causes numerical stability issues across platforms and across
# implementations
p = int(p*100000) / 100000.0
return p
def _initPermNonConnected(self):
"""
Returns a randomly generated permanence value for a synapses that is to be
initialized in a non-connected state.
"""
p = self._synPermConnected * self._random.getReal64()
# Ensure we don't have too much unnecessary precision. A full 64 bits of
# precision causes numerical stability issues across platforms and across
# implementations
p = int(p*100000) / 100000.0
return p
  def _initPermanence(self, potential, connectedPct):
    """
    Initializes the permanences of a column. The method
    returns a 1-D array the size of the input, where each entry in the
    array represents the initial permanence value between the input bit
    at the particular index in the array, and the column represented by
    the 'index' parameter.

    Parameters:
    ----------------------------
    :param potential: A numpy array specifying the potential pool of the column.
                    Permanence values will only be generated for input bits
                    corresponding to indices for which the mask value is 1.
    :param connectedPct: A value between 0 or 1 governing the chance, for each
                         permanence, that the initial permanence value will
                         be a value that is considered connected.
    """
    # Determine which inputs bits will start out as connected
    # to the inputs. Initially a subset of the input bits in a
    # column's potential pool will be connected. This number is
    # given by the parameter "connectedPct"
    # NOTE: the loop must visit inputs in index order and draw exactly one
    # random number per potential bit — the RNG call sequence is part of the
    # reproducible behavior, so do not vectorize this loop.
    perm = numpy.zeros(self._numInputs, dtype=realDType)
    for i in xrange(self._numInputs):
      if (potential[i] < 1):
        continue

      if (self._random.getReal64() <= connectedPct):
        perm[i] = self._initPermConnected()
      else:
        perm[i] = self._initPermNonConnected()

    # Clip off low values. Since we use a sparse representation
    # to store the permanence values this helps reduce memory
    # requirements.
    perm[perm < self._synPermTrimThreshold] = 0

    return perm
def _mapColumn(self, index):
"""
Maps a column to its respective input index, keeping to the topology of
the region. It takes the index of the column as an argument and determines
what is the index of the flattened input vector that is to be the center of
the column's potential pool. It distributes the columns over the inputs
uniformly. The return value is an integer representing the index of the
input bit. Examples of the expected output of this method:
* If the topology is one dimensional, and the column index is 0, this
method will return the input index 0. If the column index is 1, and there
are 3 columns over 7 inputs, this method will return the input index 3.
* If the topology is two dimensional, with column dimensions [3, 5] and
input dimensions [7, 11], and the column index is 3, the method
returns input index 8.
Parameters:
----------------------------
:param index: The index identifying a column in the permanence, potential
and connectivity matrices.
:param wrapAround: A boolean value indicating that boundaries should be
ignored.
"""
columnCoords = numpy.unravel_index(index, self._columnDimensions)
columnCoords = numpy.array(columnCoords, dtype=realDType)
ratios = columnCoords / self._columnDimensions
inputCoords = self._inputDimensions * ratios
inputCoords += 0.5 * self._inputDimensions / self._columnDimensions
inputCoords = inputCoords.astype(int)
inputIndex = numpy.ravel_multi_index(inputCoords, self._inputDimensions)
return inputIndex
def _mapPotential(self, index):
"""
Maps a column to its input bits. This method encapsulates the topology of
the region. It takes the index of the column as an argument and determines
what are the indices of the input vector that are located within the
column's potential pool. The return value is a list containing the indices
of the input bits. The current implementation of the base class only
supports a 1 dimensional topology of columns with a 1 dimensional topology
of inputs. To extend this class to support 2-D topology you will need to
override this method. Examples of the expected output of this method:
* If the potentialRadius is greater than or equal to the largest input
dimension then each column connects to all of the inputs.
* If the topology is one dimensional, the input space is divided up evenly
among the columns and each column is centered over its share of the
inputs. If the potentialRadius is 5, then each column connects to the
input it is centered above as well as the 5 inputs to the left of that
input and the five inputs to the right of that input, wrapping around if
wrapAround=True.
* If the topology is two dimensional, the input space is again divided up
evenly among the columns and each column is centered above its share of
the inputs. If the potentialRadius is 5, the column connects to a square
that has 11 inputs on a side and is centered on the input that the column
is centered above.
Parameters:
----------------------------
:param index: The index identifying a column in the permanence, potential
and connectivity matrices.
"""
centerInput = self._mapColumn(index)
columnInputs = self._getInputNeighborhood(centerInput).astype(uintType)
# Select a subset of the receptive field to serve as the
# the potential pool
numPotential = int(columnInputs.size * self._potentialPct + 0.5)
selectedInputs = numpy.empty(numPotential, dtype=uintType)
self._random.sample(columnInputs, selectedInputs)
potential = numpy.zeros(self._numInputs, dtype=uintType)
potential[selectedInputs] = 1
return potential
@staticmethod
def _updateDutyCyclesHelper(dutyCycles, newInput, period):
"""
Updates a duty cycle estimate with a new value. This is a helper
function that is used to update several duty cycle variables in
the Column class, such as: overlapDutyCucle, activeDutyCycle,
minPctDutyCycleBeforeInh, minPctDutyCycleAfterInh, etc. returns
the updated duty cycle. Duty cycles are updated according to the following
formula:
(period - 1)*dutyCycle + newValue
dutyCycle := ----------------------------------
period
Parameters:
----------------------------
:param dutyCycles: An array containing one or more duty cycle values that need
to be updated
:param newInput: A new numerical value used to update the duty cycle
:param period: The period of the duty cycle
"""
assert(period >= 1)
return (dutyCycles * (period -1.0) + newInput) / period
def _updateBoostFactors(self):
"""
Update the boost factors for all columns. The boost factors are used to
increase the overlap of inactive columns to improve their chances of
becoming active, and hence encourage participation of more columns in the
learning process. The boosting function is a curve defined as:
boostFactors = exp[ - boostStrength * (dutyCycle - targetDensity)]
Intuitively this means that columns that have been active at the target
activation level have a boost factor of 1, meaning their overlap is not
boosted. Columns whose active duty cycle drops too much below that of their
neighbors are boosted depending on how infrequently they have been active.
Columns that has been active more than the target activation level have
a boost factor below 1, meaning their overlap is suppressed
The boostFactor depends on the activeDutyCycle via an exponential function:
boostFactor
^
|
|\
| \
1 _ | \
| _
| _ _
| _ _ _ _
+--------------------> activeDutyCycle
|
targetDensity
"""
if self._globalInhibition:
self._updateBoostFactorsGlobal()
else:
self._updateBoostFactorsLocal()
def _updateBoostFactorsGlobal(self):
"""
Update boost factors when global inhibition is used
"""
# When global inhibition is enabled, the target activation level is
# the sparsity of the spatial pooler
if (self._localAreaDensity > 0):
targetDensity = self._localAreaDensity
else:
inhibitionArea = ((2 * self._inhibitionRadius + 1)
** self._columnDimensions.size)
inhibitionArea = min(self._numColumns, inhibitionArea)
targetDensity = float(self._numActiveColumnsPerInhArea) / inhibitionArea
targetDensity = min(targetDensity, 0.5)
self._boostFactors = numpy.exp(
(targetDensity - self._activeDutyCycles) * self._boostStrength)
  def _updateBoostFactorsLocal(self):
    """
    Update boost factors when local inhibition is used. Each column's target
    activation level is the mean active duty cycle of its neighborhood, so
    boosting pushes every column toward its neighbors' average activity.
    """
    # Determine the target activation level for each column
    # The targetDensity is the average activeDutyCycles of the neighboring
    # columns of each column.
    targetDensity = numpy.zeros(self._numColumns, dtype=realDType)
    for i in xrange(self._numColumns):
      maskNeighbors = self._getColumnNeighborhood(i)
      targetDensity[i] = numpy.mean(self._activeDutyCycles[maskNeighbors])

    # Same exponential boosting curve as the global variant, but with a
    # per-column target density.
    self._boostFactors = numpy.exp(
      (targetDensity - self._activeDutyCycles) * self._boostStrength)
def _updateBookeepingVars(self, learn):
"""
Updates counter instance variables each round.
Parameters:
----------------------------
:param learn: a boolean value indicating whether learning should be
performed. Learning entails updating the permanence
values of the synapses, and hence modifying the 'state'
of the model. setting learning to 'off' might be useful
for indicating separate training vs. testing sets.
"""
self._iterationNum += 1
if learn:
self._iterationLearnNum += 1
def _calculateOverlap(self, inputVector):
"""
This function determines each column's overlap with the current input
vector. The overlap of a column is the number of synapses for that column
that are connected (permanence value is greater than '_synPermConnected')
to input bits which are turned on. The implementation takes advantage of
the SparseBinaryMatrix class to perform this calculation efficiently.
Parameters:
----------------------------
:param inputVector: a numpy array of 0's and 1's that comprises the input to
the spatial pooler.
"""
overlaps = numpy.zeros(self._numColumns, dtype=realDType)
self._connectedSynapses.rightVecSumAtNZ_fast(inputVector.astype(realDType),
overlaps)
return overlaps
def _calculateOverlapPct(self, overlaps):
return overlaps.astype(realDType) / self._connectedCounts
def _inhibitColumns(self, overlaps):
"""
Performs inhibition. This method calculates the necessary values needed to
actually perform inhibition and then delegates the task of picking the
active columns to helper functions.
Parameters:
----------------------------
:param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
"""
# determine how many columns should be selected in the inhibition phase.
# This can be specified by either setting the 'numActiveColumnsPerInhArea'
# parameter or the 'localAreaDensity' parameter when initializing the class
if (self._localAreaDensity > 0):
density = self._localAreaDensity
else:
inhibitionArea = ((2*self._inhibitionRadius + 1)
** self._columnDimensions.size)
inhibitionArea = min(self._numColumns, inhibitionArea)
density = float(self._numActiveColumnsPerInhArea) / inhibitionArea
density = min(density, 0.5)
if self._globalInhibition or \
self._inhibitionRadius > max(self._columnDimensions):
return self._inhibitColumnsGlobal(overlaps, density)
else:
return self._inhibitColumnsLocal(overlaps, density)
def _inhibitColumnsGlobal(self, overlaps, density):
"""
Perform global inhibition. Performing global inhibition entails picking the
top 'numActive' columns with the highest overlap score in the entire
region. At most half of the columns in a local neighborhood are allowed to
be active. Columns with an overlap score below the 'stimulusThreshold' are
always inhibited.
:param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
:param density: The fraction of columns to survive inhibition.
@return list with indices of the winning columns
"""
#calculate num active per inhibition area
numActive = int(density * self._numColumns)
# Calculate winners using stable sort algorithm (mergesort)
# for compatibility with C++
sortedWinnerIndices = numpy.argsort(overlaps, kind='mergesort')
# Enforce the stimulus threshold
start = len(sortedWinnerIndices) - numActive
while start < len(sortedWinnerIndices):
i = sortedWinnerIndices[start]
if overlaps[i] >= self._stimulusThreshold:
break
else:
start += 1
return sortedWinnerIndices[start:][::-1]
def _inhibitColumnsLocal(self, overlaps, density):
"""
Performs local inhibition. Local inhibition is performed on a column by
column basis. Each column observes the overlaps of its neighbors and is
selected if its overlap score is within the top 'numActive' in its local
neighborhood. At most half of the columns in a local neighborhood are
allowed to be active. Columns with an overlap score below the
'stimulusThreshold' are always inhibited.
:param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
:param density: The fraction of columns to survive inhibition. This
value is only an intended target. Since the surviving
columns are picked in a local fashion, the exact fraction
of surviving columns is likely to vary.
@return list with indices of the winning columns
"""
activeArray = numpy.zeros(self._numColumns, dtype="bool")
for column, overlap in enumerate(overlaps):
if overlap >= self._stimulusThreshold:
neighborhood = self._getColumnNeighborhood(column)
neighborhoodOverlaps = overlaps[neighborhood]
numBigger = numpy.count_nonzero(neighborhoodOverlaps > overlap)
# When there is a tie, favor neighbors that are already selected as
# active.
ties = numpy.where(neighborhoodOverlaps == overlap)
tiedNeighbors = neighborhood[ties]
numTiesLost = numpy.count_nonzero(activeArray[tiedNeighbors])
numActive = int(0.5 + density * len(neighborhood))
if numBigger + numTiesLost < numActive:
activeArray[column] = True
return activeArray.nonzero()[0]
def _isUpdateRound(self):
"""
returns true if enough rounds have passed to warrant updates of
duty cycles
"""
return (self._iterationNum % self._updatePeriod) == 0
def _getColumnNeighborhood(self, centerColumn):
"""
Gets a neighborhood of columns.
Simply calls topology.neighborhood or topology.wrappingNeighborhood
A subclass can insert different topology behavior by overriding this method.
:param centerColumn (int)
The center of the neighborhood.
@returns (1D numpy array of integers)
The columns in the neighborhood.
"""
if self._wrapAround:
return topology.wrappingNeighborhood(centerColumn,
self._inhibitionRadius,
self._columnDimensions)
else:
return topology.neighborhood(centerColumn,
self._inhibitionRadius,
self._columnDimensions)
def _getInputNeighborhood(self, centerInput):
"""
Gets a neighborhood of inputs.
Simply calls topology.wrappingNeighborhood or topology.neighborhood.
A subclass can insert different topology behavior by overriding this method.
:param centerInput (int)
The center of the neighborhood.
@returns (1D numpy array of integers)
The inputs in the neighborhood.
"""
if self._wrapAround:
return topology.wrappingNeighborhood(centerInput,
self._potentialRadius,
self._inputDimensions)
else:
return topology.neighborhood(centerInput,
self._potentialRadius,
self._inputDimensions)
def _seed(self, seed=-1):
"""
Initialize the random seed
"""
if seed != -1:
self._random = NupicRandom(seed)
else:
self._random = NupicRandom()
def __setstate__(self, state):
"""
Initialize class properties from stored values.
"""
# original version was a float so check for anything less than 2
if state['_version'] < 2:
# the wrapAround property was added in version 2,
# in version 1 the wrapAround parameter was True for SP initialization
state['_wrapAround'] = True
if state['_version'] < 3:
# the overlaps and boostedOverlaps properties were added in version 3,
state['_overlaps'] = numpy.zeros(self._numColumns, dtype=realDType)
state['_boostedOverlaps'] = numpy.zeros(self._numColumns, dtype=realDType)
# update version property to current SP version
state['_version'] = VERSION
self.__dict__.update(state)
  @classmethod
  def getSchema(cls):
    """Return the capnp proto schema class used to serialize this class."""
    return SpatialPoolerProto
def write(self, proto):
self._random.write(proto.random)
proto.numInputs = self._numInputs
proto.numColumns = self._numColumns
cdimsProto = proto.init("columnDimensions", len(self._columnDimensions))
for i, dim in enumerate(self._columnDimensions):
cdimsProto[i] = int(dim)
idimsProto = proto.init("inputDimensions", len(self._inputDimensions))
for i, dim in enumerate(self._inputDimensions):
idimsProto[i] = int(dim)
proto.potentialRadius = self._potentialRadius
proto.potentialPct = self._potentialPct
proto.inhibitionRadius = self._inhibitionRadius
proto.globalInhibition = bool(self._globalInhibition)
proto.numActiveColumnsPerInhArea = self._numActiveColumnsPerInhArea
proto.localAreaDensity = self._localAreaDensity
proto.stimulusThreshold = self._stimulusThreshold
proto.synPermInactiveDec = self._synPermInactiveDec
proto.synPermActiveInc = self._synPermActiveInc
proto.synPermBelowStimulusInc = self._synPermBelowStimulusInc
proto.synPermConnected = self._synPermConnected
proto.minPctOverlapDutyCycles = self._minPctOverlapDutyCycles
proto.dutyCyclePeriod = self._dutyCyclePeriod
proto.boostStrength = self._boostStrength
proto.wrapAround = self._wrapAround
proto.spVerbosity = self._spVerbosity
proto.synPermMin = self._synPermMin
proto.synPermMax = self._synPermMax
proto.synPermTrimThreshold = self._synPermTrimThreshold
proto.updatePeriod = self._updatePeriod
proto.version = self._version
proto.iterationNum = self._iterationNum
proto.iterationLearnNum = self._iterationLearnNum
self._potentialPools.write(proto.potentialPools)
self._permanences.write(proto.permanences)
tieBreakersProto = proto.init("tieBreaker", len(self._tieBreaker))
for i, v in enumerate(self._tieBreaker):
tieBreakersProto[i] = float(v)
overlapDutyCyclesProto = proto.init("overlapDutyCycles",
len(self._overlapDutyCycles))
for i, v in enumerate(self._overlapDutyCycles):
overlapDutyCyclesProto[i] = float(v)
activeDutyCyclesProto = proto.init("activeDutyCycles",
len(self._activeDutyCycles))
for i, v in enumerate(self._activeDutyCycles):
activeDutyCyclesProto[i] = float(v)
minOverlapDutyCyclesProto = proto.init("minOverlapDutyCycles",
len(self._minOverlapDutyCycles))
for i, v in enumerate(self._minOverlapDutyCycles):
minOverlapDutyCyclesProto[i] = float(v)
boostFactorsProto = proto.init("boostFactors", len(self._boostFactors))
for i, v in enumerate(self._boostFactors):
boostFactorsProto[i] = float(v)
  @classmethod
  def read(cls, proto):
    """
    Deserialize an instance from a capnp proto reader.

    @param proto (SpatialPoolerProto reader) Source proto object.
    @return A new instance populated from the proto.
    """
    numInputs = int(proto.numInputs)
    numColumns = int(proto.numColumns)
    # Bypass __init__; every attribute is restored explicitly below.
    instance = cls.__new__(cls)
    instance._random = NupicRandom()
    instance._random.read(proto.random)
    instance._numInputs = numInputs
    instance._numColumns = numColumns
    instance._columnDimensions = numpy.array(proto.columnDimensions)
    instance._inputDimensions = numpy.array(proto.inputDimensions)
    instance._potentialRadius = proto.potentialRadius
    # Permanence-like floats are rounded to EPSILON_ROUND digits so values
    # compare consistently after a serialization round trip.
    instance._potentialPct = round(proto.potentialPct,
                                   EPSILON_ROUND)
    instance._inhibitionRadius = proto.inhibitionRadius
    instance._globalInhibition = proto.globalInhibition
    instance._numActiveColumnsPerInhArea = proto.numActiveColumnsPerInhArea
    instance._localAreaDensity = proto.localAreaDensity
    instance._stimulusThreshold = proto.stimulusThreshold
    instance._synPermInactiveDec = round(proto.synPermInactiveDec,
                                         EPSILON_ROUND)
    instance._synPermActiveInc = round(proto.synPermActiveInc, EPSILON_ROUND)
    instance._synPermBelowStimulusInc = round(proto.synPermBelowStimulusInc,
                                              EPSILON_ROUND)
    instance._synPermConnected = round(proto.synPermConnected,
                                       EPSILON_ROUND)
    instance._minPctOverlapDutyCycles = round(proto.minPctOverlapDutyCycles,
                                              EPSILON_ROUND)
    instance._dutyCyclePeriod = proto.dutyCyclePeriod
    instance._boostStrength = proto.boostStrength
    instance._wrapAround = proto.wrapAround
    instance._spVerbosity = proto.spVerbosity
    instance._synPermMin = proto.synPermMin
    instance._synPermMax = proto.synPermMax
    instance._synPermTrimThreshold = round(proto.synPermTrimThreshold,
                                           EPSILON_ROUND)
    # TODO: These two overlaps attributes aren't currently saved.
    instance._overlaps = numpy.zeros(numColumns, dtype=realDType)
    instance._boostedOverlaps = numpy.zeros(numColumns, dtype=realDType)
    instance._updatePeriod = proto.updatePeriod
    # Note: the deserialized object is stamped with the *current* code
    # version, not the version stored in the proto.
    instance._version = VERSION
    instance._iterationNum = proto.iterationNum
    instance._iterationLearnNum = proto.iterationLearnNum
    instance._potentialPools = BinaryCorticalColumns(numInputs)
    instance._potentialPools.resize(numColumns, numInputs)
    instance._potentialPools.read(proto.potentialPools)
    instance._permanences = CorticalColumns(numColumns, numInputs)
    instance._permanences.read(proto.permanences)
    # Initialize ephemerals and make sure they get updated
    instance._connectedCounts = numpy.zeros(numColumns, dtype=realDType)
    instance._connectedSynapses = BinaryCorticalColumns(numInputs)
    instance._connectedSynapses.resize(numColumns, numInputs)
    for columnIndex in xrange(proto.numColumns):
      instance._updatePermanencesForColumn(
        instance._permanences[columnIndex], columnIndex, False
      )
    instance._tieBreaker = numpy.array(proto.tieBreaker, dtype=realDType)
    instance._overlapDutyCycles = numpy.array(proto.overlapDutyCycles,
                                              dtype=realDType)
    instance._activeDutyCycles = numpy.array(proto.activeDutyCycles,
                                             dtype=realDType)
    instance._minOverlapDutyCycles = numpy.array(proto.minOverlapDutyCycles,
                                                 dtype=realDType)
    instance._boostFactors = numpy.array(proto.boostFactors, dtype=realDType)
    return instance
  def printParameters(self):
    """
    Print the main spatial pooler parameters to stdout.

    Useful for debugging.
    """
    # NOTE: Python 2 print statements; some values come from getters, others
    # (columnDimensions, version) are read directly from private attributes.
    print "------------PY  SpatialPooler Parameters ------------------"
    print "numInputs                  = ", self.getNumInputs()
    print "numColumns                 = ", self.getNumColumns()
    print "columnDimensions           = ", self._columnDimensions
    print "numActiveColumnsPerInhArea = ", self.getNumActiveColumnsPerInhArea()
    print "potentialPct               = ", self.getPotentialPct()
    print "globalInhibition           = ", self.getGlobalInhibition()
    print "localAreaDensity           = ", self.getLocalAreaDensity()
    print "stimulusThreshold          = ", self.getStimulusThreshold()
    print "synPermActiveInc           = ", self.getSynPermActiveInc()
    print "synPermInactiveDec         = ", self.getSynPermInactiveDec()
    print "synPermConnected           = ", self.getSynPermConnected()
    print "minPctOverlapDutyCycle     = ", self.getMinPctOverlapDutyCycles()
    print "dutyCyclePeriod            = ", self.getDutyCyclePeriod()
    print "boostStrength              = ", self.getBoostStrength()
    print "spVerbosity                = ", self.getSpVerbosity()
    print "version                    = ", self._version
25,924 | trace.py | numenta_nupic-legacy/src/nupic/algorithms/monitor_mixin/trace.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Trace classes used in monitor mixin framework.
"""
import abc
import numpy
class Trace(object):
  """
  A record of the past data the algorithm has seen, with an entry for each
  iteration.
  """
  __metaclass__ = abc.ABCMeta

  def __init__(self, monitor, title):
    """
    @param monitor (MonitorMixinBase) Monitor Mixin instance that generated
                                      this trace
    @param title   (string)           Title
    """
    self.monitor = monitor
    self.title = title
    self.data = []

  def prettyPrintTitle(self):
    """Return the title, tagged with the monitor's name when one is set."""
    if self.monitor.mmName is None:
      return self.title
    return "[{0}] {1}".format(self.monitor.mmName, self.title)

  @staticmethod
  def prettyPrintDatum(datum):
    """
    @param datum (object) Datum from `self.data` to pretty-print

    @return (string) Pretty-printed datum; empty string for None
    """
    if datum is None:
      return ""
    return str(datum)
class IndicesTrace(Trace):
  """
  Each entry contains indices (for example of predicted => active cells).
  """

  def makeCountsTrace(self):
    """
    @return (CountsTrace) A new Trace made up of counts of this trace's indices.
    """
    counts = CountsTrace(self.monitor, "# {0}".format(self.title))
    counts.data = [len(entry) for entry in self.data]
    return counts

  def makeCumCountsTrace(self):
    """
    @return (CountsTrace) A new Trace made up of cumulative counts of this
    trace's indices.
    """
    cumulative = CountsTrace(self.monitor,
                             "# (cumulative) {0}".format(self.title))
    runningTotal = 0
    cumulativeData = []
    for count in self.makeCountsTrace().data:
      runningTotal += count
      cumulativeData.append(runningTotal)
    cumulative.data = cumulativeData
    return cumulative

  @staticmethod
  def prettyPrintDatum(datum):
    """Render the index collection as a sorted list string."""
    return str(sorted(list(datum)))
# Marker subclass: inherits all behavior from Trace unchanged.
class BoolsTrace(Trace):
  """
  Each entry contains bools (for example resets).
  """
  pass
# Marker subclass: inherits all behavior from Trace unchanged.
class CountsTrace(Trace):
  """
  Each entry contains counts (for example # of predicted => active cells).
  """
  pass
# Marker subclass: inherits all behavior from Trace unchanged.
class StringsTrace(Trace):
  """
  Each entry contains strings (for example sequence labels).
  """
  pass
class MetricsTrace(Trace):
  """
  Each entry contains Metrics (for example metric for # of predicted => active
  cells).
  """
  @staticmethod
  def prettyPrintDatum(datum):
    # Render the metric's summary statistics on one compact line. Assumes the
    # datum exposes min/max/sum/mean/standardDeviation (see metric.py).
    return ("min: {0:.2f}, max: {1:.2f}, sum: {2:.2f}, "
            "mean: {3:.2f}, std dev: {4:.2f}").format(
      datum.min, datum.max, datum.sum, datum.mean, datum.standardDeviation)
| 3,508 | Python | .py | 103 | 29.951456 | 80 | 0.665183 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,925 | temporal_memory_monitor_mixin.py | numenta_nupic-legacy/src/nupic/algorithms/monitor_mixin/temporal_memory_monitor_mixin.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal Memory mixin that enables detailed monitoring of history.
"""
import copy
from collections import defaultdict
from nupic.algorithms.monitor_mixin.metric import Metric
from nupic.algorithms.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from prettytable import PrettyTable
from nupic.algorithms.monitor_mixin.trace import (IndicesTrace, CountsTrace,
BoolsTrace, StringsTrace)
class TemporalMemoryMonitorMixin(MonitorMixinBase):
  """
  Mixin for TemporalMemory that stores a detailed history, for inspection and
  debugging.
  """
  def __init__(self, *args, **kwargs):
    super(TemporalMemoryMonitorMixin, self).__init__(*args, **kwargs)
    self._mmResetActive = True # First iteration is always a reset
  def mmGetTraceActiveColumns(self):
    """
    @return (Trace) Trace of active columns
    """
    return self._mmTraces["activeColumns"]
  def mmGetTracePredictiveCells(self):
    """
    @return (Trace) Trace of predictive cells
    """
    return self._mmTraces["predictiveCells"]
  def mmGetTraceNumSegments(self):
    """
    @return (Trace) Trace of # segments
    """
    return self._mmTraces["numSegments"]
  def mmGetTraceNumSynapses(self):
    """
    @return (Trace) Trace of # synapses
    """
    return self._mmTraces["numSynapses"]
  def mmGetTraceSequenceLabels(self):
    """
    @return (Trace) Trace of sequence labels
    """
    return self._mmTraces["sequenceLabels"]
  def mmGetTraceResets(self):
    """
    @return (Trace) Trace of resets
    """
    return self._mmTraces["resets"]
  def mmGetTracePredictedActiveCells(self):
    """
    @return (Trace) Trace of predicted => active cells
    """
    # Transition traces are computed lazily; refresh them first if stale.
    self._mmComputeTransitionTraces()
    return self._mmTraces["predictedActiveCells"]
  def mmGetTracePredictedInactiveCells(self):
    """
    @return (Trace) Trace of predicted => inactive cells
    """
    self._mmComputeTransitionTraces()
    return self._mmTraces["predictedInactiveCells"]
  def mmGetTracePredictedActiveColumns(self):
    """
    @return (Trace) Trace of predicted => active columns
    """
    self._mmComputeTransitionTraces()
    return self._mmTraces["predictedActiveColumns"]
  def mmGetTracePredictedInactiveColumns(self):
    """
    @return (Trace) Trace of predicted => inactive columns
    """
    self._mmComputeTransitionTraces()
    return self._mmTraces["predictedInactiveColumns"]
  def mmGetTraceUnpredictedActiveColumns(self):
    """
    @return (Trace) Trace of unpredicted => active columns
    """
    self._mmComputeTransitionTraces()
    return self._mmTraces["unpredictedActiveColumns"]
  def mmGetMetricFromTrace(self, trace):
    """
    Convenience method to compute a metric over an indices trace, excluding
    resets.

    @param trace (IndicesTrace) Trace of indices

    @return (Metric) Metric over trace excluding resets
    """
    return Metric.createFromTrace(trace.makeCountsTrace(),
                                  excludeResets=self.mmGetTraceResets())
  def mmGetMetricSequencesPredictedActiveCellsPerColumn(self):
    """
    Metric for number of predicted => active cells per column for each sequence
    @return (Metric) metric
    """
    self._mmComputeTransitionTraces()
    numCellsPerColumn = []
    for predictedActiveCells in (
        self._mmData["predictedActiveCellsForSequence"].values()):
      cellsForColumn = self.mapCellsToColumns(predictedActiveCells)
      numCellsPerColumn += [len(x) for x in cellsForColumn.values()]
    return Metric(self,
                  "# predicted => active cells per column for each sequence",
                  numCellsPerColumn)
  def mmGetMetricSequencesPredictedActiveCellsShared(self):
    """
    Metric for number of sequences each predicted => active cell appears in

    Note: This metric is flawed when it comes to high-order sequences.
    @return (Metric) metric
    """
    self._mmComputeTransitionTraces()
    numSequencesForCell = defaultdict(lambda: 0)
    for predictedActiveCells in (
        self._mmData["predictedActiveCellsForSequence"].values()):
      for cell in predictedActiveCells:
        numSequencesForCell[cell] += 1
    return Metric(self,
                  "# sequences each predicted => active cells appears in",
                  numSequencesForCell.values())
  def mmPrettyPrintConnections(self):
    """
    Pretty print the connections in the temporal memory.

    TODO: Use PrettyTable.
    @return (string) Pretty-printed text
    """
    text = ""
    text += ("Segments: (format => "
             "(#) [(source cell=permanence ...),       ...]\n")
    text += "------------------------------------\n"
    columns = range(self.numberOfColumns())
    for column in columns:
      cells = self.cellsForColumn(column)
      for cell in cells:
        segmentDict = dict()
        for seg in self.connections.segmentsForCell(cell):
          synapseList = []
          for synapse in self.connections.synapsesForSegment(seg):
            synapseData = self.connections.dataForSynapse(synapse)
            synapseList.append(
                (synapseData.presynapticCell, synapseData.permanence))
          # Sort synapses by presynaptic cell for a stable, readable listing.
          synapseList.sort()
          synapseStringList = ["{0:3}={1:.2f}".format(sourceCell, permanence) for
                               sourceCell, permanence in synapseList]
          segmentDict[seg] = "({0})".format(" ".join(synapseStringList))
        text += ("Column {0:3} / Cell {1:3}:\t({2}) {3}\n".format(
          column, cell,
          len(segmentDict.values()),
          "[{0}]".format(",       ".join(segmentDict.values()))))
      if column < len(columns) - 1:  # not last
        text += "\n"
    text += "------------------------------------\n"
    return text
  def mmPrettyPrintSequenceCellRepresentations(self, sortby="Column"):
    """
    Pretty print the cell representations for sequences in the history.
    @param sortby (string) Column of table to sort by
    @return (string) Pretty-printed text
    """
    self._mmComputeTransitionTraces()
    table = PrettyTable(["Pattern", "Column", "predicted=>active cells"])
    for sequenceLabel, predictedActiveCells in (
        self._mmData["predictedActiveCellsForSequence"].iteritems()):
      cellsForColumn = self.mapCellsToColumns(predictedActiveCells)
      for column, cells in cellsForColumn.iteritems():
        table.add_row([sequenceLabel, column, list(cells)])
    return table.get_string(sortby=sortby).encode("utf-8")
  # ==============================
  # Helper methods
  # ==============================
  def _mmComputeTransitionTraces(self):
    """
    Computes the transition traces, if necessary.

    Transition traces are the following:

        predicted => active cells
        predicted => inactive cells
        predicted => active columns
        predicted => inactive columns
        unpredicted => active columns

    No-op when the traces are already up to date (staleness flag is set by
    compute() and cleared here).
    """
    if not self._mmTransitionTracesStale:
      return
    self._mmData["predictedActiveCellsForSequence"] = defaultdict(set)
    self._mmTraces["predictedActiveCells"] = IndicesTrace(self,
      "predicted => active cells (correct)")
    self._mmTraces["predictedInactiveCells"] = IndicesTrace(self,
      "predicted => inactive cells (extra)")
    self._mmTraces["predictedActiveColumns"] = IndicesTrace(self,
      "predicted => active columns (correct)")
    self._mmTraces["predictedInactiveColumns"] = IndicesTrace(self,
      "predicted => inactive columns (extra)")
    self._mmTraces["unpredictedActiveColumns"] = IndicesTrace(self,
      "unpredicted => active columns (bursting)")
    predictedCellsTrace = self._mmTraces["predictedCells"]
    # Walk the history; classify each iteration's predicted cells against the
    # columns that actually became active.
    for i, activeColumns in enumerate(self.mmGetTraceActiveColumns().data):
      predictedActiveCells = set()
      predictedInactiveCells = set()
      predictedActiveColumns = set()
      predictedInactiveColumns = set()
      for predictedCell in predictedCellsTrace.data[i]:
        predictedColumn = self.columnForCell(predictedCell)
        if predictedColumn  in activeColumns:
          predictedActiveCells.add(predictedCell)
          predictedActiveColumns.add(predictedColumn)
          sequenceLabel = self.mmGetTraceSequenceLabels().data[i]
          if sequenceLabel is not None:
            self._mmData["predictedActiveCellsForSequence"][sequenceLabel].add(
              predictedCell)
        else:
          predictedInactiveCells.add(predictedCell)
          predictedInactiveColumns.add(predictedColumn)
      unpredictedActiveColumns = activeColumns - predictedActiveColumns
      self._mmTraces["predictedActiveCells"].data.append(predictedActiveCells)
      self._mmTraces["predictedInactiveCells"].data.append(predictedInactiveCells)
      self._mmTraces["predictedActiveColumns"].data.append(predictedActiveColumns)
      self._mmTraces["predictedInactiveColumns"].data.append(
        predictedInactiveColumns)
      self._mmTraces["unpredictedActiveColumns"].data.append(
        unpredictedActiveColumns)
    self._mmTransitionTracesStale = False
  # ==============================
  # Overrides
  # ==============================
  def compute(self, activeColumns, sequenceLabel=None, **kwargs):
    """
    Run one timestep of the wrapped algorithm, recording traces before and
    after; marks transition traces stale.
    """
    # Append last cycle's predictiveCells to *predicTEDCells* trace
    self._mmTraces["predictedCells"].data.append(set(self.getPredictiveCells()))
    super(TemporalMemoryMonitorMixin, self).compute(activeColumns, **kwargs)
    # Append this cycle's predictiveCells to *predicTIVECells* trace
    self._mmTraces["predictiveCells"].data.append(set(self.getPredictiveCells()))
    self._mmTraces["activeCells"].data.append(set(self.getActiveCells()))
    self._mmTraces["activeColumns"].data.append(activeColumns)
    self._mmTraces["numSegments"].data.append(self.connections.numSegments())
    self._mmTraces["numSynapses"].data.append(self.connections.numSynapses())
    self._mmTraces["sequenceLabels"].data.append(sequenceLabel)
    self._mmTraces["resets"].data.append(self._mmResetActive)
    self._mmResetActive = False
    self._mmTransitionTracesStale = True
  def reset(self):
    """Reset the wrapped algorithm and flag the next compute() as a reset."""
    super(TemporalMemoryMonitorMixin, self).reset()
    self._mmResetActive = True
  def mmGetDefaultTraces(self, verbosity=1):
    """
    @param verbosity (int) At verbosity 1 the index traces are collapsed to
           count traces; higher verbosity returns the raw index traces.
    @return (list) Default traces, always ending with the sequence labels
    """
    traces = [
      self.mmGetTraceActiveColumns(),
      self.mmGetTracePredictedActiveColumns(),
      self.mmGetTracePredictedInactiveColumns(),
      self.mmGetTraceUnpredictedActiveColumns(),
      self.mmGetTracePredictedActiveCells(),
      self.mmGetTracePredictedInactiveCells()
    ]
    if verbosity == 1:
      traces = [trace.makeCountsTrace() for trace in traces]
    traces += [
      self.mmGetTraceNumSegments(),
      self.mmGetTraceNumSynapses()
    ]
    return traces + [self.mmGetTraceSequenceLabels()]
  def mmGetDefaultMetrics(self, verbosity=1):
    """
    @return (list) Default metrics: the count traces with resets excluded,
            the segment/synapse traces as-is, plus the two sequence metrics.
    """
    resetsTrace = self.mmGetTraceResets()
    # Slices: [:-3] = the six count traces, [-3:-1] = numSegments/numSynapses
    # (the trailing sequence-labels trace is skipped).
    return ([Metric.createFromTrace(trace, excludeResets=resetsTrace)
             for trace in self.mmGetDefaultTraces()[:-3]] +
            [Metric.createFromTrace(trace)
             for trace in self.mmGetDefaultTraces()[-3:-1]] +
            [self.mmGetMetricSequencesPredictedActiveCellsPerColumn(),
             self.mmGetMetricSequencesPredictedActiveCellsShared()])
  def mmClearHistory(self):
    """Clear all stored traces and re-create the empty per-key trace objects."""
    super(TemporalMemoryMonitorMixin, self).mmClearHistory()
    self._mmTraces["predictedCells"] = IndicesTrace(self, "predicted cells")
    self._mmTraces["activeColumns"] = IndicesTrace(self, "active columns")
    self._mmTraces["activeCells"] = IndicesTrace(self, "active cells")
    self._mmTraces["predictiveCells"] = IndicesTrace(self, "predictive cells")
    self._mmTraces["numSegments"] = CountsTrace(self, "# segments")
    self._mmTraces["numSynapses"] = CountsTrace(self, "# synapses")
    self._mmTraces["sequenceLabels"] = StringsTrace(self, "sequence labels")
    self._mmTraces["resets"] = BoolsTrace(self, "resets")
    self._mmTransitionTracesStale = True
  def mmGetCellActivityPlot(self, title="", showReset=False,
                            resetShading=0.25, activityType="activeCells"):
    """
    Returns plot of the cell activity.

    @param title        (string)  an optional title for the figure
    @param showReset    (bool)    if true, the first set of cell activities
                                  after a reset will have a gray background
    @param resetShading (float)   if showReset is true, this float specifies the
                                  intensity of the reset background with 0.0
                                  being white and 1.0 being black
    @param activityType (string)  The type of cell activity to display. Valid
                                  types include "activeCells",
                                  "predictiveCells", "predictedCells",
                                  and "predictedActiveCells"
    @return (Plot) plot
    """
    if activityType == "predictedActiveCells":
      # This trace only exists after the transition traces are computed.
      self._mmComputeTransitionTraces()
    cellTrace = copy.deepcopy(self._mmTraces[activityType].data)
    for i in xrange(len(cellTrace)):
      cellTrace[i] = self.getCellIndices(cellTrace[i])
    return self.mmGetCellTracePlot(cellTrace, self.numberOfCells(),
                                   activityType, title, showReset,
                                   resetShading)
| 14,320 | Python | .py | 315 | 38.288889 | 82 | 0.680366 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,926 | monitor_mixin_base.py | numenta_nupic-legacy/src/nupic/algorithms/monitor_mixin/monitor_mixin_base.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
MonitorMixinBase class used in monitor mixin framework.
Using a monitor mixin with your algorithm
-----------------------------------------
1. Create a subclass of your algorithm class, with the first parent being the
corresponding Monitor class. For example,
class MonitoredTemporalMemory(TemporalMemoryMonitorMixin,
TemporalMemory): pass
2. Create an instance of the monitored class and use that.
instance = MonitoredTemporalMemory()
# Run data through instance
3. Now you can call the following methods to print monitored data from of your
instance:
- instance.mmPrettyPrintMetrics(instance.mmGetDefaultMetrics())
- instance.mmPrettyPrintTraces(instance.mmGetDefaultTraces())
Each specific monitor also has specific methods you can call to extract data
out of it.
Adding data to a monitor mixin
-----------------------------------------
1. Create a variable for the data you want to capture in your specific monitor's
`mmClearHistory` method. For example,
self._mmTraces["predictedCells"] = IndicesTrace(self, "predicted cells")
Make sure you use the correct type of trace for your data.
2. Add data to this trace in your algorithm's `compute` method (or anywhere
else).
self._mmTraces["predictedCells"].data.append(set(self.getPredictiveCells()))
3. You can optionally add this trace as a default trace in `mmGetDefaultTraces`,
or define a function to return that trace:
def mmGetTracePredictiveCells(self):
Any trace can be converted to a metric using the utility functions provided in
the framework (see `metric.py`).
Extending the functionality of the monitor mixin framework
-----------------------------------------
If you want to add new types of traces and metrics, add them to `trace.py`
and `metric.py`. You can also create new monitors by simply defining new classes
that inherit from MonitorMixinBase.
"""
import abc
import numpy
from prettytable import PrettyTable
from nupic.algorithms.monitor_mixin.plot import Plot
class MonitorMixinBase(object):
  """
  Base class for MonitorMixin. Each subclass will be a mixin for a particular
  algorithm.

  All arguments, variables, and methods in monitor mixin classes should be
  prefixed with "mm" (to avoid collision with the classes they mix in to).
  """
  __metaclass__ = abc.ABCMeta
  def __init__(self, *args, **kwargs):
    """
    Note: If you set the kwarg "mmName", then pretty-printing of traces and
          metrics will include the name you specify as a tag before every title.
    """
    # Pop mmName before delegating so the mixed-in class never sees it.
    self.mmName = kwargs.get("mmName")
    if "mmName" in kwargs:
      del kwargs["mmName"]
    super(MonitorMixinBase, self).__init__(*args, **kwargs)
    # Mapping from key (string) => trace (Trace)
    self._mmTraces = None
    self._mmData = None
    self.mmClearHistory()
  def mmClearHistory(self):
    """
    Clears the stored history.
    """
    self._mmTraces = {}
    self._mmData = {}
  @staticmethod
  def mmPrettyPrintTraces(traces, breakOnResets=None):
    """
    Returns pretty-printed table of traces.

    @param traces        (list) Traces to print in table
    @param breakOnResets (BoolsTrace) Trace of resets to break table on

    @return (string) Pretty-printed table of traces.
    """
    assert len(traces) > 0, "No traces found"
    table = PrettyTable(["#"] + [trace.prettyPrintTitle() for trace in traces])
    # All traces are assumed to be the same length as the first one.
    for i in xrange(len(traces[0].data)):
      if breakOnResets and breakOnResets.data[i]:
        table.add_row(["<reset>"] * (len(traces) + 1))
      table.add_row([i] +
        [trace.prettyPrintDatum(trace.data[i]) for trace in traces])
    return table.get_string().encode("utf-8")
  @staticmethod
  def mmPrettyPrintMetrics(metrics, sigFigs=5):
    """
    Returns pretty-printed table of metrics.

    @param metrics (list) Traces to print in table
    @param sigFigs (int)  Number of significant figures to print

    @return (string) Pretty-printed table of metrics.
    """
    assert len(metrics) > 0, "No metrics found"
    table = PrettyTable(["Metric", "mean", "standard deviation",
                         "min", "max", "sum", ])
    for metric in metrics:
      table.add_row([metric.prettyPrintTitle()] + metric.getStats())
    return table.get_string().encode("utf-8")
  def mmGetDefaultTraces(self, verbosity=1):
    """
    Returns list of default traces. (To be overridden.)

    @param verbosity (int) Verbosity level

    @return (list) Default traces
    """
    return []
  def mmGetDefaultMetrics(self, verbosity=1):
    """
    Returns list of default metrics. (To be overridden.)

    @param verbosity (int) Verbosity level

    @return (list) Default metrics
    """
    return []
  def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="",
                         showReset=False, resetShading=0.25):
    """
    Returns plot of the cell activity. Note that if many timesteps of
    activities are input, matplotlib's image interpolation may omit activities
    (columns in the image).

    @param cellTrace    (list)   a temporally ordered list of sets of cell
                                 activities

    @param cellCount    (int)    number of cells in the space being rendered

    @param activityType (string) type of cell activity being displayed

    @param title        (string) an optional title for the figure

    @param showReset    (bool)   if true, the first set of cell activities
                                 after a reset will have a grayscale background

    @param resetShading (float)  applicable if showReset is true, specifies the
                                 intensity of the reset background with 0.0
                                 being white and 1.0 being black

    @return (Plot) plot
    """
    plot = Plot(self, title)
    resetTrace = self.mmGetTraceResets().data
    # Build the image column by column; the leading zero column is a seed for
    # numpy.concatenate and appears as a blank first column in the plot.
    data = numpy.zeros((cellCount, 1))
    for i in xrange(len(cellTrace)):
      # Set up a "background" vector that is shaded or blank
      if showReset and resetTrace[i]:
        activity = numpy.ones((cellCount, 1)) * resetShading
      else:
        activity = numpy.zeros((cellCount, 1))
      activeIndices = cellTrace[i]
      activity[list(activeIndices)] = 1
      data = numpy.concatenate((data, activity), 1)
    plot.add2DArray(data, xlabel="Time", ylabel=activityType, name=title)
    return plot
| 7,351 | Python | .py | 164 | 39.573171 | 80 | 0.685008 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,927 | plot.py | numenta_nupic-legacy/src/nupic/algorithms/monitor_mixin/plot.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot class used in monitor mixin framework.
"""
import os
try:
# We import in here to avoid creating a matplotlib dependency in nupic.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
except ImportError:
# Suppress this optional dependency on matplotlib. NOTE we don't log this,
# because python logging implicitly adds the StreamHandler to root logger when
# calling `logging.debug`, etc., which may undermine an application's logging
# configuration.
plt = None
cm = None
class Plot(object):
  def __init__(self, monitor, title, show=True):
    """
    @param monitor (MonitorMixinBase) Monitor Mixin instance that generated
                                      this plot

    @param title   (string)           Plot title

    @param show    (bool)             If True, switch matplotlib to interactive
                                      mode and display the figure immediately
    """
    self._monitor = monitor
    self._title = title
    self._fig = self._initFigure()
    self._show = show
    if self._show:
      # Interactive mode so later draw() calls update the window live.
      plt.ion()
      plt.show()
def _initFigure(self):
fig = plt.figure()
fig.suptitle(self._prettyPrintTitle())
return fig
def _prettyPrintTitle(self):
if self._monitor.mmName is not None:
return "[{0}] {1}".format(self._monitor.mmName, self._title)
return self._title
def addGraph(self, data, position=111, xlabel=None, ylabel=None):
""" Adds a graph to the plot's figure.
@param data See matplotlib.Axes.plot documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.plot(data)
plt.draw()
def addHistogram(self, data, position=111, xlabel=None, ylabel=None,
bins=None):
""" Adds a histogram to the plot's figure.
@param data See matplotlib.Axes.hist documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.hist(data, bins=bins, color="green", alpha=0.8)
plt.draw()
def add2DArray(self, data, position=111, xlabel=None, ylabel=None, cmap=None,
aspect="auto", interpolation="nearest", name=None):
""" Adds an image to the plot's figure.
@param data a 2D array. See matplotlib.Axes.imshow documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@param cmap color map used in the rendering
@param aspect how aspect ratio is handled during resize
@param interpolation interpolation method
"""
if cmap is None:
# The default colormodel is an ugly blue-red model.
cmap = cm.Greys
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)
if self._show:
plt.draw()
if name is not None:
if not os.path.exists("log"):
os.mkdir("log")
plt.savefig("log/{name}.png".format(name=name), bbox_inches="tight",
figsize=(8, 6), dpi=400)
def _addBase(self, position, xlabel=None, ylabel=None):
""" Adds a subplot to the plot's figure at specified position.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@returns (matplotlib.Axes) Axes instance
"""
ax = self._fig.add_subplot(position)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
| 5,229 | Python | .py | 120 | 38.158333 | 80 | 0.680843 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,928 | metric.py | numenta_nupic-legacy/src/nupic/algorithms/monitor_mixin/metric.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Metric class used in monitor mixin framework.
"""
import numpy
class Metric(object):
  """
  Summary statistics (min / max / sum / mean / standard deviation) computed
  over a set of data (usually from a `CountsTrace`).

  All statistics are None until a non-empty data list has been processed.
  """

  def __init__(self, monitor, title, data):
    """
    @param monitor (MonitorMixinBase) Monitor Mixin instance that generated
                                      this trace
    @param title   (string)           Title
    @param data    (list)             List of numbers to compute metric from
    """
    self.monitor = monitor
    self.title = title

    # Statistics stay None for empty data.
    self.min = self.max = self.sum = None
    self.mean = self.standardDeviation = None

    self._computeStats(data)

  @staticmethod
  def createFromTrace(trace, excludeResets=None):
    """Build a Metric from a trace, optionally skipping reset timesteps."""
    if excludeResets is None:
      data = list(trace.data)
    else:
      # Drop entries whose corresponding reset flag is set.
      data = [x for i, x in enumerate(trace.data) if not excludeResets.data[i]]
    return Metric(trace.monitor, trace.title, data)

  def copy(self):
    """Return a new Metric carrying the same statistics (data not re-scanned)."""
    duplicate = Metric(self.monitor, self.title, [])

    duplicate.min = self.min
    duplicate.max = self.max
    duplicate.sum = self.sum
    duplicate.mean = self.mean
    duplicate.standardDeviation = self.standardDeviation

    return duplicate

  def prettyPrintTitle(self):
    """Prefix the title with the monitor's name, when one is set."""
    if self.monitor.mmName is None:
      return self.title
    return "[{0}] {1}".format(self.monitor.mmName, self.title)

  def _computeStats(self, data):
    # Empty data leaves every statistic as None.
    if len(data) == 0:
      return

    self.min = min(data)
    self.max = max(data)
    self.sum = sum(data)
    self.mean = numpy.mean(data)
    self.standardDeviation = numpy.std(data)

  def getStats(self, sigFigs=7):
    """Return [mean, std, min, max, sum], each rounded to `sigFigs` digits."""
    if self.mean is None:
      return [None] * 5
    return [round(value, sigFigs) for value in
            (self.mean, self.standardDeviation, self.min, self.max, self.sum)]
| 2,862 | Python | .py | 76 | 32.907895 | 79 | 0.654611 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,929 | configuration.py | numenta_nupic-legacy/src/nupic/support/configuration.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.support.configuration_custom import Configuration
| 1,039 | Python | .py | 21 | 48.380952 | 72 | 0.679134 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,930 | fs_helpers.py | numenta_nupic-legacy/src/nupic/support/fs_helpers.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module contains file-system helper functions.
"""
import errno
import os
def makeDirectoryFromAbsolutePath(absDirPath):
  """ Makes directory for the given directory path with default permissions.
  If the directory already exists, it is treated as success.

  :param absDirPath: (string) absolute path of the directory to create.
  :raises: OSError if directory creation fails for any reason other than the
           directory already existing
  :returns: (string) absolute path provided
  """
  assert os.path.isabs(absDirPath)

  try:
    os.makedirs(absDirPath)
  except OSError as e:
    # EEXIST means the directory is already present, which we treat as
    # success; any other failure (permissions, bad path, ...) propagates.
    # Uses the errno module directly instead of the accidental `os.errno`
    # alias, and `except ... as` instead of the Python-2-only comma form.
    if e.errno != errno.EEXIST:
      raise

  return absDirPath
| 1,566 | Python | .py | 38 | 39.078947 | 76 | 0.702436 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,931 | enum.py | numenta_nupic-legacy/src/nupic/support/enum.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import re
import keyword
import functools
# Pattern for a syntactically legal Python identifier (letters, digits,
# underscores; must not start with a digit).
__IDENTIFIER_PATTERN = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')


def __isidentifier(s):
  """Return True if `s` is a legal, non-keyword Python identifier."""
  return (s not in keyword.kwlist
          and __IDENTIFIER_PATTERN.match(s) is not None)
def Enum(*args, **kwargs):
  """
  Utility function for creating enumerations in python

  Positional arguments become members whose value is the member's own name
  (a string); keyword arguments become members with the given explicit
  values.

  Example Usage:

  >> Color = Enum("Red", "Green", "Blue", "Magenta")
  >> print Color.Red
  >> 'Red'
  >> print Color.Green
  >> 'Green'
  >> print Color.Blue
  >> 'Blue'
  >> print Color.Magenta
  >> 'Magenta'
  >> Color.getLabel(Color.Red)
  >> 'Red'
  >> Color.getLabel('Blue')
  >> 'Blue'

  """
  # The helpers below are bound to the generated type via functools.partial,
  # so `cls` is always the enum type itself.
  def getLabel(cls, val):
    """ Get a string label for the current value of the enum """
    return cls.__labels[val]

  def validate(cls, val):
    """ Returns True if val is a valid value for the enumeration """
    return val in cls.__values

  def getValues(cls):
    """ Returns a list of all the possible values for this enum """
    return list(cls.__values)

  def getLabels(cls):
    """ Returns a list of all possible labels for this enum """
    return list(cls.__labels.values())

  def getValue(cls, label):
    """ Returns value given a label """
    # NOTE(review): __labels maps value -> label; this lookup behaves as
    # "label -> value" only because positional members use their name as
    # their value.  Verify for members defined via kwargs.
    return cls.__labels[label]

  # Every member name (positional or keyword) must be a string that is a
  # legal Python identifier, since it becomes a class attribute.
  for arg in list(args)+kwargs.keys():
    if type(arg) is not str:
      raise TypeError("Enum arg {0} must be a string".format(arg))

    if not __isidentifier(arg):
      raise ValueError("Invalid enum value '{0}'. "\
                       "'{0}' is not a valid identifier".format(arg))

  # Positional members take their own name as their value (the commented-out
  # line is the older integer-valued scheme).
  #kwargs.update(zip(args, range(len(args))))
  kwargs.update(zip(args, args))
  newType = type("Enum", (object,), kwargs)

  # __labels maps each member value back to its member name.
  newType.__labels = dict( (v,k) for k,v in kwargs.iteritems())
  newType.__values = set(newType.__labels.keys())
  newType.getLabel = functools.partial(getLabel, newType)
  newType.validate = functools.partial(validate, newType)
  newType.getValues = functools.partial(getValues, newType)
  newType.getLabels = functools.partial(getLabels, newType)
  newType.getValue = functools.partial(getValue, newType)

  return newType
if __name__ == '__main__':
  # Minimal smoke test when run as a script: building enums must not raise.
  Color = Enum("Red", "Blue")
  Shape = Enum("Square", "Triangle")
| 3,164 | Python | .py | 83 | 34.481928 | 72 | 0.660242 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,932 | group_by.py | numenta_nupic-legacy/src/nupic/support/group_by.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from itertools import groupby
def groupby2(*args):
  """ Like itertools.groupby, with the following additions:

  - Supports multiple sequences. Instead of returning (k, g), each iteration
    returns (k, g0, g1, ...), with one `g` for each input sequence. The value
    of each `g` is either a non-empty iterator or `None`.
  - It treats the value `None` as an empty sequence. So you can make
    subsequent calls to groupby2 on any `g` value.

  .. note:: Read up on groupby here:
            https://docs.python.org/dev/library/itertools.html#itertools.groupby

  :param args: (list) Parameters alternating between sorted lists and their
                      respective key functions. The lists should be sorted
                      with respect to their key function.

  :returns: (tuple) A n + 1 dimensional tuple, where the first element is the
                    key of the iteration, and the other n entries are groups of
                    objects that share this key. Each group corresponds to
                    an input sequence. `groupby2` is a generator that returns
                    a tuple for every iteration. If an input sequence has no
                    members with the current key, None is returned in place
                    of a generator.
  """
  if len(args) % 2 == 1:
    raise ValueError("Must have a key function for every list.")

  generatorList = []  # list of each list's (k, group) generator, or None
  advanceList = []    # whether to pull the next group from each generator

  # populate above lists
  for i in range(0, len(args), 2):
    listn = args[i]
    fn = args[i + 1]
    if listn is not None:
      generatorList.append(groupby(listn, fn))
      advanceList.append(True)  # start by advancing everyone.
    else:
      # A None sequence is treated as empty: never advanced, never matches.
      generatorList.append(None)
      advanceList.append(False)

  n = len(generatorList)

  nextList = [None] * n
  # while all lists aren't exhausted walk through each group in order
  while True:
    for i in range(n):
      if advanceList[i]:
        try:
          # Use the next() builtin rather than the Python-2-only .next()
          # method of generators.
          nextList[i] = next(generatorList[i])
        except StopIteration:
          nextList[i] = None

    # no more values to process in any of the generators
    if all(entry is None for entry in nextList):
      break

    # the minimum key value in the nextList
    minKeyVal = min(nextVal[0] for nextVal in nextList
                    if nextVal is not None)

    # populate the tuple to return based on minKeyVal; only generators whose
    # current key equals the minimum are consumed this round.
    retGroups = [minKeyVal]
    for i in range(n):
      if nextList[i] is not None and nextList[i][0] == minKeyVal:
        retGroups.append(nextList[i][1])
        advanceList[i] = True
      else:
        advanceList[i] = False
        retGroups.append(None)

    yield tuple(retGroups)
| 3,629 | Python | .py | 80 | 39.425 | 80 | 0.661194 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,933 | console_printer.py | numenta_nupic-legacy/src/nupic/support/console_printer.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module defines :class:`ConsolePrinterMixin` and :class:`Tee`.
The :class:`ConsolePrinterMixin` is used by objects that need to print to the
screen under the control of a verbosity level.
The :class:`Tee` class is used to redirect standard output to a file in addition
to sending it to the console.
"""
import sys
class ConsolePrinterMixin(object):
  """Mixin class for printing to the console with different verbosity levels.

  :param verbosity: (int)
        0: don't print anything to stdout
        1: normal (production-level) printing
        2: extra debug information
        3: lots of debug information
  """

  def __init__(self, verbosity=1):
    # The internal attribute is consolePrinterVerbosity to make it
    # more clear where it comes from (without having to trace back
    # through the class hierarchy). This attribute is normally
    # not accessed directly, but it is fine to read or write it
    # directly if you know what you're doing.
    self.consolePrinterVerbosity = verbosity

  def cPrint(self, level, message, *args, **kw):
    """Print a message to the console.

    Prints only if level <= self.consolePrinterVerbosity
    Printing with level 0 is equivalent to using a print statement,
    and should normally be avoided.

    :param level: (int) indicating the urgency of the message with
           lower values meaning more urgent (messages at level 0  are the most
           urgent and are always printed)

    :param message: (string) possibly with format specifiers

    :param args: specifies the values for any format specifiers in message

    :param kw: newline is the only keyword argument. True (default) if a
           newline should be printed
    """
    # Suppress messages above the configured verbosity.
    if level > self.consolePrinterVerbosity:
      return

    # 'newline' is the only keyword accepted; anything else is an error.
    if len(kw) > 1:
      raise KeyError("Invalid keywords for cPrint: %s" % str(kw.keys()))

    newline = kw.get("newline", True)
    if len(kw) == 1 and 'newline' not in kw:
      raise KeyError("Invalid keyword for cPrint: %s" % kw.keys()[0])

    # Only apply %-formatting when format args were given, so a literal
    # message containing '%' does not need escaping.  The trailing comma
    # suppresses the newline (Python 2 print statement).
    if len(args) == 0:
      if newline:
        print message
      else:
        print message,
    else:
      if newline:
        print message % args
      else:
        print message % args,
class Tee(object):
  """Captures standard output and writes it to a file in addition to sending
  it to the console.

  Replaces ``sys.stdout`` upon construction; :meth:`close` (or leaving the
  ``with`` block) restores the original stream.
  """

  def __init__(self, outputFile):
    """
    :param outputFile: (string) path of the file that receives a copy of
        everything written to stdout
    """
    # buffering=False yields an unbuffered file in Python 2 so output appears
    # immediately.  NOTE(review): Python 3 rejects unbuffered text-mode files.
    self.outputFile = open(outputFile, 'w', buffering=False)
    self.stdout = sys.stdout
    sys.stdout = self

  def write(self, s):
    """Write `s` to both the capture file and the real stdout."""
    self.outputFile.write(s)
    self.stdout.write(s)

  def flush(self):
    """Flush both underlying streams."""
    self.stdout.flush()
    self.outputFile.flush()

  def fileno(self):
    """Expose the capture file's descriptor (e.g. for subprocess redirection)."""
    return self.outputFile.fileno()

  def close(self):
    """Close the capture file and restore the original ``sys.stdout``."""
    self.outputFile.close()
    sys.stdout = self.stdout

  def __enter__(self):
    # Fixed: this previously returned None, so ``with Tee(path) as t:``
    # bound ``t`` to None instead of the Tee instance.
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    self.close()
| 3,869 | Python | .py | 96 | 35.895833 | 80 | 0.687383 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,934 | configuration_custom.py | numenta_nupic-legacy/src/nupic/support/configuration_custom.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This Configuration implementation allows for persistent configuration updates
stored in ``nupic-custom.xml`` in the site conf folder.
"""
from __future__ import with_statement
from copy import copy
import errno
import logging
import os
import sys
import traceback
from xml.etree import ElementTree
from nupic.support.fs_helpers import makeDirectoryFromAbsolutePath
from nupic.support.configuration_base import Configuration as ConfigurationBase
def _getLogger():
return logging.getLogger("com.numenta.nupic.tools.configuration_custom")
class Configuration(ConfigurationBase):
  """
  This class extends the
  :class:`nupic.support.configuration_base.ConfigurationBase` implementation
  with the ability to read and write custom, persistent parameters. The custom
  settings will be stored in the ``nupic-custom.xml`` file.

  If the environment variable ``NTA_CONF_PATH`` is defined, then the
  configuration files are expected to be in the ``NTA_CONF_PATH`` search path,
  which is a ``:`` separated list of directories (on Windows the separator is a
  ``;``). If ``NTA_CONF_PATH`` is not defined, then it is assumed to be
  ``$NTA/conf/default`` (typically ``~/nupic/current/conf/default``).
  """

  @classmethod
  def getCustomDict(cls):
    """
    returns: (dict) containing all custom configuration properties.
    """
    # Delegates to the file wrapper, which reads nupic-custom.xml directly.
    return _CustomConfigurationFileWrapper.getCustomDict()

  @classmethod
  def setCustomProperty(cls, propertyName, value):
    """
    Set a single custom setting and persist it to the custom configuration
    store.

    :param propertyName: (string) containing the name of the property to get
    :param value: (object) value to set the property to
    """
    cls.setCustomProperties({propertyName : value})

  @classmethod
  def setCustomProperties(cls, properties):
    """
    Set multiple custom properties and persist them to the custom configuration
    store.

    :param properties: (dict) of property name/value pairs to set
    """
    _getLogger().info("Setting custom configuration properties=%r; caller=%r",
                      properties, traceback.format_stack())

    # Persist to nupic-custom.xml first, then update the in-memory cache so
    # subsequent get() calls see the new values without a reload.
    _CustomConfigurationFileWrapper.edit(properties)

    for propertyName, value in properties.iteritems():
      cls.set(propertyName, value)

  @classmethod
  def clear(cls):
    """
    Clear all configuration properties from in-memory cache, but do NOT alter
    the custom configuration file. Used in unit-testing.
    """
    # Clear the in-memory settings cache, forcing reload upon subsequent "get"
    # request.
    super(Configuration, cls).clear()

    # Reset in-memory custom configuration info.
    _CustomConfigurationFileWrapper.clear(persistent=False)

  @classmethod
  def resetCustomConfig(cls):
    """
    Clear all custom configuration settings and delete the persistent custom
    configuration store.
    """
    _getLogger().info("Resetting all custom configuration properties; "
                      "caller=%r", traceback.format_stack())

    # Clear the in-memory settings cache, forcing reload upon subsequent "get"
    # request.
    super(Configuration, cls).clear()

    # Delete the persistent custom configuration store and reset in-memory
    # custom configuration info
    _CustomConfigurationFileWrapper.clear(persistent=True)

  @classmethod
  def loadCustomConfig(cls):
    """
    Loads custom configuration settings from their persistent storage.

    .. warning :: DO NOT CALL THIS: It's typically not necessary to call this
       method directly. This method exists *solely* for the benefit of
       ``prepare_conf.py``, which needs to load configuration files selectively.
    """
    cls.readConfigFile(_CustomConfigurationFileWrapper.customFileName)

  @classmethod
  def _readStdConfigFiles(cls):
    """ Intercept the _readStdConfigFiles call from our base config class to
    read in base and custom configuration settings.
    """
    super(Configuration, cls)._readStdConfigFiles()

    # Custom settings are loaded last so they override the base settings.
    cls.loadCustomConfig()
class _CustomConfigurationFileWrapper(object):
  """
  Private class to handle creation, deletion and editing of the custom
  configuration file used by this implementation of Configuration.

  Supports persistent changes to nupic-custom.xml configuration file.

  This class only applies changes to the local instance.
  For cluster wide changes see nupic-services.py or nupic.cluster.NupicServices
  """
  # Name of the custom xml file to be created
  customFileName = 'nupic-custom.xml'

  # Stores the path to the file
  # If none, findConfigFile is used to find path to file; defaults to
  # NTA_CONF_PATH[0]
  _path = None

  @classmethod
  def clear(cls, persistent=False):
    """ If persistent is True, delete the temporary file

    Parameters:
    ----------------------------------------------------------------
    persistent: if True, custom configuration file is deleted
    """
    if persistent:
      try:
        os.unlink(cls.getPath())
      except OSError, e:
        # A missing file (ENOENT) is fine -- there was simply nothing to
        # delete; any other error is logged and re-raised.
        if e.errno != errno.ENOENT:
          _getLogger().exception("Error %s while trying to remove dynamic " \
            "configuration file: %s", e.errno, cls.getPath())
          raise
    # Force getPath() to recompute the location on next use.
    cls._path = None

  @classmethod
  def getCustomDict(cls):
    """ Returns a dict of all temporary values in custom configuration file

    """
    # No custom file yet means no custom properties.
    if not os.path.exists(cls.getPath()):
      return dict()

    properties = Configuration._readConfigFile(os.path.basename(
      cls.getPath()), os.path.dirname(cls.getPath()))

    values = dict()
    for propName in properties:
      if 'value' in properties[propName]:
        values[propName] = properties[propName]['value']

    return values

  @classmethod
  def edit(cls, properties):
    """ Edits the XML configuration file with the parameters specified by
    properties

    Parameters:
    ----------------------------------------------------------------
    properties: dict of settings to be applied to the custom configuration store
                 (key is property name, value is value)
    """
    # Work on a copy; matched entries are popped so that only properties not
    # already present in the file remain to be appended at the end.
    copyOfProperties = copy(properties)

    configFilePath = cls.getPath()

    # Read the existing store; a missing file is replaced by an empty
    # <configuration/> document, any other read error is fatal.
    try:
      with open(configFilePath, 'r') as fp:
        contents = fp.read()
    except IOError, e:
      if e.errno != errno.ENOENT:
        _getLogger().exception("Error %s reading custom configuration store "
          "from %s, while editing properties %s.",
          e.errno, configFilePath, properties)
        raise
      contents = '<configuration/>'

    try:
      elements = ElementTree.XML(contents)
      # Round-trip through tostring() to validate the parsed tree.
      ElementTree.tostring(elements)
    except Exception, e:
      # Raising error as RuntimeError with custom message since ElementTree
      # exceptions aren't clear.
      msg = "File contents of custom configuration is corrupt. File " \
            "location: %s; Contents: '%s'. Original Error (%s): %s." % \
            (configFilePath, contents, type(e), e)
      _getLogger().exception(msg)
      # Python 2 three-expression raise: preserves the original traceback.
      raise RuntimeError(msg), None, sys.exc_info()[2]

    if elements.tag != 'configuration':
      e = "Expected top-level element to be 'configuration' but got '%s'" % \
          (elements.tag)
      _getLogger().error(e)
      raise RuntimeError(e)

    # Apply new properties to matching settings in the custom config store;
    # pop matching properties from our copy of the properties dict
    for propertyItem in elements.findall('./property'):
      propInfo = dict((attr.tag, attr.text) for attr in propertyItem)
      name = propInfo['name']
      if name in copyOfProperties:
        foundValues = propertyItem.findall('./value')
        if len(foundValues) > 0:
          foundValues[0].text = str(copyOfProperties.pop(name))
          # All requested properties matched existing entries; stop scanning.
          if not copyOfProperties:
            break
        else:
          e = "Property %s missing value tag." % (name,)
          _getLogger().error(e)
          raise RuntimeError(e)

    # Add unmatched remaining properties to custom config store
    for propertyName, value in copyOfProperties.iteritems():
      newProp = ElementTree.Element('property')

      nameTag = ElementTree.Element('name')
      nameTag.text = propertyName
      newProp.append(nameTag)

      valueTag = ElementTree.Element('value')
      valueTag.text = str(value)
      newProp.append(valueTag)

      elements.append(newProp)

    # Write the updated document back, creating the directory if needed.
    try:
      makeDirectoryFromAbsolutePath(os.path.dirname(configFilePath))
      with open(configFilePath,'w') as fp:
        fp.write(ElementTree.tostring(elements))
    except Exception, e:
      _getLogger().exception("Error while saving custom configuration "
        "properties %s in %s.", properties, configFilePath)
      raise

  @classmethod
  def _setPath(cls):
    """ Sets the path of the custom configuration file
    """
    # The custom store lives in the dynamic conf directory given by the
    # NTA_DYNAMIC_CONF_DIR environment variable (required to be set).
    cls._path = os.path.join(os.environ['NTA_DYNAMIC_CONF_DIR'],
                             cls.customFileName)

  @classmethod
  def getPath(cls):
    """ Get the path of the custom configuration file
    """
    # Lazily computed and cached until clear() resets it.
    if cls._path is None:
      cls._setPath()
    return cls._path
| 10,011 | Python | .py | 242 | 35.884298 | 80 | 0.692197 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,935 | __init__.py | numenta_nupic-legacy/src/nupic/support/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Package containing modules that are used internally by Numenta Python
tools and plugins to extend standard library functionality.
These modules should NOT be used by client applications.
"""
from __future__ import with_statement
# Standard imports
import os
import sys
import inspect
import logging
import logging.config
import logging.handlers
from platform import python_version
import struct
from StringIO import StringIO
import time
import traceback
from pkg_resources import resource_string, resource_filename
from configuration import Configuration
from nupic.support.fs_helpers import makeDirectoryFromAbsolutePath
# Local imports
def getCallerInfo(depth=2):
  """Utility function to get information about function callers

  The information is the tuple (function/method name, filename, class)
  The class will be None if the caller is just a function and not an object
  method.

  :param depth: (int) how far back in the callstack to go to extract the
      caller info
  """
  frame = sys._getframe(depth)
  methodName = frame.f_code.co_name
  fileName = frame.f_code.co_filename

  # When the frame has arguments, report the class of the first one (the
  # 'self' of a bound method); plain functions with no arguments yield None.
  callerClass = None
  argInfo = inspect.getargvalues(frame)
  if argInfo.args:
    firstArgName = argInfo.args[0]
    callerClass = argInfo.locals[firstArgName].__class__.__name__

  return (methodName, fileName, callerClass)
def title(s=None, additional='', stream=sys.stdout):
  """Utility function to display nice titles

  It automatically extracts the name of the function/method it is called from
  and you can add additional text. title() will then print the name
  of the function/method and the additional text surrounded by two lines
  of dashes. If you don't want the name of the function, you can provide
  alternative text (regardless of the additional text)

  :param s: (string) text to display, uses the function name and arguments by
         default
  :param additional: (string) extra text to display (not needed if s is not
         None)
  :param stream: (stream) the stream to print to. By default goes to standard
         output

  Examples:

  .. code-block:: python

      def foo():
        title()

  displays ``foo`` between two dashed lines, while

  .. code-block:: python

      def foo():
        title(additional='(), this is cool!!!')

  displays ``foo(), this is cool!!!`` between dashed lines, and

  .. code-block:: python

      def foo():
        title('No function name here!')

  displays the given text between dashed lines.
  """
  if s is None:
    # Derive the caption from the calling function/method's name.
    callable_name, file_name, class_name = getCallerInfo(2)
    s = callable_name
    if class_name is not None:
      s = class_name + '.' + callable_name

  text = s + additional
  lines = text.split('\n')
  # The dashed rule is as wide as the longest line of the caption.
  length = max(len(line) for line in lines)
  ruler = '-' * length

  # Write via the stream's file API instead of the Python-2-only
  # ``print >> stream`` statement; the emitted bytes are identical.
  stream.write(ruler + '\n')
  stream.write(text + '\n')
  stream.write(ruler + '\n')
def getArgumentDescriptions(f):
  """
  Get the arguments, default values, and argument descriptions for a function.

  Parses the argument descriptions out of the function docstring, using a
  format something like this:

  ::

    [junk]
    argument_name:     description...
      description...
      description...
    [junk]
    [more arguments]

  It will find an argument as long as the exact argument name starts the line.
  It will then strip a trailing colon, if present, then strip the rest of the
  line and use it to start the description. It will then strip and append any
  subsequent lines with a greater indent level than the original argument name.

  :param f: (function) to inspect
  :returns: (list of tuples) (``argName``, ``argDescription``, ``defaultValue``)
    If an argument has no default value, the tuple is only two elements long (as
    ``None`` cannot be used, since it could be a default value itself).
  """
  # Get the argument names and default values.
  # NOTE: inspect.getargspec() was deprecated in Python 3.0 and removed in
  # Python 3.11. getfullargspec() exposes the same .args/.defaults attributes
  # used below, so prefer it and fall back only on interpreters (Python 2)
  # that don't provide it.
  try:
    argspec = inspect.getfullargspec(f)
  except AttributeError:
    argspec = inspect.getargspec(f)
  # Scan through the docstring to extract documentation for each argument as
  # follows:
  #   Check the first word of the line, stripping a colon if one is present.
  #     If it matches an argument name:
  #       Take the rest of the line, stripping leading whitespace
  #       Take each subsequent line if its indentation level is greater than the
  #         initial indentation level
  #       Once the indentation level is back to the original level, look for
  #         another argument
  docstring = f.__doc__
  descriptions = {}
  if docstring:
    lines = docstring.split('\n')
    i = 0
    while i < len(lines):
      stripped = lines[i].lstrip()
      if not stripped:
        i += 1
        continue
      # Indentation level is index of the first character
      indentLevel = lines[i].index(stripped[0])
      # Get the first word and remove the colon, if present
      firstWord = stripped.split()[0]
      if firstWord.endswith(':'):
        firstWord = firstWord[:-1]
      if firstWord in argspec.args:
        # Found an argument
        argName = firstWord
        restOfLine = stripped[len(firstWord)+1:].strip()
        argLines = [restOfLine]
        # Take the next lines as long as they are indented more
        i += 1
        while i < len(lines):
          stripped = lines[i].lstrip()
          if not stripped:
            # Empty line - stop
            break
          if lines[i].index(stripped[0]) <= indentLevel:
            # No longer indented far enough - stop
            break
          # This line counts too
          argLines.append(lines[i].strip())
          i += 1
        # Store this description
        descriptions[argName] = ' '.join(argLines)
      else:
        # Not an argument
        i += 1
  # Build the list of (argName, description, defaultValue).  Arguments with
  # defaults are the *last* len(defaults) entries of argspec.args.
  args = []
  if argspec.defaults:
    defaultCount = len(argspec.defaults)
  else:
    defaultCount = 0
  nonDefaultArgCount = len(argspec.args) - defaultCount
  for i, argName in enumerate(argspec.args):
    if i >= nonDefaultArgCount:
      defaultValue = argspec.defaults[i - nonDefaultArgCount]
      args.append((argName, descriptions.get(argName, ""), defaultValue))
    else:
      args.append((argName, descriptions.get(argName, "")))
  return args
# Module-level guard so that initLogging() performs its setup at most once per
# process (see the NOTE inside initLogging about re-initialization problems).
gLoggingInitialized = False
def initLogging(verbose=False, console='stdout', consoleLevel='DEBUG'):
  """
  Initialize NuPIC logging by reading in from the logging configuration file.
  The logging configuration file is named ``nupic-logging.conf`` and is expected
  to be in the format defined by the python logging module.

  If the environment variable ``NTA_CONF_PATH`` is defined, then the logging
  configuration file is expected to be in the ``NTA_CONF_PATH`` directory. If
  ``NTA_CONF_PATH`` is not defined, then it is found in the 'conf/default'
  subdirectory of the NuPic installation directory (typically
  ~/nupic/current/conf/default)

  The logging configuration file can use the environment variable
  ``NTA_LOG_DIR`` to set the locations of log files. If this variable is not
  defined, logging to files will be disabled.

  :param verbose: If True, print diagnostic messages about the logging setup
      to stderr.
  :param console: Defines console output for the default "root" logging
      configuration; this may be one of 'stdout', 'stderr', or None;
      Use None to suppress console logging output
  :param consoleLevel:
      Logging-level filter string for console output corresponding to
      logging levels in the logging module; may be one of:
      'DEBUG', 'INFO', 'WARNING', 'ERROR', or 'CRITICAL'.
      E.g., a value of 'WARNING' suppresses DEBUG and INFO level output
      to console, but allows WARNING, ERROR, and CRITICAL
  """
  # NOTE: If you call this twice from the same process there seems to be a
  # bug - logged messages don't show up for loggers that you do another
  # logging.getLogger() on.
  global gLoggingInitialized
  if gLoggingInitialized:
    if verbose:
      print >> sys.stderr, "Logging already initialized, doing nothing."
    return
  # Map the user-facing console arg onto the handler names used inside the
  # nupic-logging.conf template.
  consoleStreamMappings = {
    'stdout'  : 'stdoutConsoleHandler',
    'stderr'  : 'stderrConsoleHandler',
  }
  consoleLogLevels = ['DEBUG', 'INFO', 'WARNING', 'WARN', 'ERROR', 'CRITICAL',
                      'FATAL']
  assert console is None or console in consoleStreamMappings.keys(), (
    'Unexpected console arg value: %r') % (console,)
  assert consoleLevel in consoleLogLevels, (
    'Unexpected consoleLevel arg value: %r') % (consoleLevel)
  # -----------------------------------------------------------------------
  # Setup logging. Look for the nupic-logging.conf file, first in the
  # NTA_CONFIG_DIR path (if defined), then in a subdirectory of the nupic
  # module
  configFilename = 'nupic-logging.conf'
  configFilePath = resource_filename("nupic.support", configFilename)
  configLogDir = os.environ.get('NTA_LOG_DIR', None)
  # Load in the logging configuration file
  if verbose:
    print >> sys.stderr, (
      "Using logging configuration file: %s") % (configFilePath)
  # This dict will hold our replacement strings for logging configuration
  # ($$NAME$$ placeholders in the template get substituted below).
  replacements = dict()
  def makeKey(name):
    """ Makes replacement key """
    return "$$%s$$" % (name)
  platform = sys.platform.lower()
  if platform.startswith('java'):
    # Jython: sys.platform reports 'java...'; get the real OS from the JVM.
    import java.lang
    platform = java.lang.System.getProperty("os.name").lower()
    if platform.startswith('mac os x'):
      platform = 'darwin'
  # Pick the platform-specific syslog socket/address for the syslog handler.
  if platform.startswith('darwin'):
    replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/var/run/syslog"'
  elif platform.startswith('linux'):
    replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/dev/log"'
  elif platform.startswith('win'):
    replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"log"'
  else:
    raise RuntimeError("This platform is neither darwin, win32, nor linux: %s" % (
      sys.platform,))
  # Nupic logs go to file
  replacements[makeKey('PERSISTENT_LOG_HANDLER')] = 'fileHandler'
  # Default the file handler to the null device; overridden below when
  # NTA_LOG_DIR is set.
  if platform.startswith('win'):
    replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = '"NUL"'
  else:
    replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = '"/dev/null"'
  # Set up log file path for the default file handler and configure handlers
  handlers = list()
  if configLogDir is not None:
    logFilePath = _genLoggingFilePath()
    makeDirectoryFromAbsolutePath(os.path.dirname(logFilePath))
    replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = repr(logFilePath)
    handlers.append(replacements[makeKey('PERSISTENT_LOG_HANDLER')])
  if console is not None:
    handlers.append(consoleStreamMappings[console])
  replacements[makeKey('ROOT_LOGGER_HANDLERS')] = ", ".join(handlers)
  # Set up log level for console handlers
  replacements[makeKey('CONSOLE_LOG_LEVEL')] = consoleLevel
  customConfig = StringIO()
  # Using pkg_resources to get the logging file, which should be packaged and
  # associated with this source file name.
  loggingFileContents = resource_string(__name__, configFilename)
  for lineNum, line in enumerate(loggingFileContents.splitlines()):
    if "$$" in line:
      for (key, value) in replacements.items():
        line = line.replace(key, value)
    # If there is still a replacement string in the line, we're missing it
    # from our replacements dict
    if "$$" in line and "$$<key>$$" not in line:
      raise RuntimeError(("The text %r, found at line #%d of file %r, "
                          "contains a string not found in our replacement "
                          "dict.") % (line, lineNum, configFilePath))
    customConfig.write("%s\n" % line)
  customConfig.seek(0)
  # NOTE(review): this is a lexicographic version compare; it is adequate for
  # the historical '2.x' versions this code targets.
  if python_version()[:3] >= '2.6':
    logging.config.fileConfig(customConfig, disable_existing_loggers=False)
  else:
    logging.config.fileConfig(customConfig)
  gLoggingInitialized = True
def _genLoggingFilePath():
""" Generate a filepath for the calling app """
appName = os.path.splitext(os.path.basename(sys.argv[0]))[0] or 'UnknownApp'
appLogDir = os.path.abspath(os.path.join(
os.environ['NTA_LOG_DIR'],
'numenta-logs-%s' % (os.environ['USER'],),
appName))
appLogFileName = '%s-%s-%s.log' % (
appName, long(time.mktime(time.gmtime())), os.getpid())
return os.path.join(appLogDir, appLogFileName)
def aggregationToMonthsSeconds(interval):
  """
  Return the number of months and seconds from an aggregation dict that
  represents a date and time.

  Interval is a dict that contains one or more of the following keys: 'years',
  'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'milliseconds',
  'microseconds'.

  For example:

  ::

    aggregationToMonthsSeconds({'years': 1, 'hours': 4, 'microseconds': 42}) ==
        {'months': 12, 'seconds': 14400.000042}

  :param interval: (dict) The aggregation interval representing a date and time
  :returns: (dict) number of months and seconds in the interval:
      ``{'months': XX, 'seconds': XX}``. The seconds is
      a floating point that can represent resolutions down to a
      microsecond.
  """
  # Fixed-length units all collapse into seconds...
  seconds = interval.get('microseconds', 0) * 0.000001
  seconds += interval.get('milliseconds', 0) * 0.001
  seconds += interval.get('seconds', 0)
  seconds += interval.get('minutes', 0) * 60
  seconds += interval.get('hours', 0) * 60 * 60
  seconds += interval.get('days', 0) * 24 * 60 * 60
  seconds += interval.get('weeks', 0) * 7 * 24 * 60 * 60
  # ...while calendar units (variable length) are tracked separately as months.
  months = interval.get('months', 0)
  months += 12 * interval.get('years', 0)
  return {'months': months, 'seconds': seconds}


def aggregationDivide(dividend, divisor):
  """
  Return the result from dividing two dicts that represent date and time.

  Both dividend and divisor are dicts that contain one or more of the following
  keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds',
  'milliseconds', 'microseconds'.

  For example:

  ::

    aggregationDivide({'hours': 4}, {'minutes': 15}) == 16

  :param dividend: (dict) The numerator, as a dict representing a date and time
  :param divisor: (dict) the denominator, as a dict representing a date and time
  :returns: (float) number of times divisor goes into dividend
  :raises: RuntimeError if one operand is expressed in calendar units
      (months/years) and the other in fixed units (weeks and smaller) -- the
      two are not interconvertible.
  """
  # Normalize both operands into {'months': m, 'seconds': s} form
  dividendMonthSec = aggregationToMonthsSeconds(dividend)
  divisorMonthSec = aggregationToMonthsSeconds(divisor)
  # It is a usage error to mix both months and seconds in the same operation
  if (dividendMonthSec['months'] != 0 and divisorMonthSec['seconds'] != 0) \
    or (dividendMonthSec['seconds'] != 0 and divisorMonthSec['months'] != 0):
    raise RuntimeError("Aggregation dicts with months/years can only be "
                      "inter-operated with other aggregation dicts that contain "
                      "months/years")
  if dividendMonthSec['months'] > 0:
    # BUGFIX: divide by the *normalized* month count (which folds 'years' into
    # months) rather than the raw divisor['months'] key -- the old code raised
    # KeyError whenever the divisor was expressed via 'years' alone.
    return float(dividendMonthSec['months']) / divisorMonthSec['months']
  else:
    return float(dividendMonthSec['seconds']) / divisorMonthSec['seconds']
| 15,849 | Python | .py | 369 | 38.192412 | 82 | 0.689155 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,936 | configuration_base.py | numenta_nupic-legacy/src/nupic/support/configuration_base.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This is the base Configuration implementation. It provides for reading
configuration parameters from ``nupic-site.xml`` and ``nupic-default.xml``.
"""
from __future__ import with_statement
import os
import logging
from xml.etree import ElementTree
from pkg_resources import resource_string
# Turn on additional print statements
DEBUG = False
# File names of the configuration layers. Defaults ship with the package;
# site- and custom-level files may override individual properties.
DEFAULT_CONFIG = "nupic-default.xml"
USER_CONFIG = "nupic-site.xml"
CUSTOM_CONFIG = "nupic-custom.xml"
def _getLogger():
  """Return the logger used by this module, forcing its level down to DEBUG
  whenever the module-level DEBUG flag is set.
  """
  moduleLogger = logging.getLogger("com.numenta.nupic.tools.configuration_base")
  if DEBUG:
    moduleLogger.setLevel(logging.DEBUG)
  return moduleLogger
class Configuration(object):
  """ This class can be used to fetch NuPic configuration settings which are
  stored in one or more XML files.

  If the environment variable ``NTA_CONF_PATH`` is defined, then the
  configuration files are expected to be in the ``NTA_CONF_PATH`` search path,
  which is a ':' separated list of directories (on Windows the separator is a
  ';'). If ``NTA_CONF_PATH`` is not defined, then it is loaded via
  pkg_resources.

  All state is class-level; the class acts as a process-wide singleton and is
  never instantiated.
  """
  # Once we read in the properties, they are stored in this dict
  _properties = None
  # This stores the paths we search for config files. It can be modified through
  # the setConfigPaths() method.
  _configPaths = None
  # Any environment variable prefixed with this string serves as an override
  # to property defined in the current configuration
  envPropPrefix = 'NTA_CONF_PROP_'
  @classmethod
  def getString(cls, prop):
    """ Retrieve the requested property as a string. If property does not exist,
    then KeyError will be raised.

    :param prop: (string) name of the property
    :raises: KeyError
    :returns: (string) property value
    """
    # Lazily load the standard config files on first access
    if cls._properties is None:
      cls._readStdConfigFiles()
    # Allow configuration properties to be overridden via environment variables
    # (dots in the property name map to underscores in the variable name)
    envValue = os.environ.get("%s%s" % (cls.envPropPrefix,
                                        prop.replace('.', '_')), None)
    if envValue is not None:
      return envValue
    return cls._properties[prop]
  @classmethod
  def getBool(cls, prop):
    """ Retrieve the requested property and return it as a bool. If property
    does not exist, then KeyError will be raised. If the property value is
    neither 0 nor 1, then ValueError will be raised

    :param prop: (string) name of the property
    :raises: KeyError, ValueError
    :returns: (bool) property value
    """
    value = cls.getInt(prop)
    # Only a strict 0/1 is accepted, deliberately rejecting other truthy ints
    if value not in (0, 1):
      raise ValueError("Expected 0 or 1, but got %r in config property %s" % (
        value, prop))
    return bool(value)
  @classmethod
  def getInt(cls, prop):
    """ Retrieve the requested property and return it as an int. If property
    does not exist, then KeyError will be raised.

    :param prop: (string) name of the property
    :returns: (int) property value
    """
    return int(cls.getString(prop))
  @classmethod
  def getFloat(cls, prop):
    """ Retrieve the requested property and return it as a float. If property
    does not exist, then KeyError will be raised.

    :param prop: (string) name of the property
    :returns: (float) property value
    """
    return float(cls.getString(prop))
  @classmethod
  def get(cls, prop, default=None):
    """ Get the value of the given configuration property as string. This
    returns a string which is the property value, or the value of "default" arg.
    If the property is not found, use :meth:`getString` instead.

    .. note:: it's atypical for our configuration properties to be missing - a
       missing configuration property is usually a very serious error. Because
       of this, it's preferable to use one of the :meth:`getString`,
       :meth:`getInt`, :meth:`getFloat`, etc. variants instead of :meth:`get`.
       Those variants will raise KeyError when an expected property is missing.

    :param prop: (string) name of the property
    :param default: default value to return if property does not exist
    :returns: (string) property value, or default if the property does not exist
    """
    try:
      return cls.getString(prop)
    except KeyError:
      return default
  @classmethod
  def set(cls, prop, value):
    """ Set the value of the given configuration property (in memory only; the
    XML files are not modified).

    :param prop: (string) name of the property
    :param value: (object) value to set; stored as str(value)
    """
    if cls._properties is None:
      cls._readStdConfigFiles()
    cls._properties[prop] = str(value)
  @classmethod
  def dict(cls):
    """ Return a dict containing all of the configuration properties

    :returns: (dict) containing all configuration properties.
    """
    if cls._properties is None:
      cls._readStdConfigFiles()
    # Make a copy so we can update any current values obtained from environment
    # variables
    result = dict(cls._properties)
    keys = os.environ.keys()
    # Environment overrides (NTA_CONF_PROP_*) win over values from the files
    replaceKeys = filter(lambda x: x.startswith(cls.envPropPrefix),
                         keys)
    for envKey in replaceKeys:
      key = envKey[len(cls.envPropPrefix):]
      key = key.replace('_', '.')
      result[key] = os.environ[envKey]
    return result
  @classmethod
  def readConfigFile(cls, filename, path=None):
    """ Parse the given XML file and store all properties it describes.

    :param filename: (string) name of XML file to parse (no path)
    :param path: (string) path of the XML file. If None, then use the standard
          configuration search path.
    """
    properties = cls._readConfigFile(filename, path)
    # Create properties dict if necessary
    if cls._properties is None:
      cls._properties = dict()
    for name in properties:
      if 'value' in properties[name]:
        cls._properties[name] = properties[name]['value']
  @classmethod
  def _readConfigFile(cls, filename, path=None):
    """ Parse the given XML file and return a dict describing the file.

    :param filename: (string) name of XML file to parse (no path)
    :param path: (string) path of the XML file. If None, then use the standard
          configuration search path.
    :returns: (dict) with each property as a key and a dict of all the
          property's attributes as value
    """
    outputProperties = dict()
    # Get the path to the config files.
    if path is None:
      filePath = cls.findConfigFile(filename)
    else:
      filePath = os.path.join(path, filename)
    # ------------------------------------------------------------------
    # Read in the config file
    try:
      if filePath is not None:
        try:
          # Use warn since console log level is set to warning
          _getLogger().debug("Loading config file: %s", filePath)
          with open(filePath, 'r') as inp:
            contents = inp.read()
        except Exception:
          raise RuntimeError("Expected configuration file at %s" % filePath)
      else:
        # If the file was not found in the normal search paths, which includes
        # checking the NTA_CONF_PATH, we'll try loading it from pkg_resources.
        try:
          contents = resource_string("nupic.support", filename)
        except Exception as resourceException:
          # We expect these to be read, and if they don't exist we'll just use
          # an empty configuration string.
          if filename in [USER_CONFIG, CUSTOM_CONFIG]:
            contents = '<configuration/>'
          else:
            raise resourceException
      elements = ElementTree.XML(contents)
      if elements.tag != 'configuration':
        raise RuntimeError("Expected top-level element to be 'configuration' "
                           "but got '%s'" % (elements.tag))
      # ------------------------------------------------------------------
      # Add in each property found
      propertyElements = elements.findall('./property')
      for propertyItem in propertyElements:
        propInfo = dict()
        # Parse this property element: each child tag (name/value/...) becomes
        # a key of propInfo
        propertyAttributes = list(propertyItem)
        for propertyAttribute in propertyAttributes:
          propInfo[propertyAttribute.tag] = propertyAttribute.text
        # Get the name
        name = propInfo.get('name', None)
        # value is allowed to be empty string
        if 'value' in propInfo and propInfo['value'] is None:
          value = ''
        else:
          value = propInfo.get('value', None)
        if value is None:
          if 'novalue' in propInfo:
            # Placeholder "novalue" properties are intended to be overridden
            # via dynamic configuration or another configuration layer.
            continue
          else:
            raise RuntimeError("Missing 'value' element within the property "
                               "element: => %s " % (str(propInfo)))
        # The value is allowed to contain substitution tags of the form
        # ${env.VARNAME}, which should be substituted with the corresponding
        # environment variable values
        restOfValue = value
        value = ''
        while True:
          # Find the beginning of substitution tag
          pos = restOfValue.find('${env.')
          if pos == -1:
            # No more environment variable substitutions
            value += restOfValue
            break
          # Append prefix to value accumulator
          value += restOfValue[0:pos]
          # Find the end of current substitution tag
          varTailPos = restOfValue.find('}', pos)
          if varTailPos == -1:
            raise RuntimeError("Trailing environment variable tag delimiter '}'"
                               " not found in %r" % (restOfValue))
          # Extract environment variable name from tag
          varname = restOfValue[pos+6:varTailPos]
          if varname not in os.environ:
            raise RuntimeError("Attempting to use the value of the environment"
                               " variable %r, which is not defined" % (varname))
          envVarValue = os.environ[varname]
          value += envVarValue
          restOfValue = restOfValue[varTailPos+1:]
        # Check for errors
        if name is None:
          raise RuntimeError("Missing 'name' element within following property "
                             "element:\n => %s " % (str(propInfo)))
        propInfo['value'] = value
        outputProperties[name] = propInfo
      return outputProperties
    except Exception:
      # Log with full traceback before propagating so parse errors are
      # diagnosable even if a caller swallows the exception
      _getLogger().exception("Error while parsing configuration file: %s.",
                             filePath)
      raise
  @classmethod
  def clear(cls):
    """ Clear out the entire configuration (properties and search paths);
    the next property access will re-read the standard config files.
    """
    cls._properties = None
    cls._configPaths = None
  @classmethod
  def findConfigFile(cls, filename):
    """ Search the configuration path (specified via the NTA_CONF_PATH
    environment variable) for the given filename. If found, return the complete
    path to the file.

    :param filename: (string) name of file to locate
    :returns: (string) full path to the file, or None (implicitly) when the
          file is not found in any search path
    """
    paths = cls.getConfigPaths()
    for p in paths:
      testPath = os.path.join(p, filename)
      if os.path.isfile(testPath):
        return os.path.join(p, filename)
  @classmethod
  def getConfigPaths(cls):
    """ Return the list of paths to search for configuration files.

    :returns: (list) of paths
    """
    configPaths = []
    if cls._configPaths is not None:
      return cls._configPaths
    else:
      # Paths set via setConfigPaths() take precedence; otherwise derive them
      # from the NTA_CONF_PATH environment variable (os.pathsep separated)
      if 'NTA_CONF_PATH' in os.environ:
        configVar = os.environ['NTA_CONF_PATH']
        # Return as a list of paths
        configPaths = configVar.split(os.pathsep)
      return configPaths
  @classmethod
  def setConfigPaths(cls, paths):
    """ Modify the paths we use to search for configuration files.

    :param paths: (list) of paths to search for config files.
    """
    cls._configPaths = list(paths)
  @classmethod
  def _readStdConfigFiles(cls):
    """ Read in all standard configuration files
    """
    # Default one first
    cls.readConfigFile(DEFAULT_CONFIG)
    # Site specific one can override properties defined in default
    cls.readConfigFile(USER_CONFIG)
| 13,061 | Python | .py | 311 | 35.18328 | 80 | 0.659314 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,937 | lock_attributes.py | numenta_nupic-legacy/src/nupic/support/lock_attributes.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
The lock attributes machinery is engaged by default. To deactivate it
define the ``NTA_DONT_USE_LOCK_ATTRIBUTES`` environment variable. The benefit is
that there will be no runtime overhead (Except for a one-time check when classes
that derive from :class:`LockAttributesMixin` are defined or methods decorated
with ``_canAddAttributes`` are defined).
"""
import os
# This is the environment variable that controls the lock attributes
# enforcement. When it is present in os.environ (any value), the metaclass
# below becomes a no-op and no runtime attribute checking is performed.
deactivation_key = 'NTA_DONT_USE_LOCK_ATTRIBUTES'
def _allow_new_attributes(f):
"""A decorator that maintains the attribute lock state of an object
It coperates with the LockAttributesMetaclass (see bellow) that replaces
the __setattr__ method with a custom one that checks the _canAddAttributes
counter and allows setting new attributes only if _canAddAttributes > 0.
New attributes can be set only from methods decorated
with this decorator (should be only __init__ and __setstate__ normally)
The decorator is reentrant (e.g. if from inside a decorated function another
decorated function is invoked). Before invoking the target function it
increments the counter (or sets it to 1). After invoking the target function
it decrements the counter and if it's 0 it removed the counter.
"""
def decorated(self, *args, **kw):
"""The decorated function that replaces __init__() or __setstate__()
"""
# Run the original function
if not hasattr(self, '_canAddAttributes'):
self.__dict__['_canAddAttributes'] = 1
else:
self._canAddAttributes += 1
assert self._canAddAttributes >= 1
# Save add attribute counter
count = self._canAddAttributes
f(self, *args, **kw)
# Restore _CanAddAttributes if deleted from dict (can happen in __setstte__)
if hasattr(self, '_canAddAttributes'):
self._canAddAttributes -= 1
else:
self._canAddAttributes = count - 1
assert self._canAddAttributes >= 0
if self._canAddAttributes == 0:
del self._canAddAttributes
decorated.__doc__ = f.__doc__
decorated.__name__ = f.__name__
return decorated
def _simple_init(self, *args, **kw):
"""trivial init method that just calls base class's __init__()
This method is attached to classes that don't define __init__(). It is needed
because LockAttributesMetaclass must decorate the __init__() method of
its target class.
"""
type(self).__base__.__init__(self, *args, **kw)
class LockAttributesMetaclass(type):
  """This metaclass makes objects attribute-locked by decorating their
  ``__init__`` and ``__setstate__`` methods with the ``_allow_new_attributes``
  decorator.

  It doesn't do anything when the environment variable named by
  ``deactivation_key`` (``NTA_DONT_USE_LOCK_ATTRIBUTES``) is defined. That
  allows for verifying proper usage during testing and skipping it in
  production code (that was verified during testing) to avoid the cost of
  verifying every attribute setting.

  It also replaces the ``__setattr__`` magic method with a custom one that
  verifies new attributes are set only in code that originates from a decorated
  method (normally ``__init__`` or ``__setstate__``).

  If the target class has no ``__init__`` method it adds a trivial ``__init__``
  method to provide a hook for the decorator (the ``_simple_init``
  function defined above)
  """
  # NOTE: 'dict' here is the class namespace dict (standard metaclass
  # signature), intentionally shadowing the builtin inside this method.
  def __init__(cls, name, bases, dict):
    """Instrument the class being created with the lock-attributes machinery.
    """
    def custom_setattr(self, name, value):
      """A custom replacement for __setattr__

      Allows setting only exisitng attributes. It is designed to work
      with the _allow_new_attributes decorator.
      It works is by checking if the requested attribute is already in the
      __dict__ or if the _canAddAttributes counter > 0. Otherwise it raises an
      exception.
      If all is well it calls the original __setattr__. This means it can work
      also with classes that already have custom __setattr__
      """
      if (name == '_canAddAttributes' or
          (hasattr(self, '_canAddAttributes') and self._canAddAttributes > 0) or
          hasattr(self, name)):
        return self._original_setattr(name, value)
      else:
        #from dbgp.client import brk; brk(port=9029)
        raise Exception('Attempting to set a new attribute: ' + name)
    # Bail out if not active. Zero overhead other than this one-time check
    # at class definition time
    if deactivation_key in os.environ:
      return
    # Initialize the super-class
    super(LockAttributesMetaclass, cls).__init__(name, bases, dict)
    # Store and replace the __setattr__ with the custom one (if needed).
    # The hasattr guard ensures this happens only once per class hierarchy.
    if not hasattr(cls, '_original_setattr'):
      cls._original_setattr = cls.__setattr__
      cls.__setattr__ = custom_setattr
    # Keep the original __init__ if exists. This was needed for NuPIC 1. Remove?
    if '__init__' in dict:
      setattr(cls, '_original_init', dict['__init__'])
    # Get the __init__ and __setstate__ form the target class's dict
    # If there is no __init__ use _simple_init (it's Ok if there is no
    #__setstate__)
    methods = [('__init__', dict.get('__init__', _simple_init)),
               ('__setstate__', dict.get('__setstate__', None))]
    # Wrap the methods with _allow_new_attributes decorator
    for name, method in methods:
      if method is not None:
        setattr(cls, name, _allow_new_attributes(method))
class LockAttributesMixin(object):
  """This class serves as a base (or mixin) for classes that want to enforce
  the locked attributes pattern (all attributes should be defined in
  ``__init__`` or ``__setstate__``).

  All the target class has to do is add LockAttributesMixin as one of its bases
  (inherit from it).

  The metaclass will be activated when the application class is created
  and the lock attributes machinery will be injected (unless the
  deactivation_key is defined in the environment)
  """
  # NOTE: the __metaclass__ attribute is the Python 2 metaclass mechanism;
  # on Python 3 this assignment would have no effect.
  __metaclass__ = LockAttributesMetaclass
| 6,860 | Python | .py | 141 | 44.503546 | 80 | 0.70012 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,938 | decorators.py | numenta_nupic-legacy/src/nupic/support/decorators.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import functools
import logging
import sys
import time
import traceback
# TODO Need unit tests
def logExceptions(logger=None):
  """ Returns a closure suitable for use as function/method decorator for
  logging exceptions that leave the scope of the decorated function. Exceptions
  are logged at ERROR level.

  :param logger: user-supplied logger instance. Defaults to
      logging.getLogger(__name__) when None.

  Usage Example:

  NOTE: logging must be initialized *before* any loggers are created, else
  there will be no output; see nupic.support.initLogging()

  .. code-block:: python

    @logExceptions()
    def myFunctionFoo():
        ...
        raise RuntimeError("something bad happened")
        ...
  """
  if logger is None:
    logger = logging.getLogger(__name__)
  def decorate(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
      try:
        return func(*args, **kwargs)
      except:
        # Deliberate bare except: log absolutely anything that escapes the
        # decorated function, then always re-raise unchanged.
        logger.exception(
          "Unhandled exception %r from %r. Caller stack:\n%s",
          sys.exc_info()[1], func, ''.join(traceback.format_stack()), )
        raise
    return wrapper
  return decorate
def logEntryExit(getLoggerCallback=logging.getLogger,
                 entryExitLogLevel=logging.DEBUG, logArgs=False,
                 logTraceback=False):
  """ Returns a closure suitable for use as function/method decorator for
  logging entry/exit of function/method.

  :param getLoggerCallback: user-supplied callback function that takes no args
      and returns the logger instance to use for logging.
  :param entryExitLogLevel: Log level for logging entry/exit of decorated
      function; e.g., logging.DEBUG; pass None to disable entry/exit logging.
  :param logArgs: If True, also log args
  :param logTraceback: If True, also log Traceback information

  Usage Examples:

  NOTE: logging must be initialized *before* any loggers are created, else
  there will be no output; see nupic.support.initLogging()

  .. code-block:: python

    @logEntryExit()
    def myFunctionBar():
        ...

    @logEntryExit(logTraceback=True)
    @logExceptions()
    def myFunctionGamma():
        ...
        raise RuntimeError("something bad happened")
        ...
  """
  def entryExitLoggingDecorator(func):
    @functools.wraps(func)
    def entryExitLoggingWrap(*args, **kwargs):
      # Resolve the logger lazily, per call, so logging configuration applied
      # after decoration still takes effect.
      if entryExitLogLevel is None:
        enabled = False
      else:
        logger = getLoggerCallback()
        enabled = logger.isEnabledFor(entryExitLogLevel)
      if not enabled:
        return func(*args, **kwargs)
      funcName = str(func)
      if logArgs:
        # COMPAT FIX: dict.iteritems() is Python-2-only and raises
        # AttributeError on Python 3; items() produces identical output on
        # both versions here.
        argsRepr = ', '.join(
          [repr(a) for a in args] +
          ['%s=%r' % (k,v,) for k,v in kwargs.items()])
      else:
        argsRepr = ''
      logger.log(
        entryExitLogLevel, "ENTERING: %s(%s)%s", funcName, argsRepr,
        '' if not logTraceback else '; ' + repr(traceback.format_stack()))
      try:
        return func(*args, **kwargs)
      finally:
        # finally guarantees the LEAVING record is emitted even when func
        # raises.
        logger.log(
          entryExitLogLevel, "LEAVING: %s(%s)%s", funcName, argsRepr,
          '' if not logTraceback else '; ' + repr(traceback.format_stack()))
    return entryExitLoggingWrap
  return entryExitLoggingDecorator
def retry(timeoutSec, initialRetryDelaySec, maxRetryDelaySec,
          retryExceptions=(Exception,),
          retryFilter=lambda e, args, kwargs: True,
          logger=None, clientLabel=""):
  """ Returns a closure suitable for use as function/method decorator for
  retrying a function being decorated.

  timeoutSec:           How many seconds from time of initial call to stop
                        retrying (floating point); 0 = no retries
  initialRetryDelaySec: Number of seconds to wait for first retry.
                        Subsequent retries will occur at geometrically
                        doubling intervals up to a maximum interval of
                        maxRetryDelaySec (floating point)
  maxRetryDelaySec:     Maximum amount of seconds to wait between retries
                        (floating point)
  retryExceptions:      A tuple (must be a tuple) of exception classes that,
                        including their subclasses, should trigger retries;
                        Default: any Exception-based exception will trigger
                        retries
  retryFilter:          Optional filter function used to further filter the
                        exceptions in the retryExceptions tuple; called if the
                        current exception meets the retryExceptions criteria:
                        takes the current exception instance, args, and kwargs
                        that were passed to the decorated function, and returns
                        True to retry, False to allow the exception to be
                        re-raised without retrying. Default: permits any
                        exception that matches retryExceptions to be retried.
  logger:               User-supplied logger instance to use for logging.
                        None=defaults to logging.getLogger(__name__).
  clientLabel:          Optional string included in log messages to identify
                        the caller.

  Usage Example:
    NOTE: logging must be initialized *before* any loggers are created, else
      there will be no output; see nupic.support.initLogging()

    _retry = retry(timeoutSec=300, initialRetryDelaySec=0.2,
                   maxRetryDelaySec=10, retryExceptions=(socket.error,))
    @_retry
    def myFunctionFoo():
        ...
        raise RuntimeError("something bad happened")
        ...
  """
  assert initialRetryDelaySec > 0, str(initialRetryDelaySec)
  assert timeoutSec >= 0, str(timeoutSec)
  assert maxRetryDelaySec >= initialRetryDelaySec, \
      "%r < %r" % (maxRetryDelaySec, initialRetryDelaySec)
  assert isinstance(retryExceptions, tuple), (
      "retryExceptions must be tuple, but got %r") % (type(retryExceptions),)

  if logger is None:
    logger = logging.getLogger(__name__)

  def retryDecorator(func):

    @functools.wraps(func)
    def retryWrap(*args, **kwargs):
      numAttempts = 0
      delaySec = initialRetryDelaySec
      startTime = time.time()

      # Make sure it gets called at least once
      while True:
        numAttempts += 1
        try:
          result = func(*args, **kwargs)
        # NOTE: "except ... as e" (rather than the Python 2-only
        # "except ..., e") is valid on Python 2.6+ and Python 3.
        except retryExceptions as e:
          if not retryFilter(e, args, kwargs):
            if logger.isEnabledFor(logging.DEBUG):
              logger.debug(
                '[%s] Failure in %r; retries aborted by custom retryFilter. '
                'Caller stack:\n%s', clientLabel, func,
                ''.join(traceback.format_stack()), exc_info=True)
            raise

          now = time.time()
          # Compensate for negative time adjustment so we don't get stuck
          # waiting way too long (python doesn't provide monotonic time yet)
          if now < startTime:
            startTime = now
          if (now - startTime) >= timeoutSec:
            logger.exception(
              '[%s] Exhausted retry timeout (%s sec.; %s attempts) for %r. '
              'Caller stack:\n%s', clientLabel, timeoutSec, numAttempts, func,
              ''.join(traceback.format_stack()))
            raise

          if numAttempts == 1:
            logger.warning(
              '[%s] First failure in %r; initial retry in %s sec.; '
              'timeoutSec=%s. Caller stack:\n%s', clientLabel, func, delaySec,
              timeoutSec, ''.join(traceback.format_stack()), exc_info=True)
          else:
            logger.debug(
              '[%s] %r failed %s times; retrying in %s sec.; timeoutSec=%s. '
              'Caller stack:\n%s',
              clientLabel, func, numAttempts, delaySec, timeoutSec,
              ''.join(traceback.format_stack()), exc_info=True)

          time.sleep(delaySec)

          # Geometrically double the delay, capped at maxRetryDelaySec.
          delaySec = min(delaySec*2, maxRetryDelaySec)
        else:
          if numAttempts > 1:
            logger.info('[%s] %r succeeded on attempt # %d',
                        clientLabel, func, numAttempts)

          return result

    return retryWrap

  return retryDecorator
| 9,092 | Python | .py | 203 | 35.852217 | 79 | 0.633216 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,939 | pymysql_helpers.py | numenta_nupic-legacy/src/nupic/support/pymysql_helpers.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Helper utilities for python scripts that use pymysql
"""
import inspect
import logging
from socket import error as socket_error
import pymysql
from pymysql.constants import ER
from nupic.support.decorators import retry as make_retry_decorator
# Client mysql error codes of interest; pymysql didn't have constants for these
# at the time of this writing.
# (per https://dev.mysql.com/doc/refman/5.5/en/error-messages-client.html)
# NOTE: the bare string following each constant is documentation only (a no-op
# statement quoting the MySQL client's message template for that code).
CR_CONNECTION_ERROR = 2002
""" Can't connect to local MySQL server through socket '%s' (%d) """
CR_CONN_HOST_ERROR = 2003
""" Can't connect to MySQL server on '%s' (%d) """
CR_UNKNOWN_HOST = 2005
""" Unknown MySQL server host '%s' (%d) """
CR_SERVER_GONE_ERROR = 2006
""" MySQL server has gone away """
CR_TCP_CONNECTION = 2011
""" %s via TCP/IP """
CR_SERVER_HANDSHAKE_ERR = 2012
""" Error in server handshake """
CR_SERVER_LOST = 2013
""" Lost connection to MySQL server during query """
CR_SERVER_LOST_EXTENDED = 2055
""" Lost connection to MySQL server at '%s', system error: %d """
# Client-side error codes that indicate (potentially) transient connectivity
# failures; retrying is reasonable for idempotent operations.
_RETRIABLE_CLIENT_ERROR_CODES = (
  CR_CONNECTION_ERROR,
  CR_CONN_HOST_ERROR,
  CR_UNKNOWN_HOST,
  CR_SERVER_GONE_ERROR,
  CR_TCP_CONNECTION,
  CR_SERVER_HANDSHAKE_ERR,
  CR_SERVER_LOST,
  CR_SERVER_LOST_EXTENDED,
  )
# Server-side error codes worth retrying (table definition changed under us,
# lock wait timeouts, and deadlocks).
_RETRIABLE_SERVER_ERROR_CODES = (
  ER.TABLE_DEF_CHANGED,
  ER.LOCK_WAIT_TIMEOUT,
  ER.LOCK_DEADLOCK,
  #Maybe these also?
  # ER_TOO_MANY_DELAYED_THREADS
  # ER_BINLOG_PURGE_EMFILE
  # ER_TOO_MANY_CONCURRENT_TRXS
  # ER_CON_COUNT_ERROR
  # ER_OUTOFMEMORY
  )
# Union of client- and server-side retriable codes; consulted by retrySQL's
# retryFilter below.
_ALL_RETRIABLE_ERROR_CODES = set(_RETRIABLE_CLIENT_ERROR_CODES +
                                 _RETRIABLE_SERVER_ERROR_CODES)
def retrySQL(timeoutSec=60*5, logger=None):
  """ Return a closure suitable for use as a decorator for
  retrying a pymysql DAO function on certain failures that warrant retries
  (e.g., RDS/MySQL server down temporarily, transaction deadlock, etc.).
  We share this function across multiple scripts (e.g., ClientJobsDAO,
  StreamMgr) for consistent behavior.

  .. note:: Please ensure that the operation being retried is idempotent.

  .. note:: logging must be initialized *before* any loggers are created, else
     there will be no output; see nupic.support.initLogging()

  Usage Example:

  .. code-block:: python

    @retrySQL()
    def jobInfo(self, jobID):
        ...

  :param timeoutSec: How many seconds from time of initial call to stop retrying
                     (floating point)
  :param logger: User-supplied logger instance.
  """
  if logger is None:
    logger = logging.getLogger(__name__)

  def retryFilter(e, args, kwargs):
    """ True iff the given exception represents a retriable MySQL failure. """
    if isinstance(e, (pymysql.InternalError, pymysql.OperationalError)):
      # Retriable iff the MySQL error code is in our whitelist.
      return bool(e.args) and e.args[0] in _ALL_RETRIABLE_ERROR_CODES

    if isinstance(e, pymysql.Error):
      # pymysql wraps low-level socket errors; in that case args[0] holds the
      # socket exception class itself.
      return (bool(e.args) and
              inspect.isclass(e.args[0]) and
              issubclass(e.args[0], socket_error))

    return False

  return make_retry_decorator(
    timeoutSec=timeoutSec, initialRetryDelaySec=0.1, maxRetryDelaySec=10,
    retryExceptions=(pymysql.InternalError, pymysql.OperationalError,
                     pymysql.Error),
    retryFilter=retryFilter,
    logger=logger)
| 4,228 | Python | .py | 109 | 35.440367 | 86 | 0.70669 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,940 | algorithm_test_helpers.py | numenta_nupic-legacy/src/nupic/support/unittesthelpers/algorithm_test_helpers.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This script contains helper routines for testing algorithms"""
import numpy
import time
import traceback
from nupic.bindings.algorithms import SpatialPooler as CPPSpatialPooler
from nupic.bindings.math import GetNTAReal, Random as NupicRandom
from nupic.algorithms.spatial_pooler import SpatialPooler as PySpatialPooler
# dtype name for NTA real values, as configured by the nupic.bindings build
# (presumably "float32" — confirm against GetNTAReal); used for permanence,
# boost and duty-cycle buffers below.
realType = GetNTAReal()
# dtype name used for potential-pool bit arrays.
uintType = "uint32"
def getNumpyRandomGenerator(seed = None):
  """
  Return a numpy random number generator with the given seed.
  If seed is None, set it randomly based on time. Regardless we log
  the actual seed and stack trace so that test failures are replicable.

  :param seed: (int or None) seed for the generator; a time-derived seed is
      generated when None.
  :returns: numpy.random.RandomState seeded with the (possibly generated) seed
  """
  if seed is None:
    seed = int((time.time()%10000)*10)
  callStack = traceback.extract_stack(limit=3)
  # NOTE: printing a single formatted string works under both Python 2 and
  # Python 3; the original print statements were Python 2 only.
  print("Numpy seed set to: %s called by %s line %s -> %s" % (
      seed, callStack[0][2], callStack[0][1], callStack[1][2]))
  return numpy.random.RandomState(seed)
def convertPermanences(sourceSP, destSP):
  """
  Copy the potential pool and permanence values of every column from sourceSP
  into destSP. Used in test routines to counteract some drift between
  implementations.

  Assumes the two SP's have identical configurations/parameters.
  """
  numInputs = sourceSP.getNumInputs()
  for column in xrange(sourceSP.getNumColumns()):
    potentialBuf = numpy.zeros(numInputs, dtype=uintType)
    sourceSP.getPotential(column, potentialBuf)
    destSP.setPotential(column, potentialBuf)

    permanenceBuf = numpy.zeros(numInputs, dtype=realType)
    sourceSP.getPermanence(column, permanenceBuf)
    destSP.setPermanence(column, permanenceBuf)
def getSeed():
  """Generate, log, and return a 32-bit compatible seed value derived from
  the current time.

  :returns: (int) seed value in [0, 100000)
  """
  seed = int((time.time()%10000)*10)
  callStack = traceback.extract_stack(limit=3)
  # NOTE: printing a single formatted string works under both Python 2 and
  # Python 3; the original print statements were Python 2 only.
  print("New seed generated as: %s called by %s line %s -> %s" % (
      seed, callStack[0][2], callStack[0][1], callStack[1][2]))
  return seed
def convertSP(pySp, newSeed):
  """
  Given an instance of a python spatial_pooler return an instance of the CPP
  spatial_pooler with identical parameters.
  """
  numInputs = pySp.getNumInputs()
  numColumns = pySp.getNumColumns()
  cppSp = CPPSpatialPooler(pySp._inputDimensions, pySp._columnDimensions)

  def copyScalar(name):
    """ Copy one scalar parameter via its matching getter/setter pair. """
    getattr(cppSp, "set" + name)(getattr(pySp, "get" + name)())

  for paramName in ("PotentialRadius", "PotentialPct", "GlobalInhibition"):
    copyScalar(paramName)

  # Exactly one of these two inhibition parameters is in effect at a time.
  numActiveColumnsPerInhArea = pySp.getNumActiveColumnsPerInhArea()
  localAreaDensity = pySp.getLocalAreaDensity()
  if numActiveColumnsPerInhArea > 0:
    cppSp.setNumActiveColumnsPerInhArea(numActiveColumnsPerInhArea)
  else:
    cppSp.setLocalAreaDensity(localAreaDensity)

  for paramName in ("StimulusThreshold", "InhibitionRadius", "DutyCyclePeriod",
                    "BoostStrength", "IterationNum", "IterationLearnNum",
                    "SpVerbosity", "UpdatePeriod", "SynPermTrimThreshold",
                    "SynPermActiveInc", "SynPermInactiveDec",
                    "SynPermBelowStimulusInc", "SynPermConnected",
                    "MinPctOverlapDutyCycles"):
    copyScalar(paramName)

  # Per-column real-valued arrays (boost factors and duty cycles).
  for arrayName in ("BoostFactors", "OverlapDutyCycles", "ActiveDutyCycles",
                    "MinOverlapDutyCycles"):
    columnBuf = numpy.zeros(numColumns).astype(realType)
    getattr(pySp, "get" + arrayName)(columnBuf)
    getattr(cppSp, "set" + arrayName)(columnBuf)

  # Per-column potential pools and permanence vectors.
  for column in xrange(numColumns):
    potentialBuf = numpy.zeros(numInputs).astype(uintType)
    pySp.getPotential(column, potentialBuf)
    cppSp.setPotential(column, potentialBuf)

    permanenceBuf = numpy.zeros(numInputs).astype(realType)
    pySp.getPermanence(column, permanenceBuf)
    cppSp.setPermanence(column, permanenceBuf)

  # Re-seed both instances identically so subsequent behavior matches.
  pySp._random = NupicRandom(newSeed)
  cppSp.seed_(newSeed)
  return cppSp
def CreateSP(imp, params):
  """
  Helper function for creating an instance of the appropriate spatial pooler
  implementation using the given parameters.

  Parameters:
  ----------------------------
  imp:       Either 'py' or 'cpp' for creating the appropriate instance.
  params:    A dict for overriding constructor parameters. The keys must
             correspond to constructor parameter names.

  Returns the SP object.

  Raises RuntimeError for an unrecognized imp value.
  """
  if imp == "py":
    spClass = PySpatialPooler
  elif imp == "cpp":
    spClass = CPPSpatialPooler
  else:
    raise RuntimeError("unrecognized implementation: %r" % (imp,))

  # Echo the parameters for test-log diagnostics.
  # NOTE: print with a single argument works under both Python 2 and
  # Python 3; the original bare print statement was Python 2 only.
  print(params)

  return spClass(**params)
| 5,939 | Python | .py | 138 | 39.956522 | 79 | 0.762697 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,941 | __init__.py | numenta_nupic-legacy/src/nupic/support/unittesthelpers/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,942 | testcasebase.py | numenta_nupic-legacy/src/nupic/support/unittesthelpers/testcasebase.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This script implements an extension of the unittest2.TestCase class to be
# used as a base class unit tests
import copy
import optparse
import sys
from datetime import datetime
import unittest2 as unittest
class TestCaseBase(unittest.TestCase):
  """ Extension of unittest.TestCase used as the base class for unit tests.

  Wraps the unittest.TestCase assert methods that our base classes use so
  that extra diagnostic info, accumulated via addExtraLogItem(), is appended
  to each assertion's failure message. Also provides banner/header printing
  helpers for readable test output.
  """

  def __init__(self, testMethodName, *args, **kwargs):
    """
    testMethodName: name of the test method to run; passed through to
                    unittest.TestCase.
    """
    super(TestCaseBase, self).__init__(testMethodName, *args, **kwargs)

    # Extra log items to be added to msg strings in our assertXXXXXX
    # wrappers. Items may be added here during a given test run by calling
    # self.addExtraLogItem(item)
    self.__logItems = []

  def printTestHeader(self):
    """ Print out what test we are running """
    # NOTE: print with a single string argument works under both Python 2
    # and Python 3; the original print statements were Python 2 only.
    print("")
    print("###############################################################")
    print("Running %s..." % (self,))
    print("[%s UTC]" % (datetime.utcnow(),))
    print("###############################################################")
    sys.stdout.flush()

  def printBanner(self, msg, *args):
    """ Print out a banner containing msg % args and a UTC timestamp. """
    print("")
    print("===============================================================")
    print(msg % args)
    print("[%s UTC; %s]" % (datetime.utcnow(), self,))
    print("===============================================================")
    sys.stdout.flush()

  #========================
  def addExtraLogItem(self, item):
    """ Add an item to the log items list for the currently running session.
    Our self.myAssertXXXXXX wrappers add the current items to the msg that is
    passed to the unittest's assertXXXXX methods. The extra info will show up
    in test results if the test fails.
    """
    self.__logItems.append(item)

  #========================
  def resetExtraLogItems(self):
    """ Clear all previously-added extra log items. """
    self.__logItems = []

  #========================
  def __wrapMsg(self, msg):
    """ Called by our unittest.TestCase.assertXXXXXX overrides to construct a
    message from the given message plus self.__logItems, if any. If
    self.__logItems is non-empty, the result embeds the given msg as the
    "msg" property and a copy of self.__logItems as the "extra" property.
    """
    if self.__logItems:
      msg = {"msg": msg, "extra": copy.copy(self.__logItems)}
    elif msg is None:
      # No custom message and no extra items: return None so unittest
      # generates its default failure message (the original code stringified
      # None into the literal suffix "None").
      return None

    # Honor line feeds in the message for when it gets printed out
    return str(msg).replace('\\n', '\n')

  #========================
  def assertEqual(self, first, second, msg=None):
    """unittest.TestCase.assertEqual override; adds extra log items to msg"""
    unittest.TestCase.assertEqual(self, first, second, self.__wrapMsg(msg))

  #========================
  def assertNotEqual(self, first, second, msg=None):
    """unittest.TestCase.assertNotEqual override; adds extra log items to msg"""
    unittest.TestCase.assertNotEqual(self, first, second, self.__wrapMsg(msg))

  #========================
  def assertTrue(self, expr, msg=None):
    """unittest.TestCase.assertTrue override; adds extra log items to msg"""
    unittest.TestCase.assertTrue(self, expr, self.__wrapMsg(msg))

  #========================
  def assertFalse(self, expr, msg=None):
    """unittest.TestCase.assertFalse override; adds extra log items to msg"""
    unittest.TestCase.assertFalse(self, expr, self.__wrapMsg(msg))
class TestOptionParser(optparse.OptionParser, object):
  """Option parser with predefined test options.

  The most recently parsed value of the --long/--short flag is cached on the
  class attribute __long__ (None until parse_args() has run); the longTest
  decorator consults it.
  """

  __long__ = None

  # NOTE(review): --short (default=True) and --long (default=False) declare
  # conflicting defaults for the shared dest 'long'; optparse keeps the
  # default of the later-added option — confirm which default is intended.
  standard_option_list = [
    optparse.Option('--verbosity', default=0, type='int',
                    help='Verbosity level from least verbose, 0, to most, '
                         '3 [default=%default].'),
    optparse.Option('--seed', default=42, type='int',
                    help='Seed to use for random number generators '
                         '[default: %default].'),
    optparse.Option('--short', action='store_false', dest='long',
                    default=True, help='Run only short tests.'),
    optparse.Option('--long', action='store_true', dest='long',
                    default=False, help='Run all short and long tests.'),
    optparse.Option('--installDir', dest='installDir',
                    help='Installation directory used for this test run.'),
  ]

  def parse_args(self, args=None, values=None, consumeArgv=True):
    """Parse options; optionally replace sys.argv with the leftover args.

    Also records the parsed 'long' flag on TestOptionParser.__long__ for the
    longTest decorator.
    """
    parsedOptions, leftoverArgs = super(TestOptionParser, self).parse_args(
      args, values)
    TestOptionParser.__long__ = parsedOptions.long
    if consumeArgv:
      sys.argv = sys.argv[:1] + leftoverArgs
    return parsedOptions, leftoverArgs
def longTest(testMethod):
  """Decorator for specifying tests that only run when --long is specified.

  The wrapped method raises if TestOptionParser.parse_args() has not been
  invoked yet; runs the test when --long was given; otherwise defers to
  unittest.skip so the test is reported as skipped.
  """
  def newTestMethod(*args, **kwargs):
    longEnabled = TestOptionParser.__long__
    if longEnabled is None:
      raise Exception('TestOptionParser must be used in order to use @longTest'
                      'decorator.')
    if not longEnabled:
      msg = 'Skipping long test: %s' % testMethod.__name__
      return unittest.skip(msg)(testMethod)(*args, **kwargs)
    return testMethod(*args, **kwargs)
  return newTestMethod
| 6,520 | Python | .py | 143 | 40.265734 | 83 | 0.625118 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,943 | test_framework_helpers.py | numenta_nupic-legacy/src/nupic/support/unittesthelpers/test_framework_helpers.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Abstraction around some test framework features to make it easier to
move to a different framework in the future.
Tests should make use of the built-in unittest framework's features where
possible, turning to this module only for those features that are not.
This module should only abstract features that are likely to be supported
by multiple mainstream test frameworks, such as pytest and nose.
"""
# Our current test framework is pytest
import pytest
def tagTest(tag, comment=None):
  """ A decorator for tagging a test class or test method with the given tag
  string.

  tag:      test tag string; exposed as the pytest marker of the same name
  comment:  reason for the tag; string; optional. Recorded for human readers
            only -- it is not attached to the returned marker.

  Examples:

    @tagTest("slowTests", "takes a long time to execute")
    class ClusterTests(TestCase):
      def testSwarmWithAggregation(self):
        pass

      def testSwarmWithoutAggregation(self):
        pass

  or

    class MiscTests(TestCase):
      def testOnePlusOne(self):
        pass

      @tagTest("slowTests")
      def testSwarm(self):
        pass
  """
  marker = getattr(pytest.mark, tag)
  return marker
| 2,021 | Python | .py | 51 | 37.058824 | 76 | 0.716258 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,944 | abstract_temporal_memory_test.py | numenta_nupic-legacy/src/nupic/support/unittesthelpers/abstract_temporal_memory_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
from nupic.algorithms.monitor_mixin.temporal_memory_monitor_mixin import (
TemporalMemoryMonitorMixin)
from nupic.data.generators.sequence_machine import SequenceMachine
class AbstractTemporalMemoryTest(object):
  """Abstract base class for Temporal Memory test suites.

  Subclasses supply the Temporal Memory class and pattern machine; this base
  wires up a monitored TM instance and pattern/sequence helpers.
  """
  __metaclass__ = ABCMeta  # Python 2 style ABC registration

  VERBOSITY = 1

  @abstractmethod
  def getTMClass(self):
    """
    Implement this method to specify the Temporal Memory class.
    """

  @abstractmethod
  def getPatternMachine(self):
    """
    Implement this method to provide the pattern machine.
    """

  def getDefaultTMParams(self):
    """
    Override this method to set the default TM params for `self.tm`.
    """
    return {}

  def setUp(self):
    self.tm = None
    self.patternMachine = self.getPatternMachine()
    self.sequenceMachine = SequenceMachine(self.patternMachine)

  def init(self, overrides=None):
    """
    Initialize Temporal Memory, and other member variables.

    :param overrides: overrides for default Temporal Memory parameters
    """
    tmParams = self._computeTMParams(overrides)

    class MonitoredTemporalMemory(TemporalMemoryMonitorMixin,
                                  self.getTMClass()): pass
    self.tm = MonitoredTemporalMemory(**tmParams)

  def feedTM(self, sequence, learn=True, num=1):
    """Feed `sequence`, repeated `num` times, to the TM; None resets it."""
    self.tm.mmClearHistory()

    for pattern in sequence * num:
      if pattern is None:
        self.tm.reset()
      else:
        self.tm.compute(pattern, learn=learn)

  # ==============================
  # Helper functions
  # ==============================

  def _computeTMParams(self, overrides):
    """Merge `overrides` (may be None) onto the defaults; overrides win."""
    merged = dict(self.getDefaultTMParams())
    merged.update(overrides or {})
    return merged
| 2,714 | Python | .py | 70 | 34.671429 | 74 | 0.68395 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,945 | __init__.py | numenta_nupic-legacy/src/nupic/frameworks/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,946 | __init__.py | numenta_nupic-legacy/src/nupic/frameworks/viz/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module provides tools for visualizing a Network graph in different ways.
Here is an example of usage:
.. code-block:: python
from nupic.frameworks.viz import (NetworkVisualizer,
GraphVizRenderer,
NetworkXRenderer,
DotRenderer)
# Create Network instance
network = Network()
# Add three TestNode regions to network
network.addRegion("region1", "TestNode", "")
network.addRegion("region2", "TestNode", "")
network.addRegion("region3", "TestNode", "")
# Set dimensions on first region
region1 = network.getRegions().getByName("region1")
region1.setDimensions(Dimensions([1, 1]))
# Link regions
network.link("region1", "region2", "UniformLink", "")
network.link("region2", "region1", "UniformLink", "")
network.link("region1", "region3", "UniformLink", "")
network.link("region2", "region3", "UniformLink", "")
# Initialize network
network.initialize()
# Initialize Network Visualizer
viz = NetworkVisualizer(network)
# Render w/ graphviz
viz.render(renderer=GraphVizRenderer)
# Render w/ networkx
viz.render(renderer=NetworkXRenderer)
# Render to dot (stdout)
viz.render(renderer=DotRenderer)
# Render to dot (file)
viz.render(renderer=lambda: DotRenderer(open("example.dot", "w")))
"""
from nupic.frameworks.viz.dot_renderer import DotRenderer
from nupic.frameworks.viz.graphviz_renderer import GraphVizRenderer
from nupic.frameworks.viz.networkx_renderer import NetworkXRenderer
from nupic.frameworks.viz.network_visualization import NetworkVisualizer
| 2,611 | Python | .py | 59 | 40.271186 | 77 | 0.696217 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,947 | network_visualization.py | numenta_nupic-legacy/src/nupic/frameworks/viz/network_visualization.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import networkx as nx
from nupic.frameworks.viz import DotRenderer as DEFAULT_RENDERER
class NetworkVisualizer(object):
  """
  Network visualization framework entry point.

  Usage:

  .. code-block:: python

    NetworkVisualizer(network).render()

  You may optionally specify a specific renderer, e.g.:

  .. code-block:: python

    viz = NetworkVisualizer(network)
    viz.render(renderer=GraphVizRenderer)
    viz.render(renderer=NetworkXRenderer)

  :param network: (:class:`nupic.engine.Network`)
  """

  def __init__(self, network):
    self.network = network

  def export(self):
    """
    Exports a network as a networkx MultiDiGraph intermediate representation
    suitable for visualization.

    :return: networkx MultiDiGraph
    """
    graph = nx.MultiDiGraph()

    # Add regions to graph as nodes, annotated by name.
    regions = self.network.getRegions()
    # range() rather than the Python 2-only xrange(); iteration behavior is
    # identical and the module stays importable under Python 3.
    for idx in range(regions.getCount()):
      # getByIndex() returns a (name, region) pair; only the name is needed.
      regionName = regions.getByIndex(idx)[0]
      graph.add_node(regionName, label=regionName)

    # Add links between regions to graph as edges, annotated by
    # input-output name pairs.
    for linkName, link in self.network.getLinks():
      graph.add_edge(link.getSrcRegionName(),
                     link.getDestRegionName(),
                     src=link.getSrcOutputName(),
                     dest=link.getDestInputName())

    return graph

  def render(self, renderer=DEFAULT_RENDERER):
    """
    Render network. Default is
    :class:`~nupic.frameworks.viz.dot_renderer.DotRenderer`.

    :param renderer: Constructor parameter to a "renderer" implementation.
        Return value for which must have a "render" method that accepts a
        single argument (a networkx graph instance).
    """
    renderer().render(self.export())
| 2,799 | Python | .py | 67 | 36.955224 | 77 | 0.689464 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,948 | graphviz_renderer.py | numenta_nupic-legacy/src/nupic/frameworks/viz/graphviz_renderer.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import io
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class GraphVizRenderer(object):
  """
  Network visualization "renderer" that lays the graph out with graphviz and
  displays the resulting PNG inline via matplotlib.
  """

  def render(self, graph):
    # Convert the networkx graph to a pygraphviz AGraph and compute a layout.
    agraph = nx.nx_agraph.to_agraph(graph)
    agraph.layout()

    # Draw into an in-memory PNG; "pngBuffer" avoids shadowing the Python 2
    # builtin ``buffer``.
    pngBuffer = io.BytesIO()
    agraph.draw(pngBuffer, format="png", prog="dot")
    pngBuffer.seek(0)

    # Display the rendered image with matplotlib.
    plt.imshow(mpimg.imread(pngBuffer))
    plt.show()
| 1,458 | Python | .py | 38 | 36.105263 | 74 | 0.683876 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,949 | dot_renderer.py | numenta_nupic-legacy/src/nupic/frameworks/viz/dot_renderer.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
class DotRenderer(object):
  """
  Network visualization "renderer" implementation to render a network to a
  dot-formatted document, suitable for use w/ graphviz.

  :param outp: File-like obj to which rendered graph is written (defaults to
        sys.stdout)
  :param node_attrs: Node attributes to apply to all nodes in graph
  """

  # Default node attributes to apply to all nodes in graph.
  defaultNodeAttrs = {"shape": "record"}

  def __init__(self, outp=sys.stdout, node_attrs=None):
    self.outp = outp
    self.node_attrs = node_attrs or self.defaultNodeAttrs

  def render(self, graph):
    """Write ``graph`` (a networkx MultiDiGraph) to ``self.outp`` as dot."""
    self.outp.write(u"digraph structs {\n")
    self.outp.write(u'rankdir = "LR";\n')

    # Map each node name to the sets of input/output port labels seen on its
    # edges; the ports become record fields when the node is declared below.
    lookup = {}

    for edge in graph.edges():
      data = graph.get_edge_data(*edge)
      lookup.setdefault(edge[0], {"inputs": set(), "outputs": set()})
      lookup.setdefault(edge[1], {"inputs": set(), "outputs": set()})
      # A MultiDiGraph may carry several parallel edges; emit one dot edge
      # per src/dest label pair.
      for labels in data.values():
        lookup[edge[0]]["outputs"].add(labels["src"])
        lookup[edge[1]]["inputs"].add(labels["dest"])
        # Build the u"" literal directly instead of wrapping in the
        # Python 2-only unicode() call (consistent with the writes above).
        self.outp.write(u'"{}":{} -> "{}":{};\n'.format(edge[0],
                                                        labels["src"],
                                                        edge[1],
                                                        labels["dest"]))

    # Declare each node as a record: {inputs | name | outputs}.
    for node, ports in lookup.items():
      self.outp.write(self._renderNode(node, ports))

    self.outp.write(u"}\n")

  def _renderNode(self, node, ports):
    """Return the dot declaration for ``node`` with its input/output ports."""
    nodeAttrs = ",".join("{}={}".format(key, value)
                         for key, value in self.node_attrs.items())
    nodeAttrs += "," if nodeAttrs else ""
    label = "{" + "|".join([self._renderPorts(ports["inputs"]),
                            node,
                            self._renderPorts(ports["outputs"])]) + "}"
    return u'{} [{}label="{}"];\n'.format(node, nodeAttrs, label)

  @staticmethod
  def _renderPorts(ports):
    """Return a dot record field group, e.g. ``{<p1>p1|<p2>p2}``."""
    return "{" + "|".join("<{}>{}".format(port, port) for port in ports) + "}"
| 3,061 | Python | .py | 64 | 38.75 | 80 | 0.568884 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,950 | networkx_renderer.py | numenta_nupic-legacy/src/nupic/frameworks/viz/networkx_renderer.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import networkx as nx
import matplotlib.pyplot as plt
class NetworkXRenderer(object):
  """
  Network visualization "renderer" implementation to render a network with
  networkx and matplotlib.

  (The original docstring said "with graphviz" -- a copy-paste from
  GraphVizRenderer; this renderer uses networkx drawing only.)

  :param layoutFn: networkx layout function used to position nodes
      (defaults to ``nx.spring_layout``).
  """

  def __init__(self, layoutFn=nx.spring_layout):
    self.layoutFn = layoutFn

  def render(self, graph):
    """Draw ``graph`` with edge labels in a matplotlib window."""
    pos = self.layoutFn(graph)
    nx.draw_networkx(graph, pos)
    # rotate=False keeps edge labels horizontal; clip_on=False keeps them
    # visible even when they fall outside the axes bounds.
    nx.draw_networkx_edge_labels(graph, pos, clip_on=False, rotate=False)
    plt.show()
| 1,425 | Python | .py | 34 | 39.794118 | 74 | 0.687365 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,951 | Demo.ipynb | numenta_nupic-legacy/src/nupic/frameworks/viz/examples/Demo.ipynb | {
"cells": [
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true,
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"## Visualizing Networks\n",
"\n",
"The following demonstrates basic use of `nupic.frameworks.viz.NetworkVisualizer` to visualize a network.\n",
"\n",
"Before you begin, you will need to install the otherwise optional dependencies. From the root of nupic repository:\n",
"\n",
"```\n",
"pip install --user .[viz]\n",
"```\n",
"\n",
"Setup a simple network so we have something to work with:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.engine import Network, Dimensions\n",
"\n",
"# Create Network instance\n",
"network = Network()\n",
"\n",
"# Add three TestNode regions to network\n",
"network.addRegion(\"region1\", \"TestNode\", \"\")\n",
"network.addRegion(\"region2\", \"TestNode\", \"\")\n",
"network.addRegion(\"region3\", \"TestNode\", \"\")\n",
"\n",
"# Set dimensions on first region\n",
"region1 = network.getRegions().getByName(\"region1\")\n",
"region1.setDimensions(Dimensions([1, 1]))\n",
"\n",
"# Link regions\n",
"network.link(\"region1\", \"region2\", \"UniformLink\", \"\")\n",
"network.link(\"region2\", \"region1\", \"UniformLink\", \"\")\n",
"network.link(\"region1\", \"region3\", \"UniformLink\", \"\")\n",
"network.link(\"region2\", \"region3\", \"UniformLink\", \"\")\n",
"\n",
"# Initialize network\n",
"network.initialize()"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Render with `nupic.frameworks.viz.NetworkVisualizer`, which takes as input any `nupic.engine.Network` instance:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.frameworks.viz import NetworkVisualizer\n",
"\n",
"# Initialize Network Visualizer\n",
"viz = NetworkVisualizer(network)\n",
"\n",
"# Render to dot (stdout)\n",
"viz.render()"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"That's interesting, but not necessarily useful if you don't understand [dot](http://www.graphviz.org/doc/info/lang.html). Let's capture that output and do something else:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from nupic.frameworks.viz import DotRenderer\n",
"from io import StringIO\n",
"\n",
"outp = StringIO()\n",
"viz.render(renderer=lambda: DotRenderer(outp))"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"`outp` now contains the rendered output, render to an image with `graphviz`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"# Render dot to image\n",
"from graphviz import Source\n",
"from IPython.display import Image\n",
"\n",
"Image(Source(outp.getvalue()).pipe(\"png\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "In the example above, each three-columned rectangle is a discrete region, the user-defined name for which is in the middle column. The left-hand and right-hand columns are respective inputs and outputs, the names for which, e.g. \"bottomUpIn\" and \"bottomUpOut\", are specific to the region type. The arrows indicate links between outputs from one region to the input of another.\n",
"\n",
"I know what you're thinking. _That's a cool trick, but nobody cares about your contrived example. I want to see something real!_\n",
"\n",
"Continuing below, I'll instantiate a HTMPredictionModel and visualize it. In this case, I'll use one of the \"hotgym\" examples."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"from nupic.frameworks.opf.model_factory import ModelFactory\n",
"\n",
"# Note: parameters copied from examples/opf/clients/hotgym/simple/model_params.py\n",
"model = ModelFactory.create({'aggregationInfo': {'hours': 1, 'microseconds': 0, 'seconds': 0, 'fields': [('consumption', 'sum')], 'weeks': 0, 'months': 0, 'minutes': 0, 'days': 0, 'milliseconds': 0, 'years': 0}, 'model': 'HTMPrediction', 'version': 1, 'predictAheadTime': None, 'modelParams': {'sensorParams': {'verbosity': 0, 'encoders': {'timestamp_timeOfDay': {'type': 'DateEncoder', 'timeOfDay': (21, 1), 'fieldname': u'timestamp', 'name': u'timestamp_timeOfDay'}, u'consumption': {'resolution': 0.88, 'seed': 1, 'fieldname': u'consumption', 'name': u'consumption', 'type': 'RandomDistributedScalarEncoder'}, 'timestamp_weekend': {'type': 'DateEncoder', 'fieldname': u'timestamp', 'name': u'timestamp_weekend', 'weekend': 21}}, 'sensorAutoReset': None}, 'spParams': {'columnCount': 2048, 'spVerbosity': 0, 'spatialImp': 'cpp', 'synPermConnected': 0.1, 'seed': 1956, 'numActiveColumnsPerInhArea': 40, 'globalInhibition': 1, 'inputWidth': 0, 'synPermInactiveDec': 0.005, 'synPermActiveInc': 0.04, 'potentialPct': 0.85, 'boostStrength': 3.0}, 'spEnable': True, 'clParams': {'implementation': 'cpp', 'alpha': 0.1, 'verbosity': 0, 'steps': '1,5', 'regionName': 'SDRClassifierRegion'}, 'inferenceType': 'TemporalMultiStep', 'tmEnable': True, 'tmParams': {'columnCount': 2048, 'activationThreshold': 16, 'pamLength': 1, 'cellsPerColumn': 32, 'permanenceInc': 0.1, 'minThreshold': 12, 'verbosity': 0, 'maxSynapsesPerSegment': 32, 'outputType': 'normal', 'initialPerm': 0.21, 'globalDecay': 0.0, 'maxAge': 0, 'permanenceDec': 0.1, 'seed': 1960, 'newSynapseCount': 20, 'maxSegmentsPerCell': 128, 'temporalImp': 'cpp', 'inputWidth': 2048}, 'trainSPNetOnlyIfRequested': False}})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Same deal as before, create a `NetworkVisualizer` instance, render to a buffer, then to an image, and finally display it inline."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# New network, new NetworkVisualizer instance\n",
"viz = NetworkVisualizer(model._netInfo.net)\n",
"\n",
"# Render to Dot output to buffer\n",
"outp = StringIO()\n",
"viz.render(renderer=lambda: DotRenderer(outp))\n",
"\n",
"# Render Dot to image, display inline\n",
"Image(Source(outp.getvalue()).pipe(\"png\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In these examples, I'm using `graphviz` to render an image from the `dot` document in Python, but you may want to do something else. `dot` is a generic and flexible graph description language and there are many tools for working with `dot` files."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
""
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2.0
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
| 7,994 | Python | .py | 234 | 29.935897 | 1,676 | 0.618557 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,952 | visualize_network.py | numenta_nupic-legacy/src/nupic/frameworks/viz/examples/visualize_network.py | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.engine import Network, Dimensions
from nupic.frameworks.viz import (DotRenderer,
NetworkVisualizer,
GraphVizRenderer,
NetworkXRenderer)
def main():
  """Build a small three-region demo network and render it with every
  available renderer."""
  net = Network()

  # Three TestNode regions.
  for name in ("region1", "region2", "region3"):
    net.addRegion(name, "TestNode", "")

  # The first region needs explicit dimensions before initialization.
  net.getRegions().getByName("region1").setDimensions(Dimensions([1, 1]))

  # Wire the regions together.
  for src, dst in (("region1", "region2"),
                   ("region2", "region1"),
                   ("region1", "region3"),
                   ("region2", "region3")):
    net.link(src, dst, "UniformLink", "")

  net.initialize()

  viz = NetworkVisualizer(net)

  # Exercise each renderer: graphviz, networkx, dot to stdout, dot to file.
  viz.render(renderer=GraphVizRenderer)
  viz.render(renderer=NetworkXRenderer)
  viz.render(renderer=DotRenderer)
  viz.render(renderer=lambda: DotRenderer(open("example.dot", "w")))
if __name__ == "__main__":
main()
| 2,314 | Python | .py | 55 | 37.963636 | 72 | 0.670232 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,953 | __init__.py | numenta_nupic-legacy/src/nupic/frameworks/viz/examples/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,954 | exp_description_helpers.py | numenta_nupic-legacy/src/nupic/frameworks/opf/exp_description_helpers.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import imp
from nupic.data.dict_utils import rUpdate
# This file contains utility functions that are used
# internally by the prediction framework and may be imported
# by description files. Functions that are used only by
# the prediction framework should be in utils.py
#
# This file provides support for the following experiment description features:
#
# 1. Sub-experiment support
# 2. Lazy evaluators (e.g., DeferredDictLookup, applyValueGettersToContainer)
###############################################################################
# Sub-experiment support
###############################################################################
# Utility methods for description files are organized as a base description
# and an experiment based on that base description.
# The base description calls getConfig to get the configuration from the
# specific experiment, and the specific experiment calls importBaseDescription
# Empty initial config allows the base experiment to run by itself; a
# sub-experiment replaces this via importBaseDescription() before the base
# description reads it back with updateConfigFromSubConfig().
_config = dict()

# Save the path to the current sub-experiment here during
# importBaseDescription(); exposed through getSubExpDir().
subExpDir = None

# We will load the description file as a module, which allows us to
# use the debugger and see source code. But description files are frequently
# modified and we want to be able to easily reload them. To facilitate this,
# we reload with a unique module name ("pf_base_description%d") each time.
baseDescriptionImportCount = 0
def importBaseDescription(path, config):
  """Load a base description.py as a uniquely-named module.

  Called by a sub-experiment's description file. Stashes ``config`` in the
  module-global ``_config`` so the base description can pick it up via
  updateConfigFromSubConfig(), and records the sub-experiment's directory in
  the module-global ``subExpDir``.

  :param path: path to the base description file; if relative, it is resolved
      against the directory of the *calling* file (found via frame
      inspection, so this must be called directly from the sub-experiment
      module, not through a wrapper function).
  :param config: dict of configuration overrides supplied by the
      sub-experiment.
  :returns: the loaded base-description module; its ``__file__`` is renamed
      to ``__base_file__`` so a later ``from mod import *`` in the caller
      does not clobber the caller's own ``__file__``.
  """
  global baseDescriptionImportCount, _config, subExpDir
  if not os.path.isabs(path):
    # Grab the path to the file doing the import: inspect.stack()[1] is our
    # immediate caller (the sub-experiment's description module).
    import inspect
    callingFrame = inspect.stack()[1][0]
    callingFile = callingFrame.f_globals['__file__']
    subExpDir = os.path.dirname(callingFile)
    path = os.path.normpath(os.path.join(subExpDir, path))
  # Stash the config in a place where the loading module can find it.
  _config = config
  # Unique module name per import so frequently-edited description files are
  # truly re-executed rather than served stale from sys.modules.
  mod = imp.load_source("pf_base_description%d" % baseDescriptionImportCount,
                        path)
  # Don't want to override __file__ in our caller.
  mod.__base_file__ = mod.__file__
  del mod.__file__
  baseDescriptionImportCount += 1
  return mod
def updateConfigFromSubConfig(config):
  """Merge the pending sub-experiment overrides into ``config`` in place.

  The overrides were stashed in the module-global ``_config`` by
  importBaseDescription(); after merging, the stash is reset so a later
  import starts clean.

  :param config: the base experiment's config dict (updated recursively).
  """
  global _config
  # Recursive in-place merge of the sub-experiment's overrides.
  rUpdate(config, _config)
  # Consume the overrides.
  _config = dict()
def getSubExpDir():
  """Return the directory of the most recently imported sub-experiment, or
  None if importBaseDescription() has not resolved a relative path yet."""
  # Read-only access to a module global needs no ``global`` statement.
  return subExpDir
###############################################################################
# Lazy evaluators (DeferredDictLookup, applyValueGettersToContainer, and friends)
###############################################################################
class ValueGetterBase(object):
  """ Abstract base class for "value getters" (e.g. DictValueGetter) used to
  lazily resolve sub-field values after an experiment's config dictionary is
  defined and possibly updated by a sub-experiment.

  This solves the problem of referencing a config dictionary field from
  within the dictionary's own (still incomplete) definition.

  NOTE: the referenced value may not exist yet when a getter is constructed;
        it is resolved when the base description.py calls
        applyValueGettersToContainer().
  NOTE: derived-class constructors MUST call this constructor.
  NOTE: derived classes MUST override handleGetValue().
  """

  class __NoResult(object):
    """ Private sentinel type marking the cache slot as "not resolved yet". """
    pass

  def __init__(self):
    # Re-entrancy guard used while chasing chained getters.
    self.__inLookup = False
    # Cache slot; holds __NoResult until the first successful resolution.
    self.__cachedResult = self.__NoResult

  def __call__(self, topContainer):
    """ Resolve and return the referenced value.

    The first call invokes handleGetValue(); if that yields another
    value-getter, the chain is followed until a concrete value is reached.
    The result is cached, so subsequent calls always return the same
    reference.

    :param topContainer: top-level container (dict, tuple, or list
        [sub-]instance) within whose context the value-getter is applied.
    :returns: the fully-resolved referenced value.
    """
    assert not self.__inLookup

    # Serve repeat calls straight from the cache.
    if self.__cachedResult is not self.__NoResult:
      return self.__cachedResult

    self.__cachedResult = self.handleGetValue(topContainer)

    # handleGetValue() may itself return a value-getter; chase the chain to a
    # concrete value, guarding against re-entry while doing so.
    if isinstance(self.__cachedResult, ValueGetterBase):
      nestedGetter = self.__cachedResult
      self.__inLookup = True
      self.__cachedResult = nestedGetter(topContainer)
      self.__inLookup = False

    # The value must be fully resolved at this point.
    assert self.__cachedResult is not self.__NoResult
    assert not isinstance(self.__cachedResult, ValueGetterBase)

    return self.__cachedResult

  def handleGetValue(self, topContainer):
    """ "Pure virtual" hook: derived classes MUST override and return the
    referenced value. The override need not resolve chained getters -- the
    base class handles that.

    :param topContainer: top-level container (dict, tuple, or list
        [sub-]instance) within whose context the value-getter is applied.
    :returns: the referenced value (possibly another value-getter instance).
    """
    raise NotImplementedError("ERROR: ValueGetterBase is an abstract "
                              "class; base class MUST override handleGetValue()")
class DictValueGetter(ValueGetterBase):
  """
  A "future" reference to a value within a top-level or nested dictionary.
  See also class DeferredDictLookup.
  """

  def __init__(self, referenceDict, *dictKeyChain):
    """
    :param referenceDict: explicit reference dictionary containing the field
        named by the first key in dictKeyChain (may be the result of the
        built-in globals() to look up values via a module global). If None,
        the topContainer later supplied to handleGetValue() -- normally the
        dict passed to applyValueGettersToContainer() -- is used as the
        reference dictionary instead.
    :param dictKeyChain: one or more key strings; every key but the last must
        map to a nested dictionary, and the last names the field whose value
        handleGetValue() will return.

    NOTE: the referenced value need not exist at construction time; it is
          resolved when the base description.py calls
          applyValueGettersToContainer().

    Example:
      config = dict(
        _dsEncoderFieldName2_N = 70,
        _dsEncoderFieldName2_W = 5,
        dsEncoderSchema = [
          dict(
            base=dict(
              fieldname='Name2', type='ScalarEncoder',
              name='Name2', minval=0, maxval=270, clipInput=True,
              n=DictValueGetter(None, '_dsEncoderFieldName2_N'),
              w=DictValueGetter(None, '_dsEncoderFieldName2_W')),
          ),
        ],
      )
      updateConfigFromSubConfig(config)
      applyValueGettersToContainer(config)
    """
    # Invoke the base constructor first.
    ValueGetterBase.__init__(self)

    assert referenceDict is None or isinstance(referenceDict, dict)
    assert len(dictKeyChain) >= 1

    self.__referenceDict = referenceDict
    self.__dictKeyChain = dictKeyChain

  def handleGetValue(self, topContainer):
    """ Overrides ValueGetterBase's "pure virtual" hook: walk the key chain
    and return the referenced value. Chained value-getters in the result are
    resolved by the base class, not here.

    :param topContainer: used as the reference dictionary when none was given
        to the constructor.
    :returns: the referenced value (possibly another value-getter instance).
    """
    # Start from the explicit reference dict when provided, otherwise from
    # the container currently being resolved.
    value = topContainer if self.__referenceDict is None else self.__referenceDict
    for key in self.__dictKeyChain:
      value = value[key]
    return value
class DeferredDictLookup(DictValueGetter):
  """
  A "future" reference to a value within an *implicit* dictionary: the one
  that will eventually be passed to applyValueGettersToContainer() (typically
  by description.py after its config dict has been updated from the
  sub-experiment). The key chain is resolved relative to that dictionary.
  """

  def __init__(self, *dictKeyChain):
    """
    :param dictKeyChain: one or more key strings; every key but the last must
        map to a nested dictionary within the dict eventually passed to
        applyValueGettersToContainer(), and the last names the field whose
        value this getter resolves to.

    NOTE: the referenced value need not exist at construction time; it is
          resolved when the base description.py calls
          applyValueGettersToContainer().

    Example:
      config = dict(
        _dsEncoderFieldName2_N = 70,
        _dsEncoderFieldName2_W = 5,
        dsEncoderSchema = [
          dict(
            base=dict(
              fieldname='Name2', type='ScalarEncoder',
              name='Name2', minval=0, maxval=270, clipInput=True,
              n=DeferredDictLookup('_dsEncoderFieldName2_N'),
              w=DeferredDictLookup('_dsEncoderFieldName2_W')),
          ),
        ],
      )
      updateConfigFromSubConfig(config)
      applyValueGettersToContainer(config)
    """
    # Passing None as the reference dict makes the base class resolve the
    # key chain against the container handed to
    # applyValueGettersToContainer().
    DictValueGetter.__init__(self, None, *dictKeyChain)
def applyValueGettersToContainer(container):
  """Resolve, in place, every value-getter reachable from ``container``.

  :param container: dict/list/tuple structure to walk; it also serves as the
      top-level context handed to each value-getter being resolved.
  """
  _applyValueGettersImpl(container, container, [])
def _applyValueGettersImpl(container, currentObj, recursionStack):
  """Recursive worker for applyValueGettersToContainer().

  :param container: the top-level container; passed to each value-getter as
      its resolution context.
  :param currentObj: the node currently being processed.
  :param recursionStack: list of ancestor nodes on the current path, used as
      a cycle guard.
  """
  # Detect cycles by *identity*. The original membership test
  # (``currentObj in recursionStack``) used ``==``, performing a deep
  # equality comparison against every ancestor -- O(size) per node and
  # unsafe for values whose __eq__ does not return a bool (e.g. numpy
  # arrays, which do appear in OPF configs).
  if any(ancestor is currentObj for ancestor in recursionStack):
    return

  # Sanity-check of our cycle-detection logic.
  assert len(recursionStack) < 1000

  # Push the current object on our cycle-detection stack.
  recursionStack.append(currentObj)

  # Resolve value-getters within dictionaries, tuples and lists.
  if isinstance(currentObj, dict):
    for key, value in currentObj.items():
      if isinstance(value, ValueGetterBase):
        currentObj[key] = value(container)
      _applyValueGettersImpl(container, currentObj[key], recursionStack)
  elif isinstance(currentObj, (tuple, list)):
    for i, value in enumerate(currentObj):
      # NOTE: top-level elements within a tuple are immutable, so a
      # value-getter directly inside a tuple cannot be replaced (assignment
      # would raise TypeError) and is assumed absent; nested mutable
      # sub-elements are still recursed into.
      if isinstance(value, ValueGetterBase):
        currentObj[i] = value(container)
      _applyValueGettersImpl(container, currentObj[i], recursionStack)
  else:
    # Leaf value (string, number, etc.) -- nothing to resolve.
    pass

  recursionStack.pop()
  return
| 14,463 | Python | .py | 288 | 43.361111 | 86 | 0.691882 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,955 | client.py | numenta_nupic-legacy/src/nupic/frameworks/opf/client.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Simple OPF client."""
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.opf_basic_environment import BasicDatasetReader
from nupic.frameworks.opf.prediction_metrics_manager import MetricsManager
class Client(object):
"""
Simple OPF client.
:param modelConfig: (dict) The model config.
:param metricSpecs: A sequence of
:class:`~nupic.frameworks.opf.metrics.MetricSpec` instances.
:param sourceSpec: (string) Path to the source CSV file.
:param sinkSpec: (string) Path to the sink CSV file.
"""
def __init__(self, modelConfig, inferenceArgs, metricSpecs, sourceSpec,
sinkSpec=None):
self.model = ModelFactory.create(modelConfig)
self.model.enableInference(inferenceArgs)
self.metricsManager = MetricsManager(metricSpecs, self.model.getFieldInfo(),
self.model.getInferenceType())
self.sink = None
if sinkSpec is not None:
# TODO: make this work - sinkSpec not yet supported.
raise NotImplementedError('The sinkSpec is not yet implemented.')
#self.sink = BasicPredictionLogger(
# self.model.getFieldInfo(), sinkSpec, 'myOutput',
# self.model.getInferenceType())
#self.sink.setLoggedMetrics(
# self.metricsManager.getMetricLabels())
self.datasetReader = BasicDatasetReader(sourceSpec)
def __iter__(self):
return self
def _processRecord(self, inputRecord):
modelResult = self.model.run(inputRecord)
modelResult.metrics = self.metricsManager.update(modelResult)
if self.sink:
self.sink.writeRecord(modelResult)
return modelResult
def next(self):
record = self.datasetReader.next()
return self._processRecord(record)
def skipNRecords(self, n):
for i in range(n):
self.datasetReader.next()
def nextTruthPrediction(self, field):
record = self.datasetReader.next()
prediction=self._processRecord(record).inferences['prediction'][0]
truth=record[field]
return truth, prediction
def run(self):
result = None
while True:
try:
result = self.next()
#print result
except StopIteration:
break
return result
| 3,192 | Python | .py | 77 | 36.844156 | 80 | 0.697937 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,956 | periodic.py | numenta_nupic-legacy/src/nupic/frameworks/opf/periodic.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import namedtuple



# Passed as parameter to PeriodicActivityMgr
#
# repeating: True if the activity is a repeating activity, False if one-shot
# period: period of activity's execution (number of "ticks")
# cb: a callable to call upon expiration of period; will be called
#     as cb()
PeriodicActivityRequest = namedtuple("PeriodicActivityRequest",
                                     ("repeating", "period", "cb"))
class PeriodicActivityMgr(object):
  """ Tracks a set of periodic activities and fires each activity's callback
  when its period (measured in tick() calls) expires.

  TODO: move to shared script so that we can share it with run_opf_experiment
  """

  # iteratorHolder: a single-element list holding the activity's countdown
  #   iterator; a list is used so that the iterator can be replaced when a
  #   repeating activity re-arms (namedtuple fields themselves are immutable,
  #   so the iterator could not be swapped if stored directly in the tuple).
  Activity = namedtuple("Activity", ("repeating",
                                     "period",
                                     "cb",
                                     "iteratorHolder"))

  def __init__(self, requestedActivities=None):
    """
    requestedActivities: a sequence of PeriodicActivityRequest elements;
        optional (defaults to no activities).  NOTE: the default used to be
        a shared mutable list ([]); None is used instead per best practice.
    """
    self.__activities = []
    if requestedActivities is None:
      requestedActivities = []
    self.__appendActivities(requestedActivities)

  def addActivities(self, periodicActivities):
    """ Adds activities

    periodicActivities: A sequence of PeriodicActivityRequest elements
    """
    self.__appendActivities(periodicActivities)

  def tick(self):
    """ Activity tick handler; services all activities

    Returns: True if controlling iterator says it's okay to keep going;
             False to stop
    """
    # Run activities whose time has come
    for act in self.__activities:
      if not act.iteratorHolder[0]:
        # One-shot activity that has already fired
        continue

      try:
        next(act.iteratorHolder[0])
      except StopIteration:
        act.cb()
        if act.repeating:
          # Re-arm the countdown for the next period.  NOTE: range (not
          # xrange) -- iteration-identical in Python 2 and portable.
          act.iteratorHolder[0] = iter(range(act.period - 1))
        else:
          act.iteratorHolder[0] = None

    return True

  def __appendActivities(self, periodicActivities):
    """
    periodicActivities: A sequence of PeriodicActivityRequest elements
    """
    for req in periodicActivities:
      act = self.Activity(repeating=req.repeating,
                          period=req.period,
                          cb=req.cb,
                          iteratorHolder=[iter(range(req.period - 1))])
      self.__activities.append(act)
| 3,393 | Python | .py | 82 | 34.573171 | 78 | 0.643227 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,957 | opf_basic_environment.py | numenta_nupic-legacy/src/nupic/frameworks/opf/opf_basic_environment.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This script provides a file-based implementation of the ``opf_environment``
interfaces (OPF).
This "basic" implementation of the interface (need a better name
instead of "basic") uses files (.csv, etc.) versus Nupic's implementation
that would use databases.
This implementation is used by research tools, such as
``scripts/run_opf_experiment.py``.
The ``opf_environment`` interfaces encapsulate external specifics, such as
data source (e.g., .csv file or database, etc.), prediction sink (.csv file or
databse, etc.), report and serialization destination, etc.
"""
from abc import ABCMeta, abstractmethod
import copy
import csv
import errno
import json
import logging
import logging.handlers
import os
import shutil
import StringIO

import opf_utils
import opf_environment as opfenv
from nupic.data.file_record_stream import FileRecordStream
from nupic.data.stream_reader import StreamReader
from nupic.data.field_meta import (FieldMetaInfo,
                                   FieldMetaType,
                                   FieldMetaSpecial)
from nupic.data.inference_shifter import InferenceShifter
from opf_utils import InferenceType, InferenceElement
class PredictionMetricsLoggerIface(object):
  """ Interface for sinks that output prediction metrics.

  Concrete subclasses decide where the metrics actually go (stdout, files,
  etc.).
  """

  __metaclass__ = ABCMeta

  @abstractmethod
  def emitPeriodicMetrics(self, metrics):
    """ Emit an intermediate snapshot of metrics to stdout in JSON.

    :param metrics: A list of metrics as returned by
        :meth:`nupic.frameworks.opf.opf_task_driver.OPFTaskDriver.getMetrics`.
    """

  @abstractmethod
  def emitFinalMetrics(self, metrics):
    """ Emit the final metrics.

    .. note:: the final metrics may be routed to a different destination
       (e.g., a csv file) than :meth:`emitPeriodicMetrics` (e.g., stdout).

    :param metrics: A list of metrics as returned by
        :meth:`nupic.frameworks.opf.opf_task_driver.OPFTaskDriver.getMetrics`.
    """
class DatasetReaderIface(object):
  """ Interface for dataset readers: iterables that yield input records and
  expose the dataset's field metadata.
  """

  __metaclass__ = ABCMeta

  @abstractmethod
  def getDatasetFieldMetaData(self):
    """
    :returns: a tuple of dataset field metadata descriptors
        (:class:`nupic.data.field_meta.FieldMetaInfo`) arranged in the same
        order as the columns in the dataset.
    """

  @abstractmethod
  def next(self):
    """
    :returns: the next record from the dataset, structured as in
        :meth:`nupic.data.record_stream.RecordStreamIface.getNextRecord`;
        ``None`` when the next record is not available yet.

    :raises: (StopIteration) when a hard "end of file" has been reached
        and no more records will be forthcoming.
    """
class PredictionWriterIface(object):
  """ Interface for prediction writers, as produced by object factories
  conforming to PredictionWriterFactoryIface.
  """

  __metaclass__ = ABCMeta

  @abstractmethod
  def close(self):
    """ Closes the writer (e.g., close the underlying file) """

  @abstractmethod
  def append(self, inputRow, predictionRow, sequenceReset, metrics=None):
    """ Emits a single prediction as input versus predicted.

    inputRow:      A tuple or list of fields comprising the input data row.
    predictionRow: A tuple or list of fields comprising the prediction; None
                   when no prediction is available for the same timestep as
                   the ground truth (e.g., temporal inference on the very
                   first input record).
    sequenceReset: Tests True when the input row was accompanied by a
                   sequence-reset signal; False otherwise.
    metrics:       OPTIONAL - a dictionary of metrics to be written out with
                   every prediction; keys are the automatically generated
                   metric labels (see MetricSpec in
                   prediction_metrics_manager.py), values are the
                   real-number metric values.
    """

  @abstractmethod
  def checkpoint(self, checkpointSink, maxRows):
    """ Save a checkpoint of the prediction output stream, comprising up to
    maxRows of the most recent inference records.

    checkpointSink: A File-like object where predictions checkpoint data,
                    if any, will be stored.
    maxRows:        Maximum number of most recent inference rows to
                    checkpoint.
    """
class BasicPredictionMetricsLogger(PredictionMetricsLoggerIface):
  """ File/console-based implementation of the prediction-metrics output
  interface.  Both periodic and final metrics are emitted to stdout wrapped
  in <JSON>...</JSON> markers.

  TODO: where should periodic and final predictions go (versus stdout)

  :param experimentDir: (string) path to directory for experiment to run.
  :param label: (string) used to distinguish the output's container (e.g.,
         filename, directory name, property key, etc.).
  """

  def __init__(self, experimentDir, label):
    self.__experimentDir = experimentDir
    self.__label = label

  def __repr__(self):
    return ("%s(experimentDir=%r,label=%r)" % (self.__class__.__name__,
                                               self.__experimentDir,
                                               self.__label))

  def emitPeriodicMetrics(self, metrics):
    """ Emit an intermediate metrics snapshot to stdout as JSON. """
    jsonString = self._translateMetricsToJSON(metrics, label="PERIODIC")
    self._emitJSONStringToStdout(jsonString)

  def emitFinalMetrics(self, metrics):
    """ Emit the final metrics to stdout as JSON. """
    jsonString = self._translateMetricsToJSON(metrics, label="FINAL")
    self._emitJSONStringToStdout(jsonString)

  def _translateMetricsToJSON(self, metrics, label):
    """ Translates the given metrics value to a JSON string

    metrics: A list of dictionaries per OPFTaskDriver.getMetrics()
    label:   currently unused; retained for call-site compatibility

    Returns: JSON string representing the given metrics object.
    """
    def _mapNumpyValues(obj):
      """json.dumps fallback: transcode numpy scalars/arrays into
      JSON-compatible native Python values."""
      import numpy

      if isinstance(obj, numpy.float32):
        return float(obj)
      elif isinstance(obj, numpy.bool_):
        return bool(obj)
      elif isinstance(obj, numpy.ndarray):
        return obj.tolist()
      else:
        raise TypeError("UNEXPECTED OBJ: %s; class=%s" % (obj, obj.__class__))

    # Convert the structure to a display-friendly JSON string
    return json.dumps(metrics, indent=4, default=_mapNumpyValues)

  def _emitJSONStringToStdout(self, jsonString):
    # print() with a single argument behaves identically under Python 2's
    # print statement (parenthesized expression) and Python 3's function.
    print('<JSON>')
    print(jsonString)
    print('</JSON>')
class BasicDatasetReader(DatasetReaderIface):
  """ A CSV file-based implementation of :class:`DatasetReaderIface`.

  :param streamDefDict: stream definition, as defined
      `here <stream-def.html>`_.
  """

  def __init__(self, streamDefDict):
    # StreamReader performs the actual parsing/aggregation of the source.
    self._reader = StreamReader(streamDefDict, saveOutput=True)

  def __iter__(self):
    return self

  def next(self):
    """Return the next record dict; raise StopIteration at end of data."""
    row = self._reader.getNextRecordDict()
    # Fix: identity comparison ("is None") instead of "== None".
    if row is None:
      raise StopIteration
    return row

  def getDatasetFieldMetaData(self):
    """:returns: list of :class:`nupic.data.field_meta.FieldMetaInfo` for
    the stream's fields, in column order."""
    return FieldMetaInfo.createListFromFileFieldList(self._reader.getFields())
class _BasicPredictionWriter(PredictionWriterIface):
  """ This class defines the basic (file-based) implementation of
  PredictionWriterIface, whose instances are returned by
  BasicPredictionWriterFactory.

  The output dataset (a CSV prediction log) is opened lazily on the first
  append() that carries inferences, because the header depends on the shape
  of the first ModelResult.
  """
  def __init__(self, experimentDir, label, inferenceType,
               fields, metricNames=None, checkpointSource=None):
    """ Constructor

    experimentDir:
                  experiment directory path that contains description.py

    label:        A label string to incorporate into the filename.

    inferenceType:
                  A constant from opf_utils.InferenceType for the
                  requested prediction writer

    fields:       a non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo
                  representing fields that will be emitted to this prediction
                  writer

    metricNames:  OPTIONAL - A list of metric names that will be emitted by
                  this prediction writer

    checkpointSource:
                  If not None, a File-like object containing the
                  previously-checkpointed predictions for setting the initial
                  contents of this PredictionOutputStream.  Will be copied
                  before returning, if needed.
    """
    #assert len(fields) > 0

    self.__experimentDir = experimentDir

    # opf_utils.InferenceType kind value
    self.__inferenceType = inferenceType

    # A tuple of nupic.data.fieldmeta.FieldMetaInfo
    self.__inputFieldsMeta = tuple(copy.deepcopy(fields))
    self.__numInputFields = len(self.__inputFieldsMeta)
    self.__label = label
    if metricNames is not None:
      # NOTE(review): sort() mutates the caller's metricNames list in
      # place -- confirm callers do not rely on its original ordering.
      metricNames.sort()
    self.__metricNames = metricNames

    # Define our output field meta info
    self.__outputFieldsMeta = []

    # The list of inputs that we include in the prediction output
    self._rawInputNames = []

    # Output dataset
    self.__datasetPath = None
    self.__dataset = None

    # Save checkpoint data until we're ready to create the output dataset
    self.__checkpointCache = None
    if checkpointSource is not None:
      checkpointSource.seek(0)
      self.__checkpointCache = StringIO.StringIO()
      shutil.copyfileobj(checkpointSource, self.__checkpointCache)

    return

  def __openDatafile(self, modelResult):
    """Open the data file and write the header row.

    The header layout is: reset bit, raw input fields (sorted by name),
    one or more columns per inference element, then one column per logged
    metric.  append() emits rows in this same order.
    """

    # Write reset bit
    resetFieldMeta = FieldMetaInfo(
      name="reset",
      type=FieldMetaType.integer,
      special = FieldMetaSpecial.reset)
    self.__outputFieldsMeta.append(resetFieldMeta)


    # -----------------------------------------------------------------------
    # Write each of the raw inputs that go into the encoders
    rawInput = modelResult.rawInput
    rawFields = rawInput.keys()
    rawFields.sort()
    for field in rawFields:
      # Skip internal fields and the reset column (already emitted above)
      if field.startswith('_') or field == 'reset':
        continue
      value = rawInput[field]
      meta = FieldMetaInfo(name=field, type=FieldMetaType.string,
                           special=FieldMetaSpecial.none)
      self.__outputFieldsMeta.append(meta)
      self._rawInputNames.append(field)


    # -----------------------------------------------------------------------
    # Handle each of the inference elements
    for inferenceElement, value in modelResult.inferences.iteritems():
      inferenceLabel = InferenceElement.getLabel(inferenceElement)

      # TODO: Right now we assume list inferences are associated with
      # The input field metadata
      if type(value) in (list, tuple):
        # Append input and prediction field meta-info
        self.__outputFieldsMeta.extend(self.__getListMetaInfo(inferenceElement))

      elif isinstance(value, dict):
        self.__outputFieldsMeta.extend(self.__getDictMetaInfo(inferenceElement,
                                                              value))
      else:
        # NOTE(review): scalar inferences pass special='' here, while the
        # metric fields below use FieldMetaSpecial.none -- confirm
        # FieldMetaInfo treats '' and FieldMetaSpecial.none the same way.
        if InferenceElement.getInputElement(inferenceElement):
          self.__outputFieldsMeta.append(FieldMetaInfo(name=inferenceLabel+".actual",
                type=FieldMetaType.string, special = ''))
        self.__outputFieldsMeta.append(FieldMetaInfo(name=inferenceLabel,
              type=FieldMetaType.string, special = ''))

    if self.__metricNames:
      for metricName in self.__metricNames:
        metricField = FieldMetaInfo(
          name = metricName,
          type = FieldMetaType.float,
          special = FieldMetaSpecial.none)

        self.__outputFieldsMeta.append(metricField)

    # Create the inference directory for our experiment
    inferenceDir = _FileUtils.createExperimentInferenceDir(self.__experimentDir)

    # Construct the prediction dataset file path
    filename = (self.__label + "." +
               opf_utils.InferenceType.getLabel(self.__inferenceType) +
               ".predictionLog.csv")
    self.__datasetPath = os.path.join(inferenceDir, filename)

    # Create the output dataset
    print "OPENING OUTPUT FOR PREDICTION WRITER AT: %r" % self.__datasetPath
    print "Prediction field-meta: %r" % ([tuple(i) for i in self.__outputFieldsMeta],)
    self.__dataset = FileRecordStream(streamID=self.__datasetPath, write=True,
                                     fields=self.__outputFieldsMeta)

    # Copy data from checkpoint cache
    if self.__checkpointCache is not None:
      self.__checkpointCache.seek(0)

      reader = csv.reader(self.__checkpointCache, dialect='excel')

      # Skip header row
      try:
        header = reader.next()
      except StopIteration:
        print "Empty record checkpoint initializer for %r" % (self.__datasetPath,)
      else:
        # The checkpointed header must match the newly-built header exactly
        assert tuple(self.__dataset.getFieldNames()) == tuple(header), \
          "dataset.getFieldNames(): %r; predictionCheckpointFieldNames: %r" % (
          tuple(self.__dataset.getFieldNames()), tuple(header))

      # Copy the rows from checkpoint
      numRowsCopied = 0
      while True:
        try:
          row = reader.next()
        except StopIteration:
          break

        #print "DEBUG: restoring row from checkpoint: %r" % (row,)

        self.__dataset.appendRecord(row)
        numRowsCopied += 1

      self.__dataset.flush()

      print "Restored %d rows from checkpoint for %r" % (
        numRowsCopied, self.__datasetPath)

      # Dispose of our checkpoint cache
      self.__checkpointCache.close()
      self.__checkpointCache = None

    return

  def setLoggedMetrics(self, metricNames):
    """ Tell the writer which metrics should be written

    Parameters:
    -----------------------------------------------------------------------
    metricNames:  A list of metric labels to be written
    """
    # Stored as a set; append() iterates it when emitting metric columns
    if metricNames is None:
      self.__metricNames = set([])
    else:
      self.__metricNames = set(metricNames)

  def close(self):
    """ [virtual method override] Closes the writer (e.g., close the underlying
    file)
    """
    if self.__dataset:
      self.__dataset.close()
    self.__dataset = None

    return

  def __getListMetaInfo(self, inferenceElement):
    """ Get field metadata information for inferences that are of list type

    TODO: Right now we assume list inferences are associated with the input field
    metadata
    """
    fieldMetaInfo = []
    inferenceLabel = InferenceElement.getLabel(inferenceElement)

    for inputFieldMeta in self.__inputFieldsMeta:
      # NOTE(review): outputFieldMeta is only bound inside this "if", yet it
      # is appended unconditionally below -- if getInputElement() returns a
      # falsy value this raises NameError.  Confirm list inferences always
      # have an associated input element.
      if InferenceElement.getInputElement(inferenceElement):
        outputFieldMeta = FieldMetaInfo(
          name=inputFieldMeta.name + ".actual",
          type=inputFieldMeta.type,
          special=inputFieldMeta.special
        )

      predictionField = FieldMetaInfo(
        name=inputFieldMeta.name + "." + inferenceLabel,
        type=inputFieldMeta.type,
        special=inputFieldMeta.special
      )

      fieldMetaInfo.append(outputFieldMeta)
      fieldMetaInfo.append(predictionField)

    return fieldMetaInfo

  def __getDictMetaInfo(self, inferenceElement, inferenceDict):
    """Get field metadata information for inferences that are of dict type"""
    fieldMetaInfo = []
    inferenceLabel = InferenceElement.getLabel(inferenceElement)

    if InferenceElement.getInputElement(inferenceElement):
      fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel+".actual",
                                         type=FieldMetaType.string,
                                         special = ''))

    keys = sorted(inferenceDict.keys())
    for key in keys:
      fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel+"."+str(key),
                                         type=FieldMetaType.string,
                                         special=''))


    return fieldMetaInfo

  def append(self, modelResult):
    """ [virtual method override] Emits a single prediction as input versus
    predicted.

    modelResult:    An opf_utils.ModelResult object that contains the model input
                    and output for the current timestep.
    """
    #print "DEBUG: _BasicPredictionWriter: writing modelResult: %r" % (modelResult,)

    # If there are no inferences, don't write anything
    inferences = modelResult.inferences
    hasInferences = False
    if inferences is not None:
      for value in inferences.itervalues():
        hasInferences = hasInferences or (value is not None)

    if not hasInferences:
      return

    # Lazily open the output file -- the header depends on the first result
    if self.__dataset is None:
      self.__openDatafile(modelResult)

    inputData = modelResult.sensorInput

    sequenceReset = int(bool(inputData.sequenceReset))
    outputRow = [sequenceReset]


    # -----------------------------------------------------------------------
    # Write out the raw inputs
    rawInput = modelResult.rawInput
    for field in self._rawInputNames:
      outputRow.append(str(rawInput[field]))


    # -----------------------------------------------------------------------
    # Write out the inference element info
    for inferenceElement, outputVal in inferences.iteritems():
      inputElement = InferenceElement.getInputElement(inferenceElement)
      if inputElement:
        inputVal = getattr(inputData, inputElement)
      else:
        inputVal = None

      if type(outputVal) in (list, tuple):
        # NOTE(review): "None" in this tuple is not a type; if inputVal were
        # None the membership test would fail (its type is NoneType), so this
        # assert -- and the zip below -- effectively require inputVal to be
        # a sequence here.  Likely intended: (list, tuple, type(None)).
        assert type(inputVal) in (list, tuple, None)

        for iv, ov in zip(inputVal, outputVal):
          # Write actual
          outputRow.append(str(iv))

          # Write inferred
          outputRow.append(str(ov))
      elif isinstance(outputVal, dict):
        if inputVal is not None:
          # If we have a predicted field, include only that in the actuals
          if modelResult.predictedFieldName is not None:
            outputRow.append(str(inputVal[modelResult.predictedFieldName]))
          else:
            outputRow.append(str(inputVal))
        for key in sorted(outputVal.keys()):
          outputRow.append(str(outputVal[key]))
      else:
        if inputVal is not None:
          outputRow.append(str(inputVal))
        outputRow.append(str(outputVal))

    # Metric columns default to 0.0 when a metric has no value yet
    metrics = modelResult.metrics
    for metricName in self.__metricNames:
      outputRow.append(metrics.get(metricName, 0.0))

    #print "DEBUG: _BasicPredictionWriter: writing outputRow: %r" % (outputRow,)

    self.__dataset.appendRecord(outputRow)

    self.__dataset.flush()

    return

  def checkpoint(self, checkpointSink, maxRows):
    """ [virtual method override] Save a checkpoint of the prediction output
    stream. The checkpoint comprises up to maxRows of the most recent inference
    records.

    Parameters:
    ----------------------------------------------------------------------
    checkpointSink:     A File-like object where predictions checkpoint data, if
                        any, will be stored.
    maxRows:            Maximum number of most recent inference rows
                        to checkpoint.
    """

    checkpointSink.truncate()

    if self.__dataset is None:
      # Dataset not opened yet: replay the cached checkpoint verbatim, if any
      if self.__checkpointCache is not None:
        self.__checkpointCache.seek(0)
        shutil.copyfileobj(self.__checkpointCache, checkpointSink)
        checkpointSink.flush()
        return
      else:
        # Nothing to checkpoint
        return

    self.__dataset.flush()
    totalDataRows = self.__dataset.getDataRowCount()

    if totalDataRows == 0:
      # Nothing to checkpoint
      return

    # Open reader of prediction file (suppress missingValues conversion)
    reader = FileRecordStream(self.__datasetPath, missingValues=[])

    # Create CSV writer for writing checkpoint rows
    writer = csv.writer(checkpointSink)

    # Write the header row to checkpoint sink -- just field names
    writer.writerow(reader.getFieldNames())

    # Determine number of rows to checkpoint
    numToWrite = min(maxRows, totalDataRows)

    # Skip initial rows to get to the rows that we actually need to checkpoint
    numRowsToSkip = totalDataRows - numToWrite
    for i in xrange(numRowsToSkip):
      reader.next()

    # Write the data rows to checkpoint sink
    numWritten = 0
    while True:
      row = reader.getNextRecord()
      if row is None:
        break;

      row = [str(element) for element in row]

      #print "DEBUG: _BasicPredictionWriter: checkpointing row: %r" % (row,)

      writer.writerow(row)

      numWritten +=1

    assert numWritten == numToWrite, \
      "numWritten (%s) != numToWrite (%s)" % (numWritten, numToWrite)


    checkpointSink.flush()

    return
###############################################################################
# Prediction Log adapters
###############################################################################
class NonTemporalPredictionLogAdapter(object):
  """ Adapter for a client-instantiated non-temporal log writer: results are
  forwarded to the writer as-is, with no input/prediction alignment needed.

  :param writer: (:class:`PredictionWriterIface`) Non-temporal prediction log
      writer
  """

  def __init__(self, writer):
    self.__writer = writer

  def close(self):
    """Close the underlying writer and drop the reference to it."""
    self.__writer.close()
    self.__writer = None

  def update(self, modelResult):
    """ Emit one input/prediction pair.

    modelResult: An opf_utils.ModelResult object that contains the model
                 input and output for the current timestep.
    """
    self.__writer.append(modelResult)
class TemporalPredictionLogAdapter(object):
  """ Adapter for a client-instantiated temporal log writer.  An
  InferenceShifter aligns each T(i+1) input record with the T(i) prediction
  before the pair is handed to the underlying writer.

  TODO: Right now this is broken
  """

  def __init__(self, writer):
    """
    writer: Non-temporal prediction log writer conforming to
            PredictionWriterIface interface.
    """
    loggerName = "com.numenta.%s.%s" % (self.__class__.__module__,
                                        self.__class__.__name__)
    self.__logger = logging.getLogger(loggerName)
    self.__writer = writer
    self.__inferenceShifter = InferenceShifter()

  def close(self):
    """Close the underlying writer and drop the reference to it."""
    self.__writer.close()
    self.__writer = None

  def update(self, modelResult):
    """ Queue up the T(i+1) prediction value and emit a T(i)
    input/prediction pair, when one is available.  E.g., if the previous
    T(i-1) iteration was learn-only, there is no T(i) prediction in the FIFO
    and no meaningful pair can be emitted.

    modelResult: An opf_utils.ModelResult object that contains the model
                 input and output for the current timestep.
    """
    shifted = self.__inferenceShifter.shift(modelResult)
    self.__writer.append(shifted)
class BasicPredictionLogger(opfenv.PredictionLoggerIface):
  """ This class implements logging of predictions to files as actual vs
  predicted values.

  :param fields: (list) of :class:`nupic.data.field_meta.FieldMetaInfo`
      objects representing the encoder-mapped data row field value sequences
      that will be emitted to this prediction logger.
  :param experimentDir: (string) experiment directory path that contains
      description.py
  :param label: (string) to incorporate into the filename.
  :param inferenceType: constant from opf_utils.InferenceType; selects
      between the temporal and non-temporal log adapters.
  :param checkpointSource: If not None, a File-like object containing the
      previously-checkpointed predictions for setting the initial contents of
      this output stream. Will be copied before returning, if needed.
  """

  def __init__(self, fields, experimentDir, label, inferenceType,
               checkpointSource=None):
    #assert len(fields) > 0

    self.__reprString = (
      "%s(fields=%r)" % (
        self.__class__.__name__, fields))

    self.__inputFieldsMeta = tuple(copy.deepcopy(fields))
    self.__experimentDir = experimentDir
    self.__label = label
    self.__inferenceType = inferenceType

    # The writer and its adapter are created lazily by the first
    # writeRecords() call that receives a non-empty batch.
    self.__writer = None
    self.__logAdapter = None
    self.__loggedMetricNames = None

    # Save checkpoint data until we're ready to create the output writer
    self.__checkpointCache = None
    if checkpointSource is not None:
      checkpointSource.seek(0)
      self.__checkpointCache = StringIO.StringIO()
      shutil.copyfileobj(checkpointSource, self.__checkpointCache)

  def __repr__(self):
    return self.__reprString

  def close(self):
    if self.__logAdapter:
      self.__logAdapter.close()
    self.__logAdapter = None

  def writeRecord(self, modelResult):
    """Write a single model result; convenience wrapper for writeRecords."""
    self.writeRecords([modelResult])

  def writeRecords(self, modelResults, progressCB=None):
    """ Write a batch of model results to the prediction log.

    modelResults: sequence of opf_utils.ModelResult objects
    progressCB:   currently unused; retained for interface compatibility
    """
    # Instantiate the writer/adapter pair lazily on first non-empty batch
    if self.__logAdapter is None and modelResults:
      self.__writer = _BasicPredictionWriter(
        experimentDir=self.__experimentDir,
        label=self.__label,
        inferenceType=self.__inferenceType,
        fields=self.__inputFieldsMeta,
        metricNames=self.__loggedMetricNames,
        checkpointSource=self.__checkpointCache)

      # Dispose of our checkpoint cache now
      if self.__checkpointCache is not None:
        self.__checkpointCache.close()
        self.__checkpointCache = None

      if InferenceType.isTemporal(self.__inferenceType):
        logAdapterClass = TemporalPredictionLogAdapter
      else:
        logAdapterClass = NonTemporalPredictionLogAdapter

      self.__logAdapter = logAdapterClass(self.__writer)
      self.__writer.setLoggedMetrics(self.__loggedMetricNames)

    for modelResult in modelResults:
      # NOTE: previously this branched on modelResult.inferences being None
      # (learn-only) versus not, but both branches performed the identical
      # update; the redundant branch has been collapsed.
      self.__logAdapter.update(modelResult)

  def setLoggedMetrics(self, metricNames):
    self.__loggedMetricNames = metricNames
    if self.__writer is not None:
      self.__writer.setLoggedMetrics(metricNames)

  def checkpoint(self, checkpointSink, maxRows):
    """ Checkpoint up to maxRows of the most recent predictions into
    checkpointSink (a File-like object), which is truncated first.
    """
    checkpointSink.truncate()

    if self.__writer is None:
      if self.__checkpointCache is not None:
        # No writer yet: replay the cached checkpoint contents verbatim
        self.__checkpointCache.seek(0)
        shutil.copyfileobj(self.__checkpointCache, checkpointSink)
        checkpointSink.flush()
        return
      else:
        # Nothing to checkpoint
        return

    self.__writer.checkpoint(checkpointSink, maxRows)
class _FileUtils(object):
@staticmethod
def getExperimentInferenceDirPath(experimentDir):
"""
experimentDir: experiment directory path that contains description.py
Returns: experiment inference directory path string (the path may not
yet exist - see createExperimentInferenceDir())
"""
return os.path.abspath(os.path.join(experimentDir, "inference"))
@classmethod
def createExperimentInferenceDir(cls, experimentDir):
""" Creates the inference output directory for the given experiment
experimentDir: experiment directory path that contains description.py
Returns: path of the inference output directory
"""
path = cls.getExperimentInferenceDirPath(experimentDir)
cls.makeDirectory(path)
return path
@staticmethod
def makeDirectory(path):
""" Makes directory for the given directory path if it doesn't already exist
in the filesystem. Creates all requested directory segments as needed.
path: path of the directory to create.
Returns: nothing
"""
# Create the experiment directory
# TODO Is default mode (0777) appropriate?
try:
os.makedirs(path)
except OSError as e:
if e.errno == os.errno.EEXIST:
#print "Experiment directory already exists (that's okay)."
pass
else:
raise
return
def test():
  """Self-test entry point; currently a no-op placeholder."""
  # The former commented-out logging smoke test referenced
  # BasicLoggingHandlerFactory, which is not defined anywhere in this
  # module; the dead code has been removed.
  return



if __name__ == "__main__":
  test()
| 30,580 | Python | .py | 720 | 35.298611 | 96 | 0.664054 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,958 | model_factory.py | numenta_nupic-legacy/src/nupic/frameworks/opf/model_factory.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file model_factory.py
Model factory.
"""
import logging
import nupic.frameworks.opf.opf_utils as opf_utils
# Import models
from htm_prediction_model import HTMPredictionModel
from model import Model
from two_gram_model import TwoGramModel
from previous_value_model import PreviousValueModel
class ModelFactory(object):
  """
  Static factory class that produces a :class:`nupic.frameworks.opf.model.Model`
  based on a description dict.
  """

  # Lazily-created class-level logger (see __getLogger).
  __logger = None

  @classmethod
  def __getLogger(cls):
    """ Get the logger for this object, creating it on first use.

    :returns: (Logger) A Logger object.
    """
    if cls.__logger is None:
      cls.__logger = opf_utils.initLogger(cls)
    return cls.__logger

  @staticmethod
  def create(modelConfig, logLevel=logging.ERROR):
    """ Create a new model instance, given a description dictionary.

    :param modelConfig: (dict) A dictionary describing the current model,
        `described here <../../quick-start/example-model-params.html>`_.
    :param logLevel: (int) The level of logging output that should be generated
    :raises Exception: Unsupported model type
    :returns: :class:`nupic.frameworks.opf.model.Model`
    """
    logger = ModelFactory.__getLogger()
    logger.setLevel(logLevel)
    logger.debug("ModelFactory returning Model from dict: %s", modelConfig)

    # Dispatch table: model-type name in the config -> Model subclass.
    supportedModels = {
        "HTMPrediction": HTMPredictionModel,
        "TwoGram": TwoGramModel,
        "PreviousValue": PreviousValueModel,
    }

    requestedType = modelConfig['model']
    if requestedType not in supportedModels:
      raise Exception("ModelFactory received unsupported Model type: %s" % \
                      requestedType)
    return supportedModels[requestedType](**modelConfig['modelParams'])

  @staticmethod
  def loadFromCheckpoint(savedModelDir, newSerialization=False):
    """ Load a model that was previously saved to disk.

    :param savedModelDir: (string)
           Directory of where the experiment is to be or was saved
    :param newSerialization: (bool) True to load via the capnproto
           checkpoint format instead of the legacy pickle-based format.
    :returns: (:class:`nupic.frameworks.opf.model.Model`) The loaded model
              instance.
    """
    if newSerialization:
      return HTMPredictionModel.readFromCheckpoint(savedModelDir)
    return Model.load(savedModelDir)
| 3,233 | Python | .py | 80 | 36.2 | 80 | 0.700383 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,959 | experiment_runner.py | numenta_nupic-legacy/src/nupic/frameworks/opf/experiment_runner.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This script provides the runExperiment() API function that is used
by the command-line client run_opf_experiment.py of Online Prediction
Framework (OPF). It executes a single experiment.
This runner is generally run through `scripts/run_opf_experiment.py`.
"""
from collections import namedtuple
import itertools
import logging
import optparse
import os
import sys
import random
import numpy
from nupic.data import json_helpers
from nupic.frameworks.opf import opf_basic_environment, helpers
from nupic.frameworks.opf.exp_description_api import OpfEnvironment
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.opf_task_driver import OPFTaskDriver
from nupic.frameworks.opf.opf_utils import (InferenceElement, matchPatterns,
validateOpfJsonValue)
from nupic.support import initLogging
# Directory-name extension used for every model checkpoint created or looked
# up by this runner (e.g. "MyCheckpoint.nta" under <experiment>/savedmodels/).
g_defaultCheckpointExtension = ".nta"
# Schema of the Private Command-line Options dictionary returned by
# _parseCommandLineOptions(). This "Private" options dict is consumed internally
# by runExperiment (i.e. not passed to external modules).
# The dict is validated with json_helpers.validate in _runExperimentImpl;
# presumably validictory-style JSON schema -- note the non-standard "blank"
# keys flagged in the TODOs below.
g_parsedPrivateCommandLineOptionsSchema = {
  "description":"OPF RunExperiment control args",
  "type":"object",
  "additionalProperties":False,
  "properties":{
    "createCheckpointName":{
      "description":"Create a model and save it under the checkpoint name, " + \
                    "but don't run it. " + \
                    "TODO: 'blank' is a non-standard JSON schema setting; " + \
                    "validictory 8.0 supports a blank_by_default arg.",
      "required":True,
      "type":"string",
      "minLength":0,
      "blank":True
    },
    "listAvailableCheckpoints":{
      "description":"List all checkpoints and exit",
      "required":True,
      "type":"boolean"
    },
    "listTasks":{
      "description":"List all tasks and exit",
      "required":True,
      "type":"boolean"
    },
    "runCheckpointName":{
      "description":"Name of saved checkpoint to load and run" + \
                    "TODO: 'blank' is a non-standard JSON schema setting; " + \
                    "validictory 8.0 supports a blank_by_default arg.",
      "required":True,
      "type":"string",
      "minLength":0,
      "blank":True
    },
    "newSerialization":{
      "description":"Use new capnproto serialization.",
      "required":True,
      "type":"boolean"
    },
    #"reuseDatasets":{
    #  "description":"Keep existing generated/aggregated datasets",
    #  "required":True,
    #  "type":"boolean"
    #},
    "testMode":{
      "description":"True to override iteration counts with very small values",
      "required":True,
      "type":"boolean"
    },
    "taskLabels":{
      "required":False,
      "type":"array",
      "uniqueItems":False,
      "minItems":0,
      "items":{"type":"string", "minLength":1}
    },
    "checkpointModel":{
      "description":"True to checkpoint model after running each task",
      "required":True,
      "type":"boolean"
    },
  }
}
def runExperiment(args, model=None):
  """
  Run a single OPF experiment.

  .. note:: The caller is responsible for initializing python logging before
     calling this function (e.g., import :mod:`nupic.support`;
     :meth:`nupic.support.initLogging`)

  See also: :meth:`.initExperimentPrng`.

  :param args: (string) Experiment command-line args list (not including
      sys.argv[0]). Run with ``--help`` for the full option reference;
      notable options include ``-c <CHECKPOINT>`` (create-only),
      ``--listCheckpoints``, ``--listTasks``, ``--load=<CHECKPOINT>``,
      ``--newSerialization``, ``--tasks``, ``--testMode`` and
      ``--noCheckpoint``.
  :param model: (:class:`~nupic.frameworks.opf.model.Model`) For testing, may
      pass in an existing OPF Model to use instead of creating a new one.
  :returns: (:class:`~nupic.frameworks.opf.model.Model`)
      reference to OPF Model instance that was constructed (this
      is provided to aid with debugging) or None, if none was
      created.
  """
  # Translate the raw CLI args into this runner's private option record,
  # then hand all real work to the implementation helper.
  parsedOptions = _parseCommandLineOptions(args)
  return _runExperimentImpl(parsedOptions, model)
def initExperimentPrng():
  """Initialize PRNGs that may be used by other modules in the experiment stack.

  Seeds both the stdlib ``random`` module and ``numpy.random`` with a fixed
  seed so that experiment runs are repeatable.

  .. note:: User may call this function to initialize PRNGs that are used by
     the experiment stack before calling runExperiment(), unless user has its
     own logic for initializing these PRNGs.
  """
  fixedSeed = 42
  random.seed(fixedSeed)
  numpy.random.seed(fixedSeed)
ParseCommandLineOptionsResult = namedtuple('ParseCommandLineOptionsResult',
('experimentDir', 'privateOptions'))
"""_parseCommandLineOptions() return value type
Args:
experimentDir: path of experiment directory that contains description.py
privateOptions: dictionary of options of consumption only by this script;
the schema is described by g_parsedPrivateCommandLineOptionsSchema
"""
def _parseCommandLineOptions(args):
  """Parse command line options.

  Args:
    args: command line arguments (not including sys.argv[0])

  Returns:
    namedtuple ParseCommandLineOptionsResult
  """
  usageStr = (
    "%prog [options] descriptionPyDirectory\n"
    "This script runs a single OPF Model described by description.py "
    "located in the given directory."
    )
  parser = optparse.OptionParser(usage=usageStr)
  parser.add_option("-c",
                    help="Create a model and save it under the given "
                         "<CHECKPOINT> name, but don't run it",
                    dest="createCheckpointName",
                    action="store", type="string", default="",
                    metavar="<CHECKPOINT>")
  parser.add_option("--listCheckpoints",
                    help="List all available checkpoints",
                    dest="listAvailableCheckpoints",
                    action="store_true", default=False)
  parser.add_option("--listTasks",
                    help="List all task labels in description.py",
                    dest="listTasks",
                    action="store_true", default=False)
  parser.add_option("--load",
                    help="Load a model from the given <CHECKPOINT> and run it. "
                         "Run with --listCheckpoints flag for more details. ",
                    dest="runCheckpointName",
                    action="store", type="string", default="",
                    metavar="<CHECKPOINT>")
  parser.add_option("--newSerialization",
                    help="Use new capnproto serialization",
                    dest="newSerialization",
                    action="store_true", default=False)
  #parser.add_option("--reuseDatasets",
  #                  help="Keep existing generated/aggregated datasets",
  #                  dest="reuseDatasets", action="store_true",
  #                  default=False)
  # --tasks consumes a variable number of following args via a custom
  # callback, because optparse has no native support for nargs="*".
  parser.add_option("--tasks",
                    help="Run the tasks with the given TASK LABELS "
                         "in the order they are given.  Either end of "
                         "arg-list, or a standalone dot ('.') arg or "
                         "the next short or long option name (-a or "
                         "--blah) terminates the list. NOTE: FAILS "
                         "TO RECOGNIZE task label names with one or more "
                         "leading dashes. [default: run all of the tasks in "
                         "description.py]",
                    dest="taskLabels", default=[],
                    action="callback", callback=reapVarArgsCallback,
                    metavar="TASK_LABELS")
  parser.add_option("--testMode",
                    help="Reduce iteration count for testing",
                    dest="testMode", action="store_true",
                    default=False)
  parser.add_option("--noCheckpoint",
                    help="Don't checkpoint the model after running each task.",
                    dest="checkpointModel", action="store_false",
                    default=True)
  options, experiments = parser.parse_args(args)
  # Validate args
  # -c / --listCheckpoints / --listTasks / --load each select a distinct
  # run mode, so at most one of them may be given.
  mutuallyExclusiveOptionCount = sum([bool(options.createCheckpointName),
                                      options.listAvailableCheckpoints,
                                      options.listTasks,
                                      bool(options.runCheckpointName)])
  if mutuallyExclusiveOptionCount > 1:
    _reportCommandLineUsageErrorAndExit(
      parser,
      "Options: -c, --listCheckpoints, --listTasks, and --load are "
      "mutually exclusive. Please select only one")
  mutuallyExclusiveOptionCount = sum([bool(not options.checkpointModel),
                                      bool(options.createCheckpointName)])
  if mutuallyExclusiveOptionCount > 1:
    _reportCommandLineUsageErrorAndExit(
      parser,
      "Options: -c and --noCheckpoint are "
      "mutually exclusive. Please select only one")
  if len(experiments) != 1:
    _reportCommandLineUsageErrorAndExit(
      parser,
      "Exactly ONE experiment must be specified, but got %s (%s)" % (
        len(experiments), experiments))
  # Done with parser
  parser.destroy()
  # Prepare results
  # Directory path of the experiment (that contain description.py)
  experimentDir = os.path.abspath(experiments[0])
  # RunExperiment.py's private options (g_parsedPrivateCommandLineOptionsSchema)
  privateOptions = dict()
  privateOptions['createCheckpointName'] = options.createCheckpointName
  privateOptions['listAvailableCheckpoints'] = options.listAvailableCheckpoints
  privateOptions['listTasks'] = options.listTasks
  privateOptions['runCheckpointName'] = options.runCheckpointName
  privateOptions['newSerialization'] = options.newSerialization
  privateOptions['testMode'] = options.testMode
  #privateOptions['reuseDatasets'] = options.reuseDatasets
  privateOptions['taskLabels'] = options.taskLabels
  privateOptions['checkpointModel'] = options.checkpointModel
  result = ParseCommandLineOptionsResult(experimentDir=experimentDir,
                                         privateOptions=privateOptions)
  return result
def reapVarArgsCallback(option, optStr, value, parser):
  """Used as optparse callback for reaping a variable number of option args.

  The option may be specified multiple times, and all the args associated with
  that option name will be accumulated in the order that they are encountered.

  Args:
    option: the optparse Option instance being processed
    optStr: the option string seen on the command line (e.g. "--tasks")
    value: unused by this callback (optparse passes None for nargs=0)
    parser: the OptionParser; parser.rargs holds the not-yet-consumed args
  """
  newValues = []
  # Reap the args, taking care to stop before the next option or '.'
  gotDot = False
  for arg in parser.rargs:
    # Stop on --longname options
    if arg.startswith("--") and len(arg) > 2:
      break
    # Stop on -b options
    if arg.startswith("-") and len(arg) > 1:
      break
    if arg == ".":
      gotDot = True
      break
    newValues.append(arg)
  if not newValues:
    raise optparse.OptionValueError(
      ("Empty arg list for option %r expecting one or more args "
       "(remaining tokens: %r)") % (optStr, parser.rargs))
  # Consume the reaped args (plus the terminating dot, if present) from the
  # parser's remaining-args list so optparse doesn't re-process them.
  del parser.rargs[:len(newValues) + int(gotDot)]
  # Retrieve the existing arg accumulator, if any; repeated occurrences of
  # the option on the command line accumulate into the same list.
  value = getattr(parser.values, option.dest, [])
  #print "Previous value: %r" % value
  if value is None:
    value = []
  # Append the new args to the existing ones and save to the parser
  value.extend(newValues)
  setattr(parser.values, option.dest, value)
def _reportCommandLineUsageErrorAndExit(parser, message):
  """Report usage error and exit program with error indication.

  Args:
    parser: the optparse.OptionParser whose usage text should be shown
    message: error description to display after the usage text
  """
  # Emit the usage text and the error message to stdout (matching the
  # original py2 print statements), then exit with a non-zero status.
  sys.stdout.write(parser.get_usage() + "\n")
  sys.stdout.write(message + "\n")
  sys.exit(1)
def _runExperimentImpl(options, model=None):
  """Creates and runs the experiment.

  Args:
    options: namedtuple ParseCommandLineOptionsResult
    model: For testing: may pass in an existing OPF Model instance
        to use instead of creating a new one.

  Returns: reference to OPFExperiment instance that was constructed (this
      is provided to aid with debugging) or None, if none was
      created.
  """
  json_helpers.validate(options.privateOptions,
                        schemaDict=g_parsedPrivateCommandLineOptionsSchema)
  # Load the experiment's description.py module
  experimentDir = options.experimentDir
  descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
    experimentDir)
  expIface = helpers.getExperimentDescriptionInterfaceFromModule(
    descriptionPyModule)
  # Handle "list checkpoints" request
  if options.privateOptions['listAvailableCheckpoints']:
    _printAvailableCheckpoints(experimentDir)
    return None
  # Load experiment tasks
  experimentTasks = expIface.getModelControl().get('tasks', [])
  # If the tasks list is empty, and this is a nupic environment description
  # file being run from the OPF, convert it to a simple OPF description file.
  if (len(experimentTasks) == 0 and
      expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):
    expIface.convertNupicEnvToOPF()
    experimentTasks = expIface.getModelControl().get('tasks', [])
  # Ensures all the source locations are either absolute paths or relative to
  # the nupic.datafiles package_data location.
  expIface.normalizeStreamSources()
  # Extract option
  newSerialization = options.privateOptions['newSerialization']
  # Handle listTasks
  if options.privateOptions['listTasks']:
    print "Available tasks:"
    for label in [t['taskLabel'] for t in experimentTasks]:
      print "\t", label
    return None
  # Construct the experiment instance: either resume from a named checkpoint,
  # reuse a caller-supplied model, or build a fresh model from description.py.
  if options.privateOptions['runCheckpointName']:
    assert model is None
    checkpointName = options.privateOptions['runCheckpointName']
    model = ModelFactory.loadFromCheckpoint(
          savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),
          newSerialization=newSerialization)
  elif model is not None:
    print "Skipping creation of OPFExperiment instance: caller provided his own"
  else:
    modelDescription = expIface.getModelDescription()
    model = ModelFactory.create(modelDescription)
  # Handle "create model" request: save the freshly-built model and return
  # without running any tasks.
  if options.privateOptions['createCheckpointName']:
    checkpointName = options.privateOptions['createCheckpointName']
    _saveModel(model=model,
               experimentDir=experimentDir,
               checkpointLabel=checkpointName,
               newSerialization=newSerialization)
    return model
  # Build the task list
  # Default task execution index list is in the natural list order of the tasks
  taskIndexList = range(len(experimentTasks))
  customTaskExecutionLabelsList = options.privateOptions['taskLabels']
  if customTaskExecutionLabelsList:
    taskLabelsList = [t['taskLabel'] for t in experimentTasks]
    taskLabelsSet = set(taskLabelsList)
    customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)
    assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
           ("Some custom-provided task execution labels don't correspond "
            "to actual task labels: mismatched labels: %r; actual task "
            "labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
                              customTaskExecutionLabelsList)
    taskIndexList = [taskLabelsList.index(label) for label in
                     customTaskExecutionLabelsList]
    print "#### Executing custom task list: %r" % [taskLabelsList[i] for
                                                    i in taskIndexList]
  # Run all experiment tasks
  for taskIndex in taskIndexList:
    task = experimentTasks[taskIndex]
    # Create a task runner and run it!
    taskRunner = _TaskRunner(model=model,
                             task=task,
                             cmdOptions=options)
    taskRunner.run()
    del taskRunner
    # Checkpoint after each task unless --noCheckpoint was given.
    if options.privateOptions['checkpointModel']:
      _saveModel(model=model,
                 experimentDir=experimentDir,
                 checkpointLabel=task['taskLabel'],
                 newSerialization=newSerialization)
  return model
def _saveModel(model, experimentDir, checkpointLabel, newSerialization=False):
  """Serialize the model into the experiment's checkpoint directory.

  Args:
    model: OPF Model instance to serialize
    experimentDir: directory of the experiment (contains description.py)
    checkpointLabel: name under which to store the checkpoint
    newSerialization: True to use capnproto serialization; False for the
        legacy save() path
  """
  targetDir = _getModelCheckpointDir(experimentDir, checkpointLabel)
  if newSerialization:
    model.writeToCheckpoint(targetDir)
  else:
    model.save(saveModelDir=targetDir)
def _getModelCheckpointDir(experimentDir, checkpointLabel):
  """Build the directory path used to serialize one named model checkpoint.

  Args:
    experimentDir: directory of the experiment
    checkpointLabel: Checkpoint label (string)

  Returns:
    absolute path to the serialization directory
  """
  checkpointDirName = checkpointLabel + g_defaultCheckpointExtension
  return os.path.abspath(
      os.path.join(getCheckpointParentDir(experimentDir), checkpointDirName))
def getCheckpointParentDir(experimentDir):
  """Get checkpoint parent dir.

  Args:
    experimentDir: directory of the experiment (contains description.py)

  Returns: absolute path to the base serialization directory within which
      model checkpoints for this experiment are created
  """
  return os.path.abspath(os.path.join(experimentDir, "savedmodels"))
def _checkpointLabelFromCheckpointDir(checkpointDir):
  """Returns a checkpoint label string for the given model checkpoint directory.

  Args:
    checkpointDir: relative or absolute model checkpoint directory path;
        must end with g_defaultCheckpointExtension
  """
  assert checkpointDir.endswith(g_defaultCheckpointExtension)
  # The label is the directory's basename minus the checkpoint extension.
  baseName = os.path.split(checkpointDir)[1]
  return baseName[:-len(g_defaultCheckpointExtension)]
def _isCheckpointDir(checkpointDir):
  """Return true iff checkpointDir appears to be a checkpoint directory.

  A checkpoint directory is a real directory whose basename is not hidden
  (does not start with '.') and which carries the checkpoint extension.

  Args:
    checkpointDir: relative or absolute candidate path

  Returns:
    bool
  """
  lastSegment = os.path.split(checkpointDir)[1]
  # startswith() also copes with an empty basename (a path ending in a
  # separator); the original indexing lastSegment[0] raised IndexError there.
  if lastSegment.startswith('.'):
    return False
  if not checkpointDir.endswith(g_defaultCheckpointExtension):
    return False
  if not os.path.isdir(checkpointDir):
    return False
  return True
def _printAvailableCheckpoints(experimentDir):
  """List available checkpoints for the specified experiment.

  Args:
    experimentDir: directory of the experiment (contains description.py)
  """
  checkpointParentDir = getCheckpointParentDir(experimentDir)
  if not os.path.exists(checkpointParentDir):
    print "No available checkpoints."
    return
  # Only directories that pass _isCheckpointDir (correct extension, not
  # hidden, actually a directory) are reported.
  checkpointDirs = [x for x in os.listdir(checkpointParentDir)
                    if _isCheckpointDir(os.path.join(checkpointParentDir, x))]
  if not checkpointDirs:
    print "No available checkpoints."
    return
  print "Available checkpoints:"
  checkpointList = [_checkpointLabelFromCheckpointDir(x)
                    for x in checkpointDirs]
  for checkpoint in sorted(checkpointList):
    print "\t", checkpoint
  print
  print "To start from a checkpoint:"
  print "  python run_opf_experiment.py experiment --load <CHECKPOINT>"
  print "For example, to start from the checkpoint \"MyCheckpoint\":"
  print "  python run_opf_experiment.py experiment --load MyCheckpoint"
class _TaskRunner(object):
  """This class is responsible for running a single experiment task on the
  given Model instance.

  It wires the model to a dataset reader, an OPFTaskDriver, a prediction
  logger and a metrics logger, then drives the record-processing loop in
  run().
  """

  __FILE_SCHEME = "file://"

  def __init__(self, model, task, cmdOptions):
    """ Constructor

    Args:
      model: The OPF Model instance against which to run the task
      task: A dictionary conforming to opfTaskSchema.json
      cmdOptions: ParseCommandLineOptionsResult namedtuple
    """
    validateOpfJsonValue(task, "opfTaskSchema.json")
    # Set up our logger
    self.__logger = logging.getLogger(".".join(
      ['com.numenta', self.__class__.__module__, self.__class__.__name__]))
    #self.__logger.setLevel(logging.DEBUG)
    self.__logger.debug(("Instantiated %s(" + \
                         "model=%r, " + \
                         "task=%r, " + \
                         "cmdOptions=%r)") % \
                            (self.__class__.__name__,
                             model,
                             task,
                             cmdOptions))
    # Generate a new dataset from streamDef and create the dataset reader
    streamDef = task['dataset']
    datasetReader = opf_basic_environment.BasicDatasetReader(streamDef)
    self.__model = model
    self.__datasetReader = datasetReader
    self.__task = task
    self.__cmdOptions = cmdOptions
    # Prediction logger: writes one record per processed input row.
    self.__predictionLogger = opf_basic_environment.BasicPredictionLogger(
      fields=model.getFieldInfo(),
      experimentDir=cmdOptions.experimentDir,
      label=task['taskLabel'],
      inferenceType=self.__model.getInferenceType())
    taskControl = task['taskControl']
    # Create Task Driver
    self.__taskDriver = OPFTaskDriver(
      taskControl=taskControl,
      model=model)
    # Restrict logged metrics to those matching the task's 'loggedMetrics'
    # patterns (None means no filtering patterns were supplied).
    loggedMetricPatterns = taskControl.get('loggedMetrics', None)
    loggedMetricLabels = matchPatterns(loggedMetricPatterns,
                                       self.__taskDriver.getMetricLabels())
    self.__predictionLogger.setLoggedMetrics(loggedMetricLabels)
    # Create a prediction metrics logger
    self.__metricsLogger = opf_basic_environment.BasicPredictionMetricsLogger(
      experimentDir=cmdOptions.experimentDir,
      label=task['taskLabel'])

  def __del__(self):
    """Destructor"""
    #print "IN %s.%r destructor" % (type(self), self)

  def run(self):
    """Runs a single experiment task"""
    self.__logger.debug("run(): Starting task <%s>", self.__task['taskLabel'])
    # Set up the task
    # Create our main loop-control iterator; a negative iterationCount means
    # "run until the dataset is exhausted" (unbounded itertools.count).
    if self.__cmdOptions.privateOptions['testMode']:
      numIters = 10
    else:
      numIters = self.__task['iterationCount']
    if numIters >= 0:
      iterTracker = iter(xrange(numIters))
    else:
      iterTracker = iter(itertools.count())
    # Initialize periodic activities
    periodic = PeriodicActivityMgr(
      requestedActivities=self._createPeriodicActivities())
    # Reset sequence states in the model, so it starts looking for a new
    # sequence
    # TODO: should this be done in OPFTaskDriver.setup(), instead? Is it always
    # desired in Nupic?
    self.__model.resetSequenceStates()
    # Have Task Driver perform its initial setup activities, including setup
    # callbacks
    self.__taskDriver.setup()
    # Run it!
    while True:
      # Check controlling iterator first
      try:
        next(iterTracker)
      except StopIteration:
        break
      # Read next input record
      try:
        inputRecord = self.__datasetReader.next()
      except StopIteration:
        break
      # Process input record
      result = self.__taskDriver.handleInputRecord(inputRecord=inputRecord)
      # Drop the (potentially large) encodings before logging the result.
      if InferenceElement.encodings in result.inferences:
        result.inferences.pop(InferenceElement.encodings)
      self.__predictionLogger.writeRecord(result)
      # Run periodic activities
      periodic.tick()
    # Dump the experiment metrics at the end of the task
    self._getAndEmitExperimentMetrics(final=True)
    # Have Task Driver perform its final activities
    self.__taskDriver.finalize()
    # Reset sequence states in the model, so it starts looking for a new
    # sequence
    # TODO: should this be done in OPFTaskDriver.setup(), instead? Is it always
    # desired in Nupic?
    self.__model.resetSequenceStates()

  def _createPeriodicActivities(self):
    """Creates and returns a list of activities for this TaskRunner instance.

    Returns: a list of PeriodicActivityRequest elements
    """
    # Initialize periodic activities
    periodicActivities = []
    # Metrics reporting: emit the current experiment metrics every 1000 ticks.
    class MetricsReportCb(object):
      def __init__(self, taskRunner):
        self.__taskRunner = taskRunner
        return
      def __call__(self):
        self.__taskRunner._getAndEmitExperimentMetrics()
    reportMetrics = PeriodicActivityRequest(
      repeating=True,
      period=1000,
      cb=MetricsReportCb(self))
    periodicActivities.append(reportMetrics)
    # Iteration progress: log how many records have been processed so far.
    class IterationProgressCb(object):
      PROGRESS_UPDATE_PERIOD_TICKS = 1000
      def __init__(self, taskLabel, requestedIterationCount, logger):
        self.__taskLabel = taskLabel
        self.__requestedIterationCount = requestedIterationCount
        self.__logger = logger
        self.__numIterationsSoFar = 0
      def __call__(self):
        self.__numIterationsSoFar += self.PROGRESS_UPDATE_PERIOD_TICKS
        self.__logger.debug("%s: ITERATION PROGRESS: %s of %s" % (
          self.__taskLabel,
          self.__numIterationsSoFar,
          self.__requestedIterationCount))
    iterationProgressCb = IterationProgressCb(
      taskLabel=self.__task['taskLabel'],
      requestedIterationCount=self.__task['iterationCount'],
      logger=self.__logger)
    iterationProgressReporter = PeriodicActivityRequest(
      repeating=True,
      period=IterationProgressCb.PROGRESS_UPDATE_PERIOD_TICKS,
      cb=iterationProgressCb)
    periodicActivities.append(iterationProgressReporter)
    return periodicActivities

  def _getAndEmitExperimentMetrics(self, final=False):
    """Fetch metrics from the task driver and emit them.

    Args:
      final: True to emit them as the task's final metrics; False for a
          periodic (mid-task) emission.
    """
    # Get metrics
    metrics = self.__taskDriver.getMetrics()
    # Emit metrics
    if metrics is not None:
      if final:
        self.__metricsLogger.emitFinalMetrics(metrics)
      else:
        self.__metricsLogger.emitPeriodicMetrics(metrics)
PeriodicActivityRequest = namedtuple("PeriodicActivityRequest",
("repeating", "period", "cb"))
"""Passed as parameter to PeriodicActivityMgr
repeating: True if the activity is a repeating activite, False if one-shot
period: period of activity's execution (number of "ticks")
cb: a callable to call upon expiration of period; will be called
as cb()
"""
class PeriodicActivityMgr(object):
Activity = namedtuple("Activity",
("repeating", "period", "cb", "iteratorHolder"))
"""Activity
iteratorHolder: a list holding one iterator; we use a list so that we can
replace the iterator for repeating activities (a tuple would not
allow it if the field was an imutable value)
"""
def __init__(self, requestedActivities):
"""
requestedActivities: a sequence of PeriodicActivityRequest elements
"""
self.__activities = []
for req in requestedActivities:
act = self.Activity(repeating=req.repeating,
period=req.period,
cb=req.cb,
iteratorHolder=[iter(xrange(req.period-1))])
self.__activities.append(act)
def tick(self):
"""Activity tick handler; services all activities
Returns:
True if controlling iterator says it's okay to keep going;
False to stop
"""
# Run activities whose time has come
for act in self.__activities:
if not act.iteratorHolder[0]:
continue
try:
next(act.iteratorHolder[0])
except StopIteration:
act.cb()
if act.repeating:
act.iteratorHolder[0] = iter(xrange(act.period-1))
else:
act.iteratorHolder[0] = None
return True
def main():
  """ Module-level entry point. Run according to options in sys.argv.

  Usage: python -m nupic.frameworks.opf.experiment_runner
  """
  initLogging(verbose=True)
  # Fix the seeds used by the stdlib and numpy PRNGs so that 'random'
  # numbers repeat across experiment runs.
  initExperimentPrng()
  runExperiment(sys.argv[1:])
# Standard script entry point.
if __name__ == "__main__":
  main()
| 29,504 | Python | .py | 680 | 35.711765 | 86 | 0.676714 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,960 | two_gram_model.py | numenta_nupic-legacy/src/nupic/frameworks/opf/two_gram_model.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Module containing the two gram OPF model implementation. """
import collections
import itertools
from nupic import encoders
from nupic.data import field_meta
from nupic.frameworks.opf import model
from nupic.frameworks.opf import opf_utils
from opf_utils import InferenceType
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.frameworks.opf.two_gram_model_capnp import TwoGramModelProto
class TwoGramModel(model.Model):
"""
Two-gram benchmark model.
:param inferenceType: (:class:`nupic.frameworks.opf.opf_utils.InferenceType`)
:param encoders: a dict of dicts, eventually sent to
:meth:`~nupic.encoders.multi.MultiEncoder.addMultipleEncoders` (see
docs of that method for param details).
"""
def __init__(self, inferenceType=InferenceType.TemporalNextStep,
encoderParams=()):
super(TwoGramModel, self).__init__(inferenceType)
self._logger = opf_utils.initLogger(self)
self._reset = False
self._hashToValueDict = dict()
self._learningEnabled = True
self._encoder = encoders.MultiEncoder(encoderParams)
self._fieldNames = self._encoder.getScalarNames()
self._prevValues = [] * len(self._fieldNames)
self._twoGramDicts = [dict() for _ in xrange(len(self._fieldNames))]
def run(self, inputRecord):
results = super(TwoGramModel, self).run(inputRecord)
# Set up the lists of values, defaults, and encoded values.
values = [inputRecord[k] for k in self._fieldNames]
defaults = ['' if type(v) == str else 0 for v in values]
inputFieldEncodings = self._encoder.encodeEachField(inputRecord)
inputBuckets = self._encoder.getBucketIndices(inputRecord)
results.sensorInput = opf_utils.SensorInput(
dataRow=values, dataEncodings=inputFieldEncodings,
sequenceReset=int(self._reset))
# Keep track of the last value associated with each encoded value for that
# predictions can be returned in the original value format.
for value, bucket in itertools.izip(values, inputBuckets):
self._hashToValueDict[bucket] = value
# Update the two-gram dict if learning is enabled.
for bucket, prevValue, twoGramDict in itertools.izip(
inputBuckets, self._prevValues, self._twoGramDicts):
if self._learningEnabled and not self._reset:
if prevValue not in twoGramDict:
twoGramDict[prevValue] = collections.defaultdict(int)
twoGramDict[prevValue][bucket] += 1
# Populate the results.inferences dict with the predictions and encoded
# predictions.
predictions = []
encodedPredictions = []
for bucket, twoGramDict, default, fieldName in (
itertools.izip(inputBuckets, self._twoGramDicts, defaults,
self._fieldNames)):
if bucket in twoGramDict:
probabilities = twoGramDict[bucket].items()
prediction = self._hashToValueDict[
max(probabilities, key=lambda x: x[1])[0]]
predictions.append(prediction)
encodedPredictions.append(self._encoder.encodeField(fieldName,
prediction))
else:
predictions.append(default)
encodedPredictions.append(self._encoder.encodeField(fieldName,
default))
results.inferences = dict()
results.inferences[opf_utils.InferenceElement.prediction] = predictions
results.inferences[opf_utils.InferenceElement.encodings] = encodedPredictions
self._prevValues = inputBuckets
self._reset = False
return results
def finishLearning(self):
self._learningEnabled = False
def setFieldStatistics(self,fieldStats):
"""
Since the two-gram has no use for this information, this is a no-op
"""
pass
def getFieldInfo(self):
fieldTypes = self._encoder.getDecoderOutputFieldTypes()
assert len(self._fieldNames) == len(fieldTypes)
return tuple(field_meta.FieldMetaInfo(*args) for args in
itertools.izip(
self._fieldNames, fieldTypes,
itertools.repeat(field_meta.FieldMetaSpecial.none)))
def getRuntimeStats(self):
# TODO: Add debugging stats.
return dict()
def _getLogger(self):
return self._logger
def resetSequenceStates(self):
self._reset = True
@staticmethod
def getSchema():
return TwoGramModelProto
  @classmethod
  def read(cls, proto):
    """
    Reconstruct a TwoGramModel from a serialized capnp message.

    :param proto: capnp TwoGramModelProto message reader
    :returns: new TwoGramModel instance populated from ``proto``
    """
    # Bypass __init__; restore the base-class portion straight from the proto.
    instance = object.__new__(cls)
    super(TwoGramModel, instance).__init__(proto=proto.modelBase)
    instance._logger = opf_utils.initLogger(instance)
    instance._reset = proto.reset
    # Maps encoded bucket hash -> last raw value seen for that bucket.
    instance._hashToValueDict = {x.hash: x.value
                                 for x in proto.hashToValueDict}
    instance._learningEnabled = proto.learningEnabled
    instance._encoder = encoders.MultiEncoder.read(proto.encoder)
    instance._fieldNames = instance._encoder.getScalarNames()
    instance._prevValues = list(proto.prevValues)
    # One two-gram dict per field: {prevBucket: {bucket: count}}.
    instance._twoGramDicts = [dict() for _ in xrange(len(proto.twoGramDicts))]
    for idx, field in enumerate(proto.twoGramDicts):
      for entry in field:
        # -1 is the serialized sentinel for "no previous value" (None key).
        prev = None if entry.value == -1 else entry.value
        instance._twoGramDicts[idx][prev] = collections.defaultdict(int)
        for bucket in entry.buckets:
          instance._twoGramDicts[idx][prev][bucket.index] = bucket.count
    return instance
  def write(self, proto):
    """
    Serialize this model into a capnp message builder.

    :param proto: capnp TwoGramModelProto message builder
    """
    super(TwoGramModel, self).writeBaseToProto(proto.modelBase)
    proto.reset = self._reset
    proto.learningEnabled = self._learningEnabled
    proto.prevValues = self._prevValues
    self._encoder.write(proto.encoder)
    # Flatten the hash->value map into a list of {hash, value} structs.
    proto.hashToValueDict = [{"hash": h, "value": v}
                             for h, v in self._hashToValueDict.items()]
    # Flatten each field's two-gram dict into a list of
    # {value: prevBucket, buckets: [{index, count}, ...]} structs.
    twoGramDicts = []
    for items in self._twoGramDicts:
      twoGramArr = []
      for prev, values in items.iteritems():
        buckets = [{"index": index, "count": count}
                   for index, count in values.iteritems()]
        if prev is None:
          # None has no capnp representation; -1 is the sentinel that
          # read() converts back to None.
          prev = -1
        twoGramArr.append({"value": prev, "buckets": buckets})
      twoGramDicts.append(twoGramArr)
    proto.twoGramDicts = twoGramDicts
def __getstate__(self):
# NOTE This deletion doesn't seem to make sense, as someone might want to
# serialize and then continue to use the model instance.
del self._logger
return self.__dict__
def __setstate__(self):
self._logger = opf_utils.initLogger(self)
| 7,624 | Python | .py | 174 | 37.390805 | 81 | 0.688124 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,961 | htm_prediction_model_classifier_helper.py | numenta_nupic-legacy/src/nupic/frameworks/opf/htm_prediction_model_classifier_helper.py | import copy
import numpy
from nupic.support.configuration import Configuration
from nupic.frameworks.opf.exceptions import (HTMPredictionModelInvalidRangeError,
HTMPredictionModelInvalidArgument)
class _CLAClassificationRecord(object):
"""
A single record to store data associated with a single prediction for the
anomaly classifier.
ROWID - prediction stream ROWID record number
setByUser - if true, a delete must be called explicitly on this point to
remove its label
"""
__slots__ = ["ROWID", "anomalyScore", "anomalyVector", "anomalyLabel",
"setByUser"]
def __init__(self, ROWID, anomalyScore, anomalyVector, anomalyLabel,
setByUser=False):
self.ROWID = ROWID
self.anomalyScore = anomalyScore
self.anomalyVector = anomalyVector
self.anomalyLabel = anomalyLabel
self.setByUser = setByUser
def __getstate__(self):
obj_slot_values = dict((k, getattr(self, k)) for k in self.__slots__)
return obj_slot_values
def __setstate__(self, data_dict):
for (name, value) in data_dict.iteritems():
setattr(self, name, value)
class HTMPredictionModelClassifierHelper(object):
"""
This class implements a record classifier used to classify prediction
records. It currently depends on the KNN classifier within the parent model.
Currently it is classifying based on SP / TM properties and has a sliding
window of 1000 records.
The model should call the compute() method for each iteration that will be
classified.
This model also exposes methods to classify records after they have been
processed.
"""
AUTO_THRESHOLD_CLASSIFIED_LABEL = "Auto Threshold Classification"
AUTO_TAG = " (auto)"
__VERSION__ = 3
def __init__(self, htm_prediction_model, anomalyParams={}):
if anomalyParams is None:
anomalyParams = {}
self.htm_prediction_model = htm_prediction_model
self._version = HTMPredictionModelClassifierHelper.__VERSION__
self._classificationMaxDist = 0.1
if 'autoDetectWaitRecords' not in anomalyParams or \
anomalyParams['autoDetectWaitRecords'] is None:
self._autoDetectWaitRecords = int(Configuration.get(
'nupic.model.temporalAnomaly.wait_records'))
else:
self._autoDetectWaitRecords = anomalyParams['autoDetectWaitRecords']
if 'autoDetectThreshold' not in anomalyParams or \
anomalyParams['autoDetectThreshold'] is None:
self._autoDetectThreshold = float(Configuration.get(
'nupic.model.temporalAnomaly.auto_detect_threshold'))
else:
self._autoDetectThreshold = anomalyParams['autoDetectThreshold']
if 'anomalyCacheRecords' not in anomalyParams or \
anomalyParams['anomalyCacheRecords'] is None:
self._history_length = int(Configuration.get(
'nupic.model.temporalAnomaly.window_length'))
else:
self._history_length = anomalyParams['anomalyCacheRecords']
if 'anomalyVectorType' not in anomalyParams or \
anomalyParams['anomalyVectorType'] is None:
self._vectorType = str(Configuration.get(
'nupic.model.temporalAnomaly.anomaly_vector'))
else:
self._vectorType = anomalyParams['anomalyVectorType']
self._activeColumnCount = \
self.htm_prediction_model._getSPRegion().getSelf().getParameter('numActiveColumnsPerInhArea')
# Storage for last run
self._anomalyVectorLength = None
self._classificationVector = numpy.array([])
self._prevPredictedColumns = numpy.array([])
self._prevTPCells = numpy.array([])
# Array of CLAClassificationRecord's used to recompute and get history
self.saved_states = []
self.saved_categories = []
def run(self):
# Compute an iteration of this classifier
result = self.compute()
# return the label to assign to this point
return result.anomalyLabel
  def getLabels(self, start=None, end=None):
    """
    Return the labels of the records stored in the KNN classifier whose
    ROWID falls in [start, end).

    :param start: first ROWID to include; defaults to 0
    :param end: first ROWID to exclude; defaults to the last cached ROWID
    :returns: dict with 'isProcessing' (always False here) and
        'recordLabels', a list of {ROWID, labels} dicts
    :raises HTMPredictionModelInvalidRangeError: if end <= start after
        defaulting
    """
    # Nothing cached yet: report an empty result rather than raising.
    if len(self.saved_states) == 0:
      return {
        'isProcessing': False,
        'recordLabels': []
      }
    if start is None:
      start = 0
    if end is None:
      end = self.saved_states[-1].ROWID
    if end <= start:
      raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'getLabels'.",
        debugInfo={
          'requestRange': {
            'startRecordID': start,
            'endRecordID': end
          },
          'numRecordsStored': len(self.saved_states)
        })
    results = {
      'isProcessing': False,
      'recordLabels': []
    }
    classifier = self.htm_prediction_model._getAnomalyClassifier()
    knn = classifier.getSelf()._knn
    # categoryRecencyList maps each stored prototype to its source ROWID;
    # select the prototypes whose ROWID is within [start, end).
    ROWIDX = numpy.array(
        classifier.getSelf().getParameter('categoryRecencyList'))
    validIdx = numpy.where((ROWIDX >= start) & (ROWIDX < end))[0].tolist()
    categories = knn._categoryList
    for idx in validIdx:
      # Decode the prototype's packed category number back into label strings.
      row = dict(
        ROWID=int(ROWIDX[idx]),
        labels=self._categoryToLabelList(categories[idx]))
      results['recordLabels'].append(row)
    return results
  def addLabel(self, start, end, labelName):
    """
    Add the label labelName to each record with record ROWID in range from
    start to end, noninclusive of end.

    This will recalculate all points from end to the last record stored in the
    internal cache of this classifier.

    :param start: first ROWID to label (inclusive)
    :param end: first ROWID NOT to label (exclusive)
    :param labelName: label string to attach
    :raises HTMPredictionModelInvalidRangeError: if the cache is empty or the
        clipped range is empty
    """
    if len(self.saved_states) == 0:
      raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'. "
          "Model has no saved records.")

    # Translate absolute ROWIDs into indexes within the cached window.
    startID = self.saved_states[0].ROWID

    clippedStart = max(0, start - startID)
    clippedEnd = max(0, min( len( self.saved_states) , end - startID))

    if clippedEnd <= clippedStart:
      raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'addLabel'.",
        debugInfo={
          'requestRange': {
            'startRecordID': start,
            'endRecordID': end
          },
          'clippedRequestRange': {
            'startRecordID': clippedStart,
            'endRecordID': clippedEnd
          },
          'validRange': {
            'startRecordID': startID,
            'endRecordID': self.saved_states[len(self.saved_states)-1].ROWID
          },
          'numRecordsStored': len(self.saved_states)
        })

    # Add label to range [clippedStart, clippedEnd)
    for state in self.saved_states[clippedStart:clippedEnd]:
      if labelName not in state.anomalyLabel:
        state.anomalyLabel.append(labelName)
        state.setByUser = True
        self._addRecordToKNN(state)

    # _addRecordToKNN registers the label, so at least one category must
    # exist by now.
    assert len(self.saved_categories) > 0

    # Recompute [end, ...)
    for state in self.saved_states[clippedEnd:]:
      self._updateState(state)
  def removeLabels(self, start=None, end=None, labelFilter=None):
    """
    Remove labels from each record with record ROWID in range from
    start to end, noninclusive of end. Removes all labels if labelFilter is
    None, otherwise only removes the labels equal to labelFilter.

    This will recalculate all points from end to the last record stored in the
    internal cache of this classifier.

    :param start: first ROWID to clear (inclusive); None means the window start
    :param end: first ROWID NOT to clear (exclusive); None means window end
    :param labelFilter: specific label to remove, or None for all labels
    :returns: {'status': 'success'} on completion
    :raises HTMPredictionModelInvalidRangeError: if the cache is empty or the
        clipped range is empty
    """
    if len(self.saved_states) == 0:
      raise HTMPredictionModelInvalidRangeError("Invalid supplied range for "
          "'removeLabels'. Model has no saved records.")

    # Translate absolute ROWIDs into indexes within the cached window; None
    # defaults to the full window.
    startID = self.saved_states[0].ROWID

    clippedStart = 0 if start is None else max(0, start - startID)
    clippedEnd = len(self.saved_states) if end is None else \
      max(0, min( len( self.saved_states) , end - startID))

    if clippedEnd <= clippedStart:
      raise HTMPredictionModelInvalidRangeError("Invalid supplied range for "
          "'removeLabels'.", debugInfo={
          'requestRange': {
            'startRecordID': start,
            'endRecordID': end
          },
          'clippedRequestRange': {
            'startRecordID': clippedStart,
            'endRecordID': clippedEnd
          },
          'validRange': {
            'startRecordID': startID,
            'endRecordID': self.saved_states[len(self.saved_states)-1].ROWID
          },
          'numRecordsStored': len(self.saved_states)
        })

    # Remove records within the cache
    recordsToDelete = []
    for state in self.saved_states[clippedStart:clippedEnd]:
      if labelFilter is not None:
        if labelFilter in state.anomalyLabel:
          state.anomalyLabel.remove(labelFilter)
      else:
        state.anomalyLabel = []
      state.setByUser = False
      recordsToDelete.append(state)
    self._deleteRecordsFromKNN(recordsToDelete)

    # Remove records not in cache
    # NOTE(review): start/end may still be None here; _deleteRangeFromKNN
    # defaults start=0 but an explicit None is passed through — this relies
    # on Python 2 None-vs-number comparison semantics. Verify.
    self._deleteRangeFromKNN(start, end)

    # Recompute [clippedEnd, ...)
    for state in self.saved_states[clippedEnd:]:
      self._updateState(state)

    return {'status': 'success'}
  def _updateState(self, state):
    """
    Recompute the label list for ``state`` from the KNN classifier and the
    anomaly-score threshold, then synchronize the classifier (add, relabel,
    or remove the record) if the labeling changed.
    """
    # Record is before wait period do not classifiy
    if state.ROWID < self._autoDetectWaitRecords:
      if not state.setByUser:
        state.anomalyLabel = []
        self._deleteRecordsFromKNN([state])
      return

    label = HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL
    autoLabel = label + HTMPredictionModelClassifierHelper.AUTO_TAG

    # Update the label based on classifications
    newCategory = self._recomputeRecordFromKNN(state)
    labelList = self._categoryToLabelList(newCategory)

    if state.setByUser:
      # Keep user-assigned labels, but strip the threshold/auto labels so
      # they can be re-derived below from the current score.
      if label in state.anomalyLabel:
        state.anomalyLabel.remove(label)
      if autoLabel in state.anomalyLabel:
        state.anomalyLabel.remove(autoLabel)
      labelList.extend(state.anomalyLabel)

    # Score above threshold: attach the threshold label.  Otherwise, if the
    # KNN classified it as threshold-labeled, downgrade to the "(auto)" tag.
    if state.anomalyScore >= self._autoDetectThreshold:
      labelList.append(label)
    elif label in labelList:
      # If not above threshold but classified - set to auto threshold label
      ind = labelList.index(label)
      labelList[ind] = autoLabel

    # Make all entries unique
    labelList = list(set(labelList))

    # If both above threshold and auto classified above - remove auto label
    if label in labelList and autoLabel in labelList:
      labelList.remove(autoLabel)

    if state.anomalyLabel == labelList:
      return

    # Update state's labeling
    state.anomalyLabel = labelList

    # Update KNN Classifier with new labeling
    if state.anomalyLabel == []:
      self._deleteRecordsFromKNN([state])
    else:
      self._addRecordToKNN(state)
def _addRecordToKNN(self, record):
"""
This method will add the record to the KNN classifier.
"""
classifier = self.htm_prediction_model._getAnomalyClassifier()
knn = classifier.getSelf()._knn
prototype_idx = classifier.getSelf().getParameter('categoryRecencyList')
category = self._labelListToCategoryNumber(record.anomalyLabel)
# If record is already in the classifier, overwrite its labeling
if record.ROWID in prototype_idx:
knn.prototypeSetCategory(record.ROWID, category)
return
# Learn this pattern in the knn
pattern = self._getStateAnomalyVector(record)
rowID = record.ROWID
knn.learn(pattern, category, rowID=rowID)
def _deleteRecordsFromKNN(self, recordsToDelete):
"""
This method will remove the given records from the classifier.
parameters
------------
recordsToDelete - list of records to delete from the classififier
"""
classifier = self.htm_prediction_model._getAnomalyClassifier()
knn = classifier.getSelf()._knn
prototype_idx = classifier.getSelf().getParameter('categoryRecencyList')
idsToDelete = [r.ROWID for r in recordsToDelete if \
not r.setByUser and r.ROWID in prototype_idx]
nProtos = knn._numPatterns
knn.removeIds(idsToDelete)
assert knn._numPatterns == nProtos - len(idsToDelete)
def _deleteRangeFromKNN(self, start=0, end=None):
"""
This method will remove any stored records within the range from start to
end. Noninclusive of end.
parameters
------------
start - integer representing the ROWID of the start of the deletion range,
end - integer representing the ROWID of the end of the deletion range,
if None, it will default to end.
"""
classifier = self.htm_prediction_model._getAnomalyClassifier()
knn = classifier.getSelf()._knn
prototype_idx = numpy.array(
classifier.getSelf().getParameter('categoryRecencyList'))
if end is None:
end = prototype_idx.max() + 1
idsIdxToDelete = numpy.logical_and(prototype_idx >= start,
prototype_idx < end)
idsToDelete = prototype_idx[idsIdxToDelete]
nProtos = knn._numPatterns
knn.removeIds(idsToDelete.tolist())
assert knn._numPatterns == nProtos - len(idsToDelete)
  def _recomputeRecordFromKNN(self, record):
    """
    Return the KNN-classified category number for ``record``, or None when
    no stored prototype is both eligible and close enough.

    Only prototypes from ROWIDs after the wait period and strictly before
    this record are eligible.
    """
    inputs = {
      "categoryIn": [None],
      "bottomUpIn": self._getStateAnomalyVector(record),
    }

    outputs = {"categoriesOut": numpy.zeros((1,)),
               "bestPrototypeIndices":numpy.zeros((1,)),
               "categoryProbabilitiesOut":numpy.zeros((1,))}

    # Run inference only to capture state before learning
    classifier = self.htm_prediction_model._getAnomalyClassifier()
    knn = classifier.getSelf()._knn

    # Only use points before record to classify and after the wait period.
    classifier_indexes = \
      numpy.array(classifier.getSelf().getParameter('categoryRecencyList'))
    valid_idx = numpy.where(
        (classifier_indexes >= self._autoDetectWaitRecords) &
        (classifier_indexes < record.ROWID)
      )[0].tolist()

    if len(valid_idx) == 0:
      return None

    # Temporarily disable learning so this inference pass does not add the
    # record as a new prototype; restore learning afterwards.
    classifier.setParameter('inferenceMode', True)
    classifier.setParameter('learningMode', False)
    classifier.getSelf().compute(inputs, outputs)
    classifier.setParameter('learningMode', True)

    classifier_distances = classifier.getSelf().getLatestDistances()
    valid_distances = classifier_distances[valid_idx]
    if valid_distances.min() <= self._classificationMaxDist:
      # Take the category of the closest eligible prototype.
      classifier_indexes_prev = classifier_indexes[valid_idx]
      rowID = classifier_indexes_prev[valid_distances.argmin()]
      indexID = numpy.where(classifier_indexes == rowID)[0][0]
      category = classifier.getSelf().getCategoryList()[indexID]
      return category

    return None
def _constructClassificationRecord(self):
"""
Construct a _HTMClassificationRecord based on the current state of the
htm_prediction_model of this classifier.
***This will look into the internals of the model and may depend on the
SP, TM, and KNNClassifier***
"""
model = self.htm_prediction_model
sp = model._getSPRegion()
tm = model._getTPRegion()
tpImp = tm.getSelf()._tfdr
# Count the number of unpredicted columns
activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0]
score = numpy.in1d(activeColumns, self._prevPredictedColumns).sum()
score = (self._activeColumnCount - score)/float(self._activeColumnCount)
spSize = sp.getParameter('activeOutputCount')
tpSize = tm.getParameter('cellsPerColumn') * tm.getParameter('columnCount')
classificationVector = numpy.array([])
if self._vectorType == 'tpc':
# Classification Vector: [---TM Cells---]
classificationVector = numpy.zeros(tpSize)
activeCellMatrix = tpImp.getLearnActiveStateT().reshape(tpSize, 1)
activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
if activeCellIdx.shape[0] > 0:
classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1
elif self._vectorType == 'sp_tpe':
# Classification Vecotr: [---SP---|---(TM-SP)----]
classificationVector = numpy.zeros(spSize+spSize)
if activeColumns.shape[0] > 0:
classificationVector[activeColumns] = 1.0
errorColumns = numpy.setdiff1d(self._prevPredictedColumns, activeColumns)
if errorColumns.shape[0] > 0:
errorColumnIndexes = ( numpy.array(errorColumns, dtype=numpy.uint16) +
spSize )
classificationVector[errorColumnIndexes] = 1.0
else:
raise TypeError("Classification vector type must be either 'tpc' or"
" 'sp_tpe', current value is %s" % (self._vectorType))
# Store the state for next time step
numPredictedCols = len(self._prevPredictedColumns)
predictedColumns = tm.getOutputData("topDownOut").nonzero()[0]
self._prevPredictedColumns = copy.deepcopy(predictedColumns)
if self._anomalyVectorLength is None:
self._anomalyVectorLength = len(classificationVector)
result = _CLAClassificationRecord(
ROWID=int(model.getParameter('__numRunCalls') - 1), #__numRunCalls called
#at beginning of model.run
anomalyScore=score,
anomalyVector=classificationVector.nonzero()[0].tolist(),
anomalyLabel=[]
)
return result
def compute(self):
"""
Run an iteration of this anomaly classifier
"""
result = self._constructClassificationRecord()
# Classify this point after waiting the classification delay
if result.ROWID >= self._autoDetectWaitRecords:
self._updateState(result)
# Save new classification record and keep history as moving window
self.saved_states.append(result)
if len(self.saved_states) > self._history_length:
self.saved_states.pop(0)
return result
def setAutoDetectWaitRecords(self, waitRecords):
"""
Sets the autoDetectWaitRecords.
"""
if not isinstance(waitRecords, int):
raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. WaitRecord "
"must be a number." % (type(waitRecords)))
if len(self.saved_states) > 0 and waitRecords < self.saved_states[0].ROWID:
raise HTMPredictionModelInvalidArgument("Invalid value. autoDetectWaitRecord value "
"must be valid record within output stream. Current minimum ROWID in "
"output stream is %d." % (self.saved_states[0].ROWID))
self._autoDetectWaitRecords = waitRecords
# Update all the states in the classifier's cache
for state in self.saved_states:
self._updateState(state)
def getAutoDetectWaitRecords(self):
"""
Return the autoDetectWaitRecords.
"""
return self._autoDetectWaitRecords
def setAutoDetectThreshold(self, threshold):
"""
Sets the autoDetectThreshold.
TODO: Ensure previously classified points outside of classifier are valid.
"""
if not (isinstance(threshold, float) or isinstance(threshold, int)):
raise HTMPredictionModelInvalidArgument("Invalid argument type \'%s\'. threshold "
"must be a number." % (type(threshold)))
self._autoDetectThreshold = threshold
# Update all the states in the classifier's cache
for state in self.saved_states:
self._updateState(state)
def getAutoDetectThreshold(self):
"""
Return the autoDetectThreshold.
"""
return self._autoDetectThreshold
def _labelToCategoryNumber(self, label):
"""
Since the KNN Classifier stores categories as numbers, we must store each
label as a number. This method converts from a label to a unique number.
Each label is assigned a unique bit so multiple labels may be assigned to
a single record.
"""
if label not in self.saved_categories:
self.saved_categories.append(label)
return pow(2, self.saved_categories.index(label))
def _labelListToCategoryNumber(self, labelList):
"""
This method takes a list of labels and returns a unique category number.
This enables this class to store a list of categories for each point since
the KNN classifier only stores a single number category for each record.
"""
categoryNumber = 0
for label in labelList:
categoryNumber += self._labelToCategoryNumber(label)
return categoryNumber
def _categoryToLabelList(self, category):
"""
Converts a category number into a list of labels
"""
if category is None:
return []
labelList = []
labelNum = 0
while category > 0:
if category % 2 == 1:
labelList.append(self.saved_categories[labelNum])
labelNum += 1
category = category >> 1
return labelList
def _getStateAnomalyVector(self, state):
"""
Returns a state's anomaly vertor converting it from spare to dense
"""
vector = numpy.zeros(self._anomalyVectorLength)
vector[state.anomalyVector] = 1
return vector
  def __setstate__(self, state):
    """
    Restore pickled state, migrating older serialized versions forward.

    Versions 1 and 2 stored the wait period as '_classificationDelay';
    version 1 additionally predates the configurable anomaly vector type.
    Defaults are set BEFORE the bulk attribute copy so that any value
    present in ``state`` wins.
    """
    version = 1
    if "_version" in state:
      version = state["_version"]

    # Migrate from version 1 to version 2
    if version == 1:
      self._vectorType = str(Configuration.get(
          'nupic.model.temporalAnomaly.anomaly_vector'))
      self._autoDetectWaitRecords = state['_classificationDelay']
    elif version == 2:
      self._autoDetectWaitRecords = state['_classificationDelay']
    elif version == 3:
      pass
    else:
      raise Exception("Error while deserializing %s: Invalid version %s"
                      %(self.__class__, version))

    # Older pickles may lack a threshold; 1.1 disables auto labeling since
    # anomaly scores cannot exceed 1.0.
    if '_autoDetectThreshold' not in state:
      self._autoDetectThreshold = 1.1

    for attr, value in state.iteritems():
      setattr(self, attr, value)

    self._version = HTMPredictionModelClassifierHelper.__VERSION__
| 21,231 | Python | .py | 502 | 35.649402 | 99 | 0.696056 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,962 | exp_description_api.py | numenta_nupic-legacy/src/nupic/frameworks/opf/exp_description_api.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file describes the Description API interface of the Online Prediction
Framework (OPF).
The Description API interface encapsulates the following two important sets of
configuration parameters in OPF
1. model creation parameters (via getDescription)
2. task control parameters (via getExperimentTasks)
The description class objects instantiated in ``description.py``
implements the functionality by subclassing the Description API interface.
This allows ``description.py`` to be generic and oblivious to the specific
experiments.
"""
import os
from abc import ABCMeta, abstractmethod
import types
from pkg_resources import resource_filename
from nupic.frameworks.opf.opf_utils import validateOpfJsonValue
from nupic.frameworks.opf.opf_task_driver import (IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
from nupic.support.enum import Enum
FILE_SCHEME = "file://"
# Enum to characterize potential generation environments
OpfEnvironment = Enum(Nupic='nupic',
Experiment='opfExperiment')
class DescriptionIface(object):
"""
This is the base interface class for description API classes which provide
OPF configuration parameters.
This mechanism abstracts description API from the specific description objects
created by the individual users.
TODO: logging interface?
:param modelConfig: (dict)
Holds user-defined settings for model creation. See OPF
`here <description-template.html>`_ for config dict documentation.
:param control: (dict)
How the model is to be run. The schema of this dictionary depends on the
'environment' parameter, which specifies the context in which the model is
being run.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, modelConfig, control):
pass
@abstractmethod
def getModelDescription(self):
"""
:returns: the model creation parameters based on the settings in the config
dictionary.
"""
@abstractmethod
def getModelControl(self):
""" Returns the task instances of the experiment description.
:returns: (dict) describing how the model is to be run
"""
@abstractmethod
def normalizeStreamSources(self):
"""
Inspects the control task and updates any stream sources it finds that are
not absolute paths into paths generated by pkg_resources relative to
nupic.datafiles.
"""
@abstractmethod
def convertNupicEnvToOPF(self):
""" Converts the control element from Nupic format to a default OPF
format with 1 task. This is useful when you have a base description file
that you want to run both permutations on (which requires the Nupic
environment format) and single OPF experiments on (which requires the
OPF format).
"""
class ExperimentDescriptionAPI(DescriptionIface):
"""Interface for specifying OPF experiments.
This class is used to specify the model and control parameters for an OPF
experiment. The model config includes the necessary information for
constructing an OPF model and the control includes information about the
environment, where to get the data from, etc.
The :mod:`~nupic.frameworks.opf.experiment_runner` takes an instance of
this class as the description of the experiment to run. Similarly,
`scripts/run_opf_experiment.py` looks for an instance of this class in a
variable called "descriptionInterface" in the experiment files that are
passed to it.
:param modelConfig: (dict) a specification of the model to use, including
the following keys:
- **model**: the name of the OPF model to create
- **version**: the config format version to use
- **modelParams**: parameters to pass to the OPF model
There may be other required fields such as predictionSteps,
predictedField, and numRecords
:param control: (dict): a specification of the experimental setup,
including the following keys:
- **environment**: the environment that the model will be run in
- **dataset**: input specification as defined in
`src/nupic/frameworks/opf/jsonschema/stream_def.json`
- **iterationCount**: maximum number of iterations, or -1 to iterate
until the data source is exhausted
- **inferenceArgs**: a dict containing all the supplementary parameters
for inference, including "predictedField" and "predictionSteps"
- **metrics**: a list of MetricSpec instances that specify the metrics to
compute during this experiment
- **loggedMetrics**: a sequence of regular expression strings that
specify which metrics should be logged at each iteration of the
experiment
"""
def __init__(self, modelConfig, control):
environment = control['environment']
if environment == OpfEnvironment.Experiment:
self.__validateExperimentControl(control)
elif environment == OpfEnvironment.Nupic:
self.__validateNupicControl(control)
self.__modelConfig = modelConfig
self.__control = control
def getModelDescription(self):
if (self.__modelConfig['model'] == 'HTMPrediction' and
'version' not in self.__modelConfig):
# The modelConfig is in the old CLA format, update it.
return self.__getHTMPredictionModelDescription()
else:
return self.__modelConfig
def getModelControl(self):
return self.__control
def __validateExperimentControl(self, control):
""" Validates control dictionary for the experiment context"""
# Validate task list
taskList = control.get('tasks', None)
if taskList is not None:
taskLabelsList = []
for task in taskList:
validateOpfJsonValue(task, "opfTaskSchema.json")
validateOpfJsonValue(task['taskControl'], "opfTaskControlSchema.json")
taskLabel = task['taskLabel']
assert isinstance(taskLabel, types.StringTypes), \
"taskLabel type: %r" % type(taskLabel)
assert len(taskLabel) > 0, "empty string taskLabel not is allowed"
taskLabelsList.append(taskLabel.lower())
taskLabelDuplicates = filter(lambda x: taskLabelsList.count(x) > 1,
taskLabelsList)
assert len(taskLabelDuplicates) == 0, \
"Duplcate task labels are not allowed: %s" % taskLabelDuplicates
return
def __validateNupicControl(self, control):
""" Validates control dictionary for the nupic engine context"""
validateOpfJsonValue(control, "nupicControlSchema.json")
def normalizeStreamSource(self, stream):
"""
TODO: document
:param stream:
"""
# The stream source in the task might be in several formats, so we need
# to make sure it gets converted into an absolute path:
source = stream['source'][len(FILE_SCHEME):]
# If the source is already an absolute path, we won't use pkg_resources,
# we'll just trust that the path exists. If not, it's a user problem.
if os.path.isabs(source):
sourcePath = source
else:
# First we'll check to see if this path exists within the nupic.datafiles
# package resource.
sourcePath = resource_filename("nupic.datafiles", source)
if not os.path.exists(sourcePath):
# If this path doesn't exist as a package resource, the last option will
# be to treat it as a relative path to the current working directory.
sourcePath = os.path.join(os.getcwd(), source)
stream['source'] = FILE_SCHEME + sourcePath
def normalizeStreamSources(self):
"""
TODO: document
"""
task = dict(self.__control)
if 'dataset' in task:
for stream in task['dataset']['streams']:
self.normalizeStreamSource(stream)
else:
for subtask in task['tasks']:
for stream in subtask['dataset']['streams']:
self.normalizeStreamSource(stream)
def convertNupicEnvToOPF(self):
"""
TODO: document
"""
# We need to create a task structure, most of which is taken verbatim
# from the Nupic control dict
task = dict(self.__control)
task.pop('environment')
inferenceArgs = task.pop('inferenceArgs')
task['taskLabel'] = 'DefaultTask'
# Create the iterationCycle element that will be placed inside the
# taskControl.
iterationCount = task.get('iterationCount', -1)
iterationCountInferOnly = task.pop('iterationCountInferOnly', 0)
if iterationCountInferOnly == -1:
iterationCycle = [IterationPhaseSpecInferOnly(1000, inferenceArgs=inferenceArgs)]
elif iterationCountInferOnly > 0:
assert iterationCount > 0, "When iterationCountInferOnly is specified, "\
"iterationCount must also be specified and not be -1"
iterationCycle = [IterationPhaseSpecLearnAndInfer(iterationCount
-iterationCountInferOnly, inferenceArgs=inferenceArgs),
IterationPhaseSpecInferOnly(iterationCountInferOnly, inferenceArgs=inferenceArgs)]
else:
iterationCycle = [IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=inferenceArgs)]
taskControl = dict(metrics = task.pop('metrics'),
loggedMetrics = task.pop('loggedMetrics'),
iterationCycle = iterationCycle)
task['taskControl'] = taskControl
# Create the new control
self.__control = dict(environment = OpfEnvironment.Nupic,
tasks = [task])
def __getHTMPredictionModelDescription(self):
  """Build an OPF modelDescription dict for an HTMPrediction model.

  Translates the parsed Nupic model configuration (self.__modelConfig) into
  the OPF 'modelParams' layout: sensor, spatial-pooler (SP), temporal-memory
  (TM) and optional classifier parameter sub-dicts.

  :returns: (dict) with keys 'version', 'model' and 'modelParams'
  """
  config = self.__modelConfig
  assert config['model'] == "HTMPrediction"

  # Spatial pooler parameters. Hard-coded values (globalInhibition, seed)
  # mirror the historical Nupic defaults for this conversion.
  spParams = dict(
    spVerbosity = config['spVerbosity'],
    globalInhibition = 1,
    columnCount = config['claRegionNColumns'],
    inputWidth = 0,
    numActiveColumnsPerInhArea = config['spNumActivePerInhArea'],
    seed = 1956,
    potentialPct = config.get('spCoincInputPoolPct', 1.0),
    synPermConnected = config.get('spSynPermConnected', 0.1),
    synPermActiveInc = config.get('synPermActiveInc', 0.1),
    synPermInactiveDec = config.get('synPermInactiveDec', 0.01),
    boostStrength = config.get('boostStrength', 1.0),
  )

  # Temporal memory parameters. Several entries fall back to defaults when
  # the corresponding config value is None.
  tmParams = dict(
    verbosity = config['tpVerbosity'],
    columnCount = config['claRegionNColumns'],
    # A single cell per column when temporal memory is disabled.
    cellsPerColumn = config['tpNCellsPerCol'] if config['tmEnable'] else 1,
    inputWidth = spParams['columnCount'],
    seed = 1960,
    temporalImp = config['tpImplementation'],
    newSynapseCount = config['tpNewSynapseCount']
                        if config['tpNewSynapseCount'] is not None
                        else config['spNumActivePerInhArea'],
    maxSynapsesPerSegment = config['tpMaxSynapsesPerSegment'],
    maxSegmentsPerCell = config['tpMaxSegmentsPerCell'],
    initialPerm = config['tpInitialPerm'],
    permanenceInc = config['tpPermanenceInc'],
    # NOTE: decrement defaults to the increment when not specified.
    permanenceDec = config['tpPermanenceInc']
                      if config['tpPermanenceDec'] is None
                      else config['tpPermanenceDec'],
    globalDecay = 0.0,
    maxAge = 0,
    minThreshold = 12 if config['tpMinSegmentMatchSynapseThreshold'] is None
                     else config['tpMinSegmentMatchSynapseThreshold'],
    activationThreshold = 16 if config['tpSegmentActivationThreshold'] is None
                            else config['tpSegmentActivationThreshold'],
    outputType = config.get('tpOutputType', 'normal'),
    pamLength = config.get('tpPamLength', 1),
  )

  sensorParams = dict(
    verbosity = config['sensorVerbosity'],
    encoders = config['dsEncoderSchema'],
    sensorAutoReset = config['sensorAutoReset']
  )

  # Classifier region parameters -- only built when a classifier region is
  # configured; otherwise clParams stays None.
  if 'clRegionName' in config:
    clParams = dict(
      regionName = config['clRegionName'],
      verbosity = config['verbosity'],
    )

    if config['clRegionName'] == 'KNNClassifierRegion':
      clParams['replaceDuplicates'] = 1
    elif config['clRegionName'] == 'SDRClassifierRegion':
      # presumably 'clAlpha' predates newer configs; default when absent --
      # TODO confirm against upstream expgenerator behavior
      clAlpha = config.get('clAlpha', None)
      if clAlpha is None:
        clAlpha = 0.001
      clParams['alpha'] = clAlpha
      clParams['steps'] = config.get('clSteps', '1')

    if 'clAdvancedParams' in config:
      # Advanced params override/extend whatever was set above.
      clParams.update(config['clAdvancedParams'])
  else:
    clParams = None

  modelDescription = dict(
    version = 1,
    model = config['model'],
    modelParams = dict(
      inferenceType = config['inferenceType'],
      predictedField = config.get('predictedField', None),
      sensorParams = sensorParams,
      spEnable = config.get('spEnable', True),
      spParams = spParams,
      tmEnable = config['tmEnable'],
      tmParams = tmParams,
      clParams = clParams,
      trainSPNetOnlyIfRequested = config.get(
        'claTrainSPNetOnlyIfRequested', False),
    )
  )
  return modelDescription
| 14,015 | Python | .py | 306 | 38.686275 | 107 | 0.688362 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,963 | prediction_metrics_manager.py | numenta_nupic-legacy/src/nupic/frameworks/opf/prediction_metrics_manager.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module implements
:class:`~nupic.frameworks.opf.prediction_metrics_manager.MetricsManager`,
a helper class that handles pooling of multiple record and field prediction
metrics calculators.
"""
import logging
import copy
import pprint
from collections import (namedtuple,
deque)
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf import metrics
from opf_utils import InferenceType, InferenceElement
# MetricValueElement class
#
# Represents an individual metric value element in a list returned by
# PredictionMetricsManager.getMetrics()
#
# spec: A MetricSpec value (a copy) that was used to construct
# the metric instance that generated the metric value
# value: The metric value
MetricValueElement = namedtuple("MetricValueElement", ["spec", "value"])
class MetricsManager(object):
  """
  This is a class to handle the computation of metrics properly. This class
  takes in an inferenceType, and it assumes that it is associated with a single
  model

  :param metricSpecs: (list) of
         :class:`~nupic.frameworks.opf.metrics.MetricSpec` objects that specify
         which metrics should be calculated.
  :param fieldInfo: (list) of :class:`~nupic.data.field_meta.FieldMetaInfo`
         objects.
  :param inferenceType: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)
         value that specifies the inference type of the associated model. This
         affects how metrics are calculated. FOR EXAMPLE, temporal models save
         the inference from the previous timestep to match it to the ground
         truth value in the current timestep.
  """

  # Map from inference element to sensor input element. This helps us find the
  # appropriate ground truth field for a given inference element

  def __init__(self, metricSpecs, fieldInfo, inferenceType):
    self.__metricSpecs = []
    self.__metrics = []
    self.__metricLabels = []

    # Maps field names to indices. Useful for looking up input/predictions by
    # field name
    self.__fieldNameIndexMap = dict( [(info.name, i) \
                                      for i, info in enumerate(fieldInfo)] )

    self.__constructMetricsModules(metricSpecs)
    self.__currentGroundTruth = None
    self.__currentInference = None
    self.__currentResult = None

    # Temporal models predict the NEXT record, so their inferences must be
    # shifted one step before being compared to the current ground truth.
    self.__isTemporal = InferenceType.isTemporal(inferenceType)
    if self.__isTemporal:
      self.__inferenceShifter = InferenceShifter()

  def update(self, results):
    """
    Compute the new metrics values, given the next inference/ground-truth values

    :param results: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
           object that was computed during the last iteration of the model.

    :returns: (dict) where each key is the metric-name, and the values are
           it scalar value.
    """
    self._addResults(results)

    # Nothing to compute until we have metrics and a (possibly shifted)
    # inference to compare against.
    if  not self.__metricSpecs \
        or self.__currentInference is None:
      return {}

    metricResults = {}
    for metric, spec, label in zip(self.__metrics,
                                   self.__metricSpecs,
                                   self.__metricLabels):
      inferenceElement = spec.inferenceElement
      field = spec.field
      groundTruth = self._getGroundTruth(inferenceElement)
      inference = self._getInference(inferenceElement)
      rawRecord = self._getRawGroundTruth()
      result = self.__currentResult
      if field:
        # Narrow list/tuple inferences and ground truth down to the single
        # field this metric is configured for.
        if type(inference) in (list, tuple):
          if field in self.__fieldNameIndexMap:
            # NOTE: If the predicted field is not fed in at the bottom, we
            # won't have it in our fieldNameIndexMap
            fieldIndex = self.__fieldNameIndexMap[field]
            inference = inference[fieldIndex]
          else:
            inference = None

        if groundTruth is not None:
          if type(groundTruth) in (list, tuple):
            if field in self.__fieldNameIndexMap:
              # NOTE: If the predicted field is not fed in at the bottom, we
              # won't have it in our fieldNameIndexMap
              fieldIndex = self.__fieldNameIndexMap[field]
              groundTruth = groundTruth[fieldIndex]
            else:
              groundTruth = None
          else:
            # groundTruth could be a dict based off of field names
            groundTruth = groundTruth[field]

      metric.addInstance(groundTruth=groundTruth,
                         prediction=inference,
                         record=rawRecord,
                         result=result)

      metricResults[label] = metric.getMetric()['value']

    return metricResults

  def getMetrics(self):
    """
    Gets the current metric values

    :returns: (dict) where each key is the metric-name, and the values are
           it scalar value. Same as the output of
           :meth:`~nupic.frameworks.opf.prediction_metrics_manager.MetricsManager.update`
    """
    result = {}

    for metricObj, label in zip(self.__metrics, self.__metricLabels):
      value = metricObj.getMetric()
      result[label] = value['value']

    return result

  def getMetricDetails(self, metricLabel):
    """
    Gets detailed info about a given metric, in addition to its value. This
    may include any statistics or auxiliary data that are computed for a given
    metric.

    :param metricLabel: (string) label of the given metric (see
           :class:`~nupic.frameworks.opf.metrics.MetricSpec`)

    :returns: (dict) of metric information, as returned by
           :meth:`nupic.frameworks.opf.metrics.MetricsIface.getMetric`, or
           None if ``metricLabel`` is not a known label.
    """
    try:
      metricIndex = self.__metricLabels.index(metricLabel)
    except ValueError:
      # BUGFIX: list.index raises ValueError (not IndexError) when the label
      # is absent; the previous "except IndexError" let the error propagate
      # instead of returning None.
      return None

    return self.__metrics[metricIndex].getMetric()

  def getMetricLabels(self):
    """
    :returns: (list) of labels for the metrics that are being calculated
    """
    return tuple(self.__metricLabels)

  def _addResults(self, results):
    """
    Stores the current model results in the manager's internal store

    Parameters:
    -----------------------------------------------------------------------
    results:  A ModelResults object that contains the current timestep's
              input/inferences
    """
    # -----------------------------------------------------------------------
    # If the model potentially has temporal inferences.
    if self.__isTemporal:
      shiftedInferences = self.__inferenceShifter.shift(results).inferences
      self.__currentResult = copy.deepcopy(results)
      self.__currentResult.inferences = shiftedInferences
      self.__currentInference = shiftedInferences

    # -----------------------------------------------------------------------
    # The current model has no temporal inferences.
    else:
      self.__currentResult = copy.deepcopy(results)
      self.__currentInference = copy.deepcopy(results.inferences)

    # -----------------------------------------------------------------------
    # Save the current ground-truth results
    self.__currentGroundTruth = copy.deepcopy(results)

  def _getGroundTruth(self, inferenceElement):
    """
    Get the actual value for this field

    Parameters:
    -----------------------------------------------------------------------
    inferenceElement:  The inference element (part of the inference) that
                       is being used for this metric
    """
    sensorInputElement = InferenceElement.getInputElement(inferenceElement)
    if sensorInputElement is None:
      return None
    return getattr(self.__currentGroundTruth.sensorInput, sensorInputElement)

  def _getInference(self, inferenceElement):
    """
    Get what the inferred value for this field was

    Parameters:
    -----------------------------------------------------------------------
    inferenceElement:  The inference element (part of the inference) that
                       is being used for this metric
    """
    if self.__currentInference is not None:
      return self.__currentInference.get(inferenceElement, None)

    return None

  def _getRawGroundTruth(self):
    """
    Returns the raw input record for the current ground truth, as fed into
    the model by the user.
    """
    return self.__currentGroundTruth.rawInput

  def __constructMetricsModules(self, metricSpecs):
    """
    Creates the required metrics modules

    Parameters:
    -----------------------------------------------------------------------
    metricSpecs:
      A sequence of MetricSpec objects that specify which metric modules to
      instantiate
    """
    if not metricSpecs:
      return

    self.__metricSpecs = metricSpecs
    for spec in metricSpecs:
      if not InferenceElement.validate(spec.inferenceElement):
        raise ValueError("Invalid inference element for metric spec: %r" %spec)

      self.__metrics.append(metrics.getModule(spec))
      self.__metricLabels.append(spec.getLabel())
def test():
  """Run this module's self-tests: basic plumbing, multistep temporal
  shifting, and metric label generation."""
  _testMetricsMgr()
  _testTemporalShift()
  _testMetricLabels()
  return
def _testMetricsMgr():
  """Basic plumbing test for MetricsManager: verifies that an invalid
  inference element raises ValueError, then feeds a small temporal
  prediction sequence through an 'aae' metric and checks the result."""
  print "*Testing Metrics Managers*..."
  from nupic.data.field_meta import (
    FieldMetaInfo,
    FieldMetaType,
    FieldMetaSpecial)

  from nupic.frameworks.opf.metrics import MetricSpec
  from nupic.frameworks.opf.opf_utils import ModelResult, SensorInput
  onlineMetrics = (MetricSpec(metric="aae", inferenceElement='', \
                              field="consumption", params={}),)

  print "TESTING METRICS MANAGER (BASIC PLUMBING TEST)..."

  modelFieldMetaInfo = (
    FieldMetaInfo(name='temperature',
                  type=FieldMetaType.float,
                  special=FieldMetaSpecial.none),
    FieldMetaInfo(name='consumption',
                  type=FieldMetaType.float,
                  special=FieldMetaSpecial.none)
  )

  # -----------------------------------------------------------------------
  # Test to make sure that invalid InferenceElements are caught
  try:
    MetricsManager(
      metricSpecs=onlineMetrics,
      fieldInfo=modelFieldMetaInfo,
      inferenceType=InferenceType.TemporalNextStep)
  except ValueError:
    print "Caught bad inference element: PASS"

  print
  # Rebuild the spec with a valid inference element and a temporal manager.
  onlineMetrics = (MetricSpec(metric="aae",
                              inferenceElement=InferenceElement.prediction,
                              field="consumption", params={}),)

  temporalMetrics = MetricsManager(
    metricSpecs=onlineMetrics,
    fieldInfo=modelFieldMetaInfo,
    inferenceType=InferenceType.TemporalNextStep)

  # Each element is one timestep: the observed row plus the next-step
  # prediction made at that timestep.
  inputs = [
    {
      'groundTruthRow' : [9, 7],
      'predictionsDict' : {
        InferenceType.TemporalNextStep: [12, 17]
      }
    },
    {
      'groundTruthRow' : [12, 17],
      'predictionsDict' : {
        InferenceType.TemporalNextStep: [14, 19]
      }
    },
    {
      'groundTruthRow' : [14, 20],
      'predictionsDict' : {
        InferenceType.TemporalNextStep: [16, 21]
      }
    },
    {
      'groundTruthRow' : [9, 7],
      'predictionsDict' : {
        InferenceType.TemporalNextStep:None
      }
    },
  ]

  for element in inputs:
    groundTruthRow=element['groundTruthRow']
    tPredictionRow=element['predictionsDict'][InferenceType.TemporalNextStep]

    result = ModelResult(sensorInput=SensorInput(dataRow=groundTruthRow,
                                                 dataEncodings=None,
                                                 sequenceReset=0,
                                                 category=None),
                         inferences={'prediction':tPredictionRow})

    temporalMetrics.update(result)

  # Average absolute error over the window should be (10+2+3)/3 = 5.0... i.e.
  # 15.0/3.0 given the shifted predictions above.
  assert temporalMetrics.getMetrics().values()[0] == 15.0 / 3.0, \
          "Expected %f, got %f" %(15.0/3.0,
                                  temporalMetrics.getMetrics().values()[0])
  print "ok"

  return
def _testTemporalShift():
  """ Test to see if the metrics manager correctly shifts records for multistep
  prediction cases
  """
  print "*Testing Multistep temporal shift*..."
  from nupic.data.field_meta import (
    FieldMetaInfo,
    FieldMetaType,
    FieldMetaSpecial)
  from nupic.frameworks.opf.metrics import MetricSpec
  from nupic.frameworks.opf.opf_utils import ModelResult, SensorInput

  # No metric specs needed: we inspect the manager's internal shifted
  # inference directly.
  onlineMetrics = ()

  modelFieldMetaInfo = (
    FieldMetaInfo(name='consumption',
                  type=FieldMetaType.float,
                  special=FieldMetaSpecial.none),)

  mgr = MetricsManager(metricSpecs=onlineMetrics,
                       fieldInfo=modelFieldMetaInfo,
                       inferenceType=InferenceType.TemporalMultiStep)

  # Ground truth counts up; 1-step predictions count down; 3-step predictions
  # count up from 5 -- distinct sequences so shifts are detectable.
  groundTruths = [{'consumption':i} for i in range(10)]
  oneStepInfs = reversed(range(10))
  threeStepInfs = range(5, 15)

  for iterNum, gt, os, ts in zip(xrange(10), groundTruths,
                                 oneStepInfs, threeStepInfs):
    inferences = {InferenceElement.multiStepPredictions:{1: os, 3: ts}}
    sensorInput = SensorInput(dataDict = [gt])
    result = ModelResult(sensorInput=sensorInput, inferences=inferences)
    mgr.update(result)

    assert mgr._getGroundTruth(InferenceElement.multiStepPredictions)[0] == gt
    # 1-step predictions should be shifted by exactly one record.
    if iterNum < 1:
      #assert mgr._getInference(InferenceElement.multiStepPredictions) is None
      assert mgr._getInference(InferenceElement.multiStepPredictions)[1] is None
    else:
      prediction = mgr._getInference(InferenceElement.multiStepPredictions)[1]
      assert prediction == 10 - iterNum

    # 3-step predictions should be shifted by exactly three records.
    if iterNum < 3:
      inference = mgr._getInference(InferenceElement.multiStepPredictions)
      assert inference is None or inference[3] is None
    else:
      prediction = mgr._getInference(InferenceElement.multiStepPredictions)[3]
      assert prediction == iterNum + 2
def _testMetricLabels():
print "\n*Testing Metric Label Generation*..."
from nupic.frameworks.opf.metrics import MetricSpec
testTuples = [
(MetricSpec('rmse', InferenceElement.prediction, 'consumption'),
"prediction:rmse:field=consumption"),
(MetricSpec('rmse', InferenceElement.classification),
"classification:rmse"),
(MetricSpec('rmse', InferenceElement.encodings, 'pounds',
params=dict(window=100)),
"encodings:rmse:window=100:field=pounds"),
(MetricSpec('aae', InferenceElement.prediction, 'pounds',
params=dict(window=100, paramA = 10.2, paramB = 20)),
"prediction:aae:paramA=10.2:paramB=20:window=100:field=pounds"),
(MetricSpec('aae', InferenceElement.prediction,'pounds',
params={'window':100, 'paramA':10.2, '1paramB':20}),
"prediction:aae:1paramB=20:paramA=10.2:window=100:field=pounds"),
(MetricSpec('aae', InferenceElement.prediction,'pounds',
params=dict(window=100, paramA = 10.2, paramB =-20)),
"prediction:aae:paramA=10.2:paramB=-20:window=100:field=pounds"),
(MetricSpec('aae', InferenceElement.prediction, 'pounds',
params=dict(window=100, paramA = 10.2, paramB ='square')),
"prediction:aae:paramA=10.2:paramB='square':window=100:field=pounds"),
]
for test in testTuples:
try:
assert test[0].getLabel() == test[1]
except:
print "Failed Creating label"
print "Expected %s \t Got %s" % (test[1], test[0].getLabel())
return
print "ok"
if __name__ == "__main__":
  # Run the module's self-tests when executed directly.
  test()
| 16,689 | Python | .py | 388 | 35.67268 | 92 | 0.642884 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,964 | metrics.py | numenta_nupic-legacy/src/nupic/frameworks/opf/metrics.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Metrics take the predicted and actual values and compute some metric (lower is
better) which is used in the OPF for swarming (and just generally as part of the
output.
One non-obvious thing is that they are computed over a fixed window size,
typically something like 1000 records. So each output record will have a metric
score computed over the 1000 records prior.
Example usage (hot gym example):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Where:
- ``aae``: average absolute error
- ``altMAPE``: mean absolute percentage error, modified so that it never
  divides by zero
.. code-block:: python
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.prediction_metrics_manager import MetricsManager
model = createOpfModel() # assuming this is done elsewhere
metricSpecs = (
MetricSpec(field='kw_energy_consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
)
metricsManager = MetricsManager(metricSpecs,
model.getFieldInfo(),
model.getInferenceType()
)
for row in inputData: # this is just pseudocode
result = model.run(row)
metrics = metricsManager.update(result)
# You can collect metrics here, or attach to your result object.
result.metrics = metrics
See :meth:`getModule` for a mapping of available metric identifiers to their
implementation classes.
"""
from abc import ABCMeta, abstractmethod
import numbers
import copy
import numpy as np
import nupic.math.roc_utils as roc
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.frameworks.opf.opf_utils import InferenceType
from nupic.utils import MovingAverage
from collections import deque
from operator import itemgetter
from safe_interpreter import SafeInterpreter
from io import StringIO
from functools import partial
###############################################################################
# Public Metric specification class
###############################################################################
class MetricSpec(object):
  """
  This class represents a single Metrics specification in the TaskControl block.

  :param metric: (string) A metric type name that identifies which metrics
         module is to be constructed by
         :meth:`nupic.frameworks.opf.metrics.getModule`; e.g., ``rmse``
  :param inferenceElement:
         (:class:`~nupic.frameworks.opf.opf_utils.InferenceElement`) Some
         inference types (such as classification), can output more than one type
         of inference (i.e. the predicted class AND the predicted next step).
         This field specifies which of these inferences to compute the metrics
         on.
  :param field: (string) Field name on which this metric is to be collected
  :param params: (dict) Custom parameters for the metrics module's constructor
  """

  _LABEL_SEPARATOR = ":"

  def __init__(self, metric, inferenceElement, field=None, params=None):
    self.metric = metric
    self.inferenceElement = inferenceElement
    self.field = field
    self.params = params
    return

  def __repr__(self):
    return "%s(metric=%r, inferenceElement=%r, field=%r, params=%r)" \
      % (self.__class__.__name__,
         self.metric,
         self.inferenceElement,
         self.field,
         self.params)

  def getLabel(self, inferenceType=None):
    """
    Helper method that generates a unique label for a :class:`MetricSpec` /
    :class:`~nupic.frameworks.opf.opf_utils.InferenceType` pair. The label is
    formatted as follows:

    ::

        <predictionKind>:<metric type>:(paramName=value)*:field=<fieldname>

    For example:

    ::

        classification:aae:paramA=10.2:paramB=20:window=100:field=pounds

    :returns: (string) label for inference type
    """
    result = []
    if inferenceType is not None:
      result.append(InferenceType.getLabel(inferenceType))
    result.append(self.inferenceElement)
    result.append(self.metric)

    params = self.params
    if params is not None:
      # BUGFIX: was "params.keys()" followed by ".sort()", which fails on
      # Python 3 where dict views have no sort(); sorted() is equivalent and
      # yields the same deterministic parameter ordering on both versions.
      for param in sorted(params):
        # Don't include the customFuncSource - it is too long and unwieldy
        if param in ('customFuncSource', 'customFuncDef', 'customExpr'):
          continue
        value = params[param]
        if isinstance(value, str):
          result.extend(["%s='%s'"% (param, value)])
        else:
          result.extend(["%s=%s"% (param, value)])

    if self.field:
      result.append("field=%s"% (self.field) )

    return self._LABEL_SEPARATOR.join(result)

  @classmethod
  def getInferenceTypeFromLabel(cls, label):
    """
    Extracts the PredictionKind (temporal vs. nontemporal) from the given
    metric label.

    :param label: (string) for a metric spec generated by
           :meth:`getMetricLabel`

    :returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)
    """
    infType, _, _= label.partition(cls._LABEL_SEPARATOR)

    if not InferenceType.validate(infType):
      return None

    return infType
def getModule(metricSpec):
  """
  Factory method to return an appropriate :class:`MetricsIface` module.

  - ``rmse``: :class:`MetricRMSE`
  - ``nrmse``: :class:`MetricNRMSE`
  - ``aae``: :class:`MetricAAE`
  - ``acc``: :class:`MetricAccuracy`
  - ``avg_err``: :class:`MetricAveError`
  - ``trivial``: :class:`MetricTrivial`
  - ``two_gram``: :class:`MetricTwoGram`
  - ``moving_mean``: :class:`MetricMovingMean`
  - ``moving_mode``: :class:`MetricMovingMode`
  - ``neg_auc``: :class:`MetricNegAUC`
  - ``custom_error_metric``: :class:`CustomErrorMetric`
  - ``multiStep``: :class:`MetricMultiStep`
  - ``ms_aae``: :class:`MetricMultiStepAAE`
  - ``ms_avg_err``: :class:`MetricMultiStepAveError`
  - ``passThruPrediction``: :class:`MetricPassThruPrediction`
  - ``altMAPE``: :class:`MetricAltMAPE`
  - ``MAPE``: :class:`MetricMAPE`
  - ``multi``: :class:`MetricMulti`
  - ``negativeLogLikelihood``: :class:`MetricNegativeLogLikelihood`

  :param metricSpec: (:class:`MetricSpec`) metric to find module for.
         ``metricSpec.metric`` must be in the list above.

  :returns: (:class:`AggregateMetric`) an appropriate metric module
  """
  # Dispatch table mapping metric names to their implementation classes.
  registry = {
    'rmse': MetricRMSE,
    'nrmse': MetricNRMSE,
    'aae': MetricAAE,
    'acc': MetricAccuracy,
    'avg_err': MetricAveError,
    'trivial': MetricTrivial,
    'two_gram': MetricTwoGram,
    'moving_mean': MetricMovingMean,
    'moving_mode': MetricMovingMode,
    'neg_auc': MetricNegAUC,
    'custom_error_metric': CustomErrorMetric,
    'multiStep': MetricMultiStep,
    'multiStepProbability': MetricMultiStepProbability,
    'ms_aae': MetricMultiStepAAE,
    'ms_avg_err': MetricMultiStepAveError,
    'passThruPrediction': MetricPassThruPrediction,
    'altMAPE': MetricAltMAPE,
    'MAPE': MetricMAPE,
    'multi': MetricMulti,
    'negativeLogLikelihood': MetricNegativeLogLikelihood,
  }

  metricName = metricSpec.metric
  if metricName not in registry:
    raise Exception("Unsupported metric type: %s" % metricName)
  return registry[metricName](metricSpec)
################################################################################
# Helper Methods and Classes #
################################################################################
class _MovingMode(object):
""" Helper class for computing windowed moving
mode of arbitrary values """
def __init__(self, windowSize = None):
"""
:param windowSize: The number of values that are used to compute the
moving average
"""
self._windowSize = windowSize
self._countDict = dict()
self._history = deque([])
def __call__(self, value):
if len(self._countDict) == 0:
pred = ""
else:
pred = max(self._countDict.items(), key = itemgetter(1))[0]
# Update count dict and history buffer
self._history.appendleft(value)
if not value in self._countDict:
self._countDict[value] = 0
self._countDict[value] += 1
if len(self._history) > self._windowSize:
removeElem = self._history.pop()
self._countDict[removeElem] -= 1
assert(self._countDict[removeElem] > -1)
return pred
def _isNumber(value):
  """Return True if ``value`` is a Python or numpy numeric scalar."""
  return isinstance(value, (numbers.Number, np.number))
class MetricsIface(object):
  """
  A Metrics module compares a prediction Y to corresponding ground truth X and
  returns a single measure representing the "goodness" of the prediction. It is
  up to the implementation to determine how this comparison is made.

  :param metricSpec: (:class:`MetricSpec`) spec used to create the metric
  """
  # Abstract interface: concrete metrics must implement __init__,
  # addInstance() and getMetric().
  __metaclass__ = ABCMeta

  @abstractmethod
  def __init__(self, metricSpec):
    pass

  @abstractmethod
  def addInstance(self, groundTruth, prediction, record = None, result = None):
    """
    Add one instance consisting of ground truth and a prediction.

    :param groundTruth:
      The actual measured value at the current timestep

    :param prediction:
      The value predicted by the network at the current timestep

    :param record: the raw input record as fed to
        :meth:`~nupic.frameworks.opf.model.Model.run` by the user. The
        typical usage is to feed a record to that method and get a
        :class:`~nupic.frameworks.opf.opf_utils.ModelResult`. Then you pass
        :class:`~nupic.frameworks.opf.opf_utils.ModelResult`.rawInput into
        this function as the record parameter.

    :param result: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`) the
        result of running a row of data through an OPF model

    :returns:
      The average error as computed over the metric's window size
    """

  @abstractmethod
  def getMetric(self):
    """
    ``stats`` is expected to contain further information relevant to the given
    metric, for example the number of timesteps represented in the current
    measurement. All stats are implementation defined, and ``stats`` can be
    ``None``.

    :returns: (dict) representing data from the metric
      ::

        {value : <current measurement>, "stats" : {<stat> : <value> ...}}
    """
class AggregateMetric(MetricsIface):
"""
Partial implementation of Metrics Interface for metrics that accumulate an
error and compute an aggregate score, potentially over some window of previous
data. This is a convenience class that can serve as the base class for a wide
variety of metrics.
"""
___metaclass__ = ABCMeta
#FIXME @abstractmethod - this should be marked abstract method and required to be implemented
def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result):
"""
Updates the accumulated error given the prediction and the
ground truth.
:param groundTruth: Actual value that is observed for the current timestep
:param prediction: Value predicted by the network for the given timestep
:param accumulatedError: The total accumulated score from the previous
predictions (possibly over some finite window)
:param historyBuffer: A buffer of the last <self.window> ground truth values
that have been observed.
If historyBuffer = None, it means that no history is being kept.
:param result: An ModelResult class (see opf_utils.py), used for advanced
metric calculation (e.g., MetricNegativeLogLikelihood)
:returns: The new accumulated error. That is:
.. code-block:: python
self.accumulatedError = self.accumulate(
groundTruth, predictions, accumulatedError
)
``historyBuffer`` should also be updated in this method.
``self.spec.params["window"]`` indicates the maximum size of the window.
"""
#FIXME @abstractmethod - this should be marked abstract method and required to be implemented
def aggregate(self, accumulatedError, historyBuffer, steps):
"""
Updates the final aggregated score error given the prediction and the ground
truth.
:param accumulatedError: The total accumulated score from the previous
predictions (possibly over some finite window)
:param historyBuffer: A buffer of the last <self.window> ground truth values
that have been observed. If ``historyBuffer`` = None, it means that
no history is being kept.
:param steps: (int) The total number of (groundTruth, prediction) pairs that
have been passed to the metric. This does not include pairs where
``groundTruth = SENTINEL_VALUE_FOR_MISSING_DATA``
:returns: The new aggregate (final) error measure.
"""
  def __init__(self, metricSpec):
    """ Initialize this metric

    If the params contains the key 'errorMetric', then that is the name of
    another metric to which we will pass a modified groundTruth and prediction
    to from our addInstance() method. For example, we may compute a moving mean
    on the groundTruth and then pass that to the AbsoluteAveError metric

    :param metricSpec: a MetricSpec-like object; ``metricSpec.params`` (a dict
        or None) may supply 'id', 'steps', 'verbosity', 'maxRecords', 'window'
        and 'errorMetric'.
    """
    # Init default member variables
    self.id = None
    self.verbosity = 0
    self.window = -1          # -1 means "no sliding window"
    self.history = None       # deque of windowed samples, or None
    self.accumulatedError = 0
    self.aggregateError = None
    self.steps = 0
    self.spec = metricSpec
    self.disabled = False

    # Number of steps ahead we are trying to predict. This is a list of
    #  prediction steps are processing
    self._predictionSteps = [0]

    # Where we store the ground truth history
    self._groundTruthHistory = deque([])

    # The instances of another metric to which we will pass a possibly modified
    #  groundTruth and prediction to from addInstance(). There is one instance
    #  for each step present in self._predictionSteps
    self._subErrorMetrics = None

    # The maximum number of records to process. After this many records have
    #  been processed, the metric value never changes. This can be used
    #  as the optimization metric for swarming, while having another metric without
    #  the maxRecords limit to get an idea as to how well a production model
    #  would do on the remaining data
    self._maxRecords = None

    # Parse the metric's parameters
    if metricSpec is not None and metricSpec.params is not None:

      self.id = metricSpec.params.get('id', None)
      self._predictionSteps = metricSpec.params.get('steps', [0])
      # Make sure _predictionSteps is a list
      if not hasattr(self._predictionSteps, '__iter__'):
        self._predictionSteps = [self._predictionSteps]

      self.verbosity = metricSpec.params.get('verbosity', 0)
      self._maxRecords = metricSpec.params.get('maxRecords', None)

      # Get the metric window size
      if 'window' in metricSpec.params:
        assert metricSpec.params['window'] >= 1
        self.history = deque([])
        self.window = metricSpec.params['window']

      # Get the name of the sub-metric to chain to from addInstance()
      if 'errorMetric' in metricSpec.params:
        self._subErrorMetrics = []
        for step in self._predictionSteps:
          # Each step gets its own sub-metric instance built from a copy of
          # our spec, with the chaining-related params stripped out.
          subSpec = copy.deepcopy(metricSpec)
          # Do all ground truth shifting before we pass onto the sub-metric
          subSpec.params.pop('steps', None)
          subSpec.params.pop('errorMetric')
          subSpec.metric = metricSpec.params['errorMetric']
          self._subErrorMetrics.append(getModule(subSpec))
def _getShiftedGroundTruth(self, groundTruth):
""" Utility function that saves the passed in groundTruth into a local
history buffer, and returns the groundTruth from self._predictionSteps ago,
where self._predictionSteps is defined by the 'steps' parameter.
This can be called from the beginning of a derived class's addInstance()
before it passes groundTruth and prediction onto accumulate().
"""
# Save this ground truth into our input history
self._groundTruthHistory.append(groundTruth)
# This is only supported when _predictionSteps has one item in it
assert (len(self._predictionSteps) == 1)
# Return the one from N steps ago
if len(self._groundTruthHistory) > self._predictionSteps[0]:
return self._groundTruthHistory.popleft()
else:
if hasattr(groundTruth, '__iter__'):
return [None] * len(groundTruth)
else:
return None
  def addInstance(self, groundTruth, prediction, record = None, result = None):
    """ Feed one (groundTruth, prediction) pair into the metric and return the
    updated aggregate error.

    Missing data (``groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA``) or a
    None prediction leaves the metric unchanged, as does any sample arriving
    after ``maxRecords`` samples have been processed.
    """
    # This base class does not support time shifting the ground truth or a
    # subErrorMetric.
    assert (len(self._predictionSteps) == 1)
    assert self._predictionSteps[0] == 0
    assert self._subErrorMetrics is None

    # If missing data,
    if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
      return self.aggregateError

    if self.verbosity > 0:
      print "groundTruth:\n%s\nPredictions:\n%s\n%s\n" % (groundTruth,
                                            prediction, self.getMetric())

    # Ignore if we've reached maxRecords
    if self._maxRecords is not None and self.steps >= self._maxRecords:
      return self.aggregateError

    # If there is a sub-metric, chain into it's addInstance
    # Accumulate the error
    self.accumulatedError = self.accumulate(groundTruth, prediction,
                                            self.accumulatedError, self.history, result)

    self.steps += 1
    # Recompute and return the aggregate from the new accumulated error.
    return self._compute()
def getMetric(self):
return {"value": self.aggregateError, "stats" : {"steps" : self.steps}}
  def _compute(self):
    # Recompute the aggregate (final) error from the accumulated error,
    # cache it in self.aggregateError, and return it.
    self.aggregateError = self.aggregate(self.accumulatedError, self.history,
                                         self.steps)
    return self.aggregateError
class MetricNegativeLogLikelihood(AggregateMetric):
  """
  Computes negative log-likelihood. Likelihood is the predicted probability of
  the true data from a model. It is more powerful than metrics that only
  considers the single best prediction (e.g. MSE) as it considers the entire
  probability distribution predicted by a model.

  It is more appropriate to use likelihood as the error metric when multiple
  predictions are possible.
  """
  def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result):
    # Per-step bucket likelihoods as produced by the classifier; keys are
    # step counts, values are {bucketIndex: probability} dicts
    # (see ModelResult in opf_utils.py).
    bucketll = result.inferences['multiStepBucketLikelihoods']
    bucketIdxTruth = result.classifierInput.bucketIndex

    if bucketIdxTruth is not None:
      # a manually set minimum prediction probability so that the log(LL) doesn't blow up
      minProb = 0.00001
      negLL = 0
      for step in bucketll.keys():
        # Probability mass the classifier left outside its known buckets.
        outOfBucketProb = 1 - sum(bucketll[step].values())
        if bucketIdxTruth in bucketll[step].keys():
          prob = bucketll[step][bucketIdxTruth]
        else:
          # True bucket never predicted: charge it the out-of-bucket mass.
          prob = outOfBucketProb

        # Clamp so np.log() stays finite.
        if prob < minProb:
          prob = minProb
        negLL -= np.log(prob)

      accumulatedError += negLL

      # Maintain the sliding window: evict the oldest contribution.
      if historyBuffer is not None:
        historyBuffer.append(negLL)
        if len(historyBuffer) > self.spec.params["window"]:
          accumulatedError -= historyBuffer.popleft()

    return accumulatedError

  def aggregate(self, accumulatedError, historyBuffer, steps):
    # Average negative log-likelihood over the window, or over all steps
    # when no window is configured.
    n = steps
    if historyBuffer is not None:
      n = len(historyBuffer)

    return accumulatedError / float(n)
class MetricRMSE(AggregateMetric):
  """
  Computes root-mean-square error.
  """
  def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
    """Add this sample's squared error; evict the oldest when the window is full."""
    squaredErr = (groundTruth - prediction)**2
    total = accumulatedError + squaredErr
    if historyBuffer is not None:
      historyBuffer.append(squaredErr)
      if len(historyBuffer) > self.spec.params["window"]:
        total -= historyBuffer.popleft()
    return total

  def aggregate(self, accumulatedError, historyBuffer, steps):
    """Return sqrt of the mean squared error over the window (or all steps)."""
    if historyBuffer is not None:
      count = len(historyBuffer)
    else:
      count = steps
    return np.sqrt(accumulatedError / float(count))
class MetricNRMSE(MetricRMSE):
  """
  Computes normalized root-mean-square error: the RMSE divided by the
  standard deviation of every ground-truth value seen so far.
  """
  def __init__(self, *args, **kwargs):
    super(MetricNRMSE, self).__init__(*args, **kwargs)
    # All ground truths ever observed; needed to compute the normalizer.
    self.groundTruths = []

  def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
    """Record the ground truth, then defer to MetricRMSE's accumulation."""
    self.groundTruths.append(groundTruth)
    return super(MetricNRMSE, self).accumulate(
        groundTruth, prediction, accumulatedError, historyBuffer, result)

  def aggregate(self, accumulatedError, historyBuffer, steps):
    """Return RMSE / std(groundTruths), or +inf when the std-dev is zero."""
    rmse = super(MetricNRMSE, self).aggregate(
        accumulatedError, historyBuffer, steps)
    spread = np.std(self.groundTruths)
    if spread > 0:
      return rmse / spread
    return float("inf")
class MetricAAE(AggregateMetric):
  """
  Computes average absolute error.
  """
  def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
    """Add this sample's absolute error; evict the oldest when the window is full."""
    absErr = abs(groundTruth - prediction)
    total = accumulatedError + absErr
    if historyBuffer is not None:
      historyBuffer.append(absErr)
      if len(historyBuffer) > self.spec.params["window"]:
        total -= historyBuffer.popleft()
    return total

  def aggregate(self, accumulatedError, historyBuffer, steps):
    """Return the mean absolute error over the window (or all steps)."""
    if historyBuffer is not None:
      count = len(historyBuffer)
    else:
      count = steps
    return accumulatedError / float(count)
class MetricAltMAPE(AggregateMetric):
  """
  Computes the "Alternative" Mean Absolute Percent Error.

  A generic MAPE computes the percent error for each sample, and then gets
  an average. This can suffer from samples where the actual value is very small
  or zero - this one sample can drastically alter the mean.

  This metric on the other hand first computes the average of the actual values
  and the averages of the errors before dividing. This washes out the effects of
  a small number of samples with very small actual values.
  """
  def __init__(self, metricSpec):
    super(MetricAltMAPE, self).__init__(metricSpec)
    # Running sums of |groundTruth| and |groundTruth - prediction|; the metric
    # is 100 * errorSum / groundTruthSum.
    self._accumulatedGroundTruth = 0
    self._accumulatedError = 0

  def addInstance(self, groundTruth, prediction, record = None, result = None):
    """ Feed one sample and return the updated aggregate percent error. """
    # If missing data,
    if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
      return self.aggregateError

    # Compute absolute error
    error = abs(groundTruth - prediction)
    if self.verbosity > 0:
      print "MetricAltMAPE:\n groundTruth: %s\n Prediction: " \
            "%s\n Error: %s" % (groundTruth, prediction, error)

    # Update the accumulated groundTruth and aggregate error
    if self.history is not None:
      self.history.append((groundTruth, error))
      if len(self.history) > self.spec.params["window"] :
        # Evict the oldest sample's contributions from both running sums.
        (oldGT, oldErr) = self.history.popleft()
        self._accumulatedGroundTruth -= oldGT
        self._accumulatedError -= oldErr
    self._accumulatedGroundTruth += abs(groundTruth)
    self._accumulatedError += error

    # Compute aggregate pct error
    if self._accumulatedGroundTruth > 0:
      self.aggregateError = 100.0 * self._accumulatedError / \
                            self._accumulatedGroundTruth
    else:
      # No signal to normalize against yet.
      self.aggregateError = 0

    if self.verbosity >= 1:
      print " accumGT:", self._accumulatedGroundTruth
      print " accumError:", self._accumulatedError
      print " aggregateError:", self.aggregateError

    self.steps += 1
    return self.aggregateError
class MetricMAPE(AggregateMetric):
"""
Computes the "Classic" Mean Absolute Percent Error.
This computes the percent error for each sample, and then gets
an average. Note that this can suffer from samples where the actual value is
very small or zero - this one sample can drastically alter the mean. To
avoid this potential issue, use 'altMAPE' instead.
This metric is provided mainly as a convenience when comparing results against
other investigations that have also used MAPE.
"""
def __init__(self, metricSpec):
super(MetricMAPE, self).__init__(metricSpec)
self._accumulatedPctError = 0
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
return self.aggregateError
# Compute absolute error
if groundTruth != 0:
pctError = float(abs(groundTruth - prediction))/groundTruth
else:
# Ignore this sample
if self.verbosity > 0:
print "Ignoring sample with groundTruth of 0"
self.steps += 1
return self.aggregateError
if self.verbosity > 0:
print "MetricMAPE:\n groundTruth: %s\n Prediction: " \
"%s\n Error: %s" % (groundTruth, prediction, pctError)
# Update the accumulated groundTruth and aggregate error
if self.history is not None:
self.history.append(pctError)
if len(self.history) > self.spec.params["window"] :
(oldPctErr) = self.history.popleft()
self._accumulatedPctError -= oldPctErr
self._accumulatedPctError += pctError
# Compute aggregate pct error
self.aggregateError = 100.0 * self._accumulatedPctError / len(self.history)
if self.verbosity >= 1:
print " accumPctError:", self._accumulatedPctError
print " aggregateError:", self.aggregateError
self.steps += 1
return self.aggregateError
class MetricPassThruPrediction(MetricsIface):
  """
  This is not a metric, but rather a facility for passing the predictions
  generated by a baseline metric through to the prediction output cache produced
  by a model.

  For example, if you wanted to see the predictions generated for the TwoGram
  metric, you would specify 'PassThruPredictions' as the 'errorMetric'
  parameter.

  This metric class simply takes the prediction and outputs that as the
  aggregateMetric value.
  """
  def __init__(self, metricSpec):
    self.spec = metricSpec
    # Average the passed-through predictions over this many samples.
    self.window = metricSpec.params.get("window", 1)
    self.avg = MovingAverage(self.window)
    self.value = None

  def addInstance(self, groundTruth, prediction, record = None, result = None):
    """Fold the prediction into the moving average and return the new value.

    Returning the updated value (previously this returned None) makes this
    class consistent with the other metrics, whose addInstance()
    implementations all return the current metric value.
    """
    self.value = self.avg(prediction)
    return self.value

  def getMetric(self):
    """Return the metric value """
    return {"value": self.value}
class MetricMovingMean(AggregateMetric):
  """
  Computes error metric based on moving mean prediction.

  The moving mean of the time-shifted ground truth serves as the "prediction",
  and the actual error is computed by the chained sub-metric named by the
  'errorMetric' param.
  """
  def __init__(self, metricSpec):
    # This metric assumes a default 'steps' of 1
    if not 'steps' in metricSpec.params:
      metricSpec.params['steps'] = 1

    super(MetricMovingMean, self).__init__(metricSpec)

    # Only supports 1 item in _predictionSteps
    assert (len(self._predictionSteps) == 1)

    # Number of past ground-truth values averaged to form the prediction.
    self.mean_window = 10
    if metricSpec.params.has_key('mean_window'):
      assert metricSpec.params['mean_window'] >= 1
      self.mean_window = metricSpec.params['mean_window']

    # Construct moving average instance
    self._movingAverage = MovingAverage(self.mean_window)

  def getMetric(self):
    # Delegate: the chained sub-metric holds the actual error value.
    return self._subErrorMetrics[0].getMetric()

  def addInstance(self, groundTruth, prediction, record = None, result = None):
    # If missing data,
    if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
      return self._subErrorMetrics[0].aggregateError

    if self.verbosity > 0:
      print "groundTruth:\n%s\nPredictions:\n%s\n%s\n" % (groundTruth, prediction, self.getMetric())

    # Use ground truth from 'steps' steps ago as our most recent ground truth
    lastGT = self._getShiftedGroundTruth(groundTruth)
    if lastGT is None:
      # Still filling the shift buffer; nothing to score yet.
      return self._subErrorMetrics[0].aggregateError

    mean = self._movingAverage(lastGT)

    # Score the moving mean as the prediction via the sub-metric.
    return self._subErrorMetrics[0].addInstance(groundTruth, mean, record)
def evalCustomErrorMetric(expr, prediction, groundTruth, tools):
  """ Evaluate a user-supplied error expression inside a restricted sandbox.

  :param expr: expression source understood by SafeInterpreter
  :param prediction: scalar prediction, or a dict of {value: probability}
  :param groundTruth: actual observed value
  :param tools: CustomErrorMetric instance whose helper methods the
      expression may call via the 'tools' symbol
  :returns: whatever the evaluated expression produces
  """
  sandbox = SafeInterpreter(writer=StringIO())
  if isinstance(prediction, dict):
    # Expose convenience views of the probability distribution.
    sandbox.symtable['prediction'] = tools.mostLikely(prediction)
    sandbox.symtable['EXP'] = tools.expValue(prediction)
    sandbox.symtable['probabilityDistribution'] = prediction
  else:
    sandbox.symtable['prediction'] = prediction
  sandbox.symtable['groundTruth'] = groundTruth
  sandbox.symtable['tools'] = tools
  error = sandbox(expr)
  return error
class CustomErrorMetric(MetricsIface):
  """
  Custom Error Metric class that handles user defined error metrics.

  The error function may be supplied as an expression ('customExpr'), as
  source code defining getError() ('customFuncSource'), or as a callable
  ('customFuncDef'); exactly one of the three must be given.
  """
  class CircularBuffer():
    """
    implementation of a fixed size constant random access circular buffer

    A negative length means "unbounded": the buffer simply grows and never
    evicts.
    """
    def __init__(self,length):
      #Create an array to back the buffer
      #If the length<0 create a zero length array
      self.data = [None for i in range(max(length,0))]
      self.elements = 0      # number of items currently stored
      self.index = 0         # next write position (monotonic when unbounded)
      self.dataLength = length

    def getItem(self,n):
      #Get item from n steps back
      if n >= self.elements or (n >= self.dataLength and not self.dataLength < 0):
        assert  False,"Trying to access data not in the stored window"
        return None
      if self.dataLength>=0:
        # Wrap around within the occupied portion of the fixed-size backing
        # array.
        getInd = (self.index-n-1)%min(self.elements,self.dataLength)
      else:
        getInd = (self.index-n-1)%self.elements
      return self.data[getInd]

    def pushToEnd(self,obj):
      ret = None
      #If storing everything simply append right to the list
      if(self.dataLength < 0 ):
        self.data.append(obj)
        self.index+=1
        self.elements+=1
        return None
      if(self.elements==self.dataLength):
        #pop last added element
        ret = self.data[self.index % self.dataLength]
      else:
        #else push new element and increment the element counter
        self.elements += 1
      self.data[self.index % self.dataLength] = obj
      self.index += 1
      # Returns the evicted element, or None when nothing was evicted.
      return ret

    def __len__(self):
      return self.elements

  def __init__(self,metricSpec):
    self.metricSpec = metricSpec
    self.steps = 0
    self.error = 0
    self.averageError = None
    self.errorMatrix = None
    self.evalError = self.evalAbsErr
    self.errorWindow = 1
    self.storeWindow=-1      # -1: keep every record (unbounded buffer)
    self.userDataStore = dict()
    if "errorWindow" in metricSpec.params:
      self.errorWindow = metricSpec.params["errorWindow"]
      assert self.errorWindow  != 0 , "Window Size cannon be zero"
    if "storeWindow" in metricSpec.params:
      self.storeWindow = metricSpec.params["storeWindow"]
      assert self.storeWindow  != 0 , "Window Size cannon be zero"
    self.errorStore = self.CircularBuffer(self.errorWindow)
    self.recordStore = self.CircularBuffer(self.storeWindow)
    if "customExpr" in metricSpec.params:
      assert not "customFuncDef" in metricSpec.params
      assert not "customFuncSource" in metricSpec.params
      self.evalError = partial(evalCustomErrorMetric, metricSpec.params["customExpr"])
    elif "customFuncSource" in metricSpec.params:
      assert not "customFuncDef" in metricSpec.params
      assert not "customExpr" in metricSpec.params
      # NOTE: executes user-supplied source; only safe for trusted
      # experiment configurations.
      exec(metricSpec.params["customFuncSource"])
      #pull out defined function from locals
      self.evalError = locals()["getError"]
    elif "customFuncDef" in metricSpec.params:
      assert not "customFuncSource" in metricSpec.params
      assert not "customExpr" in metricSpec.params
      self.evalError = metricSpec.params["customFuncDef"]

  def getPrediction(self,n):
    #Get prediction from n steps ago
    return self.recordStore.getItem(n)["prediction"]

  def getFieldValue(self,n,field):
    #Get field value from record n steps ago
    record = self.recordStore.getItem(n)["record"]
    value = record[field]
    return value

  def getGroundTruth(self,n):
    #Get the groundTruth from n steps ago
    return self.recordStore.getItem(n)["groundTruth"]

  def getBufferLen(self):
    # Number of (groundTruth, prediction, record) triples currently stored.
    return len(self.recordStore)

  def storeData(self,name,obj):
    #Store custom user data
    self.userDataStore[name] = obj

  def getData(self,name):
    #Retrieve user data
    if name in self.userDataStore:
      return self.userDataStore[name]
    return None

  def mostLikely(self, pred):
    """ Helper function to return a scalar value representing the most
        likely outcome given a probability distribution
    """
    if len(pred) == 1:
      return pred.keys()[0]

    mostLikelyOutcome = None
    maxProbability = 0

    for prediction, probability in pred.items():
      if probability > maxProbability:
        mostLikelyOutcome = prediction
        maxProbability = probability

    return mostLikelyOutcome

  def expValue(self, pred):
    """ Helper function to return a scalar value representing the expected
        value of a probability distribution
    """
    if len(pred) == 1:
      return pred.keys()[0]

    return sum([x*p for x,p in pred.items()])

  def evalAbsErr(self,pred,ground):
    # Default error function: plain absolute error.
    return abs(pred-ground)

  def getMetric(self):
    return {'value': self.averageError, "stats" : {"steps" : self.steps}}

  def addInstance(self, groundTruth, prediction, record = None, result = None):
    """ Score one sample with the configured error function and return the
    windowed average error. Missing data or a None prediction is ignored.
    """
    #If missing data,
    if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None:
      return self.averageError

    self.recordStore.pushToEnd({"groundTruth":groundTruth,
                                "prediction":prediction,"record":record})

    if isinstance(prediction, dict):
      assert not any(True for p in prediction if p is None), \
        "Invalid prediction of `None` in call to %s.addInstance()" % \
          self.__class__.__name__

    error = self.evalError(prediction,groundTruth,self)
    popped = self.errorStore.pushToEnd({"error":error})
    if not popped is None:
      #Subtract error that dropped out of the buffer
      self.error -= popped["error"]
    self.error+= error
    self.averageError =  float(self.error)/self.errorStore.elements
    self.steps+=1

    return self.averageError
class MetricMovingMode(AggregateMetric):
  """
  Computes error metric based on moving mode prediction.

  The mode of the last `mode_window` time-shifted ground truths serves as the
  "prediction"; the actual error is computed by the chained sub-metric named
  by the 'errorMetric' param.
  """
  def __init__(self, metricSpec):

    super(MetricMovingMode, self).__init__(metricSpec)

    # Number of past ground-truth values the mode is computed over.
    self.mode_window = 100
    if metricSpec.params.has_key('mode_window'):
      assert metricSpec.params['mode_window'] >= 1
      self.mode_window = metricSpec.params['mode_window']

    # Only supports one stepsize
    assert len(self._predictionSteps) == 1

    # Construct moving average instance
    self._movingMode = _MovingMode(self.mode_window)

  def getMetric(self):
    # Delegate: the chained sub-metric holds the actual error value.
    return self._subErrorMetrics[0].getMetric()

  def addInstance(self, groundTruth, prediction, record = None, result = None):
    # If missing data,
    if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
      return self._subErrorMetrics[0].aggregateError

    if self.verbosity > 0:
      print "groundTruth:\n%s\nPredictions:\n%s\n%s\n" % (groundTruth, prediction,
                                                          self.getMetric())

    # Use ground truth from 'steps' steps ago as our most recent ground truth
    lastGT = self._getShiftedGroundTruth(groundTruth)
    if lastGT is None:
      # Still filling the shift buffer; nothing to score yet.
      return self._subErrorMetrics[0].aggregateError

    mode = self._movingMode(lastGT)

    result = self._subErrorMetrics[0].addInstance(groundTruth, mode, record)
    return result
class MetricTrivial(AggregateMetric):
  """
  Computes a metric against the ground truth N steps ago. The metric to
  compute is designated by the ``errorMetric`` entry in the metric params.

  This serves as a trivial baseline: "predict that nothing changes".
  """
  def __init__(self, metricSpec):
    # This metric assumes a default 'steps' of 1
    if not 'steps' in metricSpec.params:
      metricSpec.params['steps'] = 1

    super(MetricTrivial, self).__init__(metricSpec)

    # Only supports one stepsize
    assert len(self._predictionSteps) == 1

    # Must have a suberror metric
    assert self._subErrorMetrics is not None, "This metric requires that you" \
        + " specify the name of another base metric  via the 'errorMetric' " \
        + " parameter."

  def getMetric(self):
    # Delegate: the chained sub-metric holds the actual error value.
    return self._subErrorMetrics[0].getMetric()

  def addInstance(self, groundTruth, prediction, record = None, result = None):
    # Use ground truth from 'steps' steps ago as our "prediction"
    prediction = self._getShiftedGroundTruth(groundTruth)

    if self.verbosity > 0:
      print "groundTruth:\n%s\nPredictions:\n%s\n%s\n" % (groundTruth,
                                            prediction, self.getMetric())

    # If missing data,
    if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
      return self._subErrorMetrics[0].aggregateError

    # Our "prediction" is simply what happened 'steps' steps ago
    return self._subErrorMetrics[0].addInstance(groundTruth, prediction, record)
class MetricTwoGram(AggregateMetric):
  """
  Computes error metric based on one-grams. The groundTruth passed into
  this metric is the encoded output of the field (an array of 1's and 0's).
  """
  def __init__(self, metricSpec):

    # This metric assumes a default 'steps' of 1
    if not 'steps' in metricSpec.params:
      metricSpec.params['steps'] = 1

    super(MetricTwoGram, self).__init__(metricSpec)

    # Only supports 1 stepsize
    assert len(self._predictionSteps) == 1

    # Must supply the predictionField
    assert(metricSpec.params.has_key('predictionField'))
    self.predictionField = metricSpec.params['predictionField']
    # Maps stringified-encoding -> {followingValue: occurrenceCount}.
    self.twoGramDict = dict()

  def getMetric(self):
    # Delegate: the chained sub-metric holds the actual error value.
    return self._subErrorMetrics[0].getMetric()

  def addInstance(self, groundTruth, prediction, record = None, result = None):

    # If missing data return previous error (assuming one gram will always
    # receive an instance of ndarray)
    if groundTruth.any() == False:
      return self._subErrorMetrics[0].aggregateError

    # Get actual ground Truth value from record. For this metric, the
    # "groundTruth" parameter is the encoder output and we use actualGroundTruth
    # to hold the input to the encoder (either a scalar or a category string).
    #
    # We will use 'groundTruthKey' (the stringified encoded value of
    # groundTruth) as the key for our one-gram dict and the 'actualGroundTruth'
    # as the values in our dict, which are used to compute our prediction.
    actualGroundTruth = record[self.predictionField]

    # convert binary array to a string
    groundTruthKey = str(groundTruth)

    # Get the ground truth key from N steps ago, that is what we will base
    # our prediction on. Note that our "prediction" is the prediction for the
    # current time step, to be compared to actualGroundTruth
    prevGTKey = self._getShiftedGroundTruth(groundTruthKey)

    # -------------------------------------------------------------------------
    # Get the prediction based on the previously known ground truth
    # If no previous, just default to "" or 0, depending on the groundTruth
    # data type.
    if prevGTKey == None:
      if isinstance(actualGroundTruth,str):
        pred = ""
      else:
        pred = 0

    # If the previous was never seen before, create a new dict for it.
    elif not prevGTKey in self.twoGramDict:
      if isinstance(actualGroundTruth,str):
        pred = ""
      else:
        pred = 0

      # Create a new dict for it
      self.twoGramDict[prevGTKey] = {actualGroundTruth:1}

    # If it was seen before, compute the prediction from the past history
    else:
      # Find most often occurring 1-gram
      if isinstance(actualGroundTruth,str):
        # Get the most frequent category that followed the previous timestep
        twoGramMax = max(self.twoGramDict[prevGTKey].items(), key=itemgetter(1))
        pred = twoGramMax[0]
      else:
        # Get average of all possible values that followed the previous
        # timestep
        pred = sum(self.twoGramDict[prevGTKey].iterkeys())
        pred /= len(self.twoGramDict[prevGTKey])

      # Add current ground truth to dict
      if actualGroundTruth in self.twoGramDict[prevGTKey]:
        self.twoGramDict[prevGTKey][actualGroundTruth] += 1
      else:
        self.twoGramDict[prevGTKey][actualGroundTruth] = 1

    if self.verbosity > 0:
      print "\nencoding:%s\nactual:%s\nprevEncoding:%s\nprediction:%s\nmetric:%s" % \
              (groundTruth, actualGroundTruth, prevGTKey, pred, self.getMetric())

    # Score our two-gram prediction against the actual value via the
    # chained sub-metric.
    return self._subErrorMetrics[0].addInstance(actualGroundTruth, pred, record)
class MetricAccuracy(AggregateMetric):
  """
  Computes simple accuracy for an enumerated type. all inputs are treated as
  discrete members of a set, therefore for example 0.5 is only a correct
  response if the ground truth is exactly 0.5. Inputs can be strings, integers,
  or reals.
  """
  def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
    """Score 1.0 for an exact match, 0.0 otherwise, maintaining the window."""
    # This is really an accuracy measure rather than an "error" measure
    hit = 1.0 if groundTruth == prediction else 0.0
    total = accumulatedError + hit
    if historyBuffer is not None:
      historyBuffer.append(hit)
      if len(historyBuffer) > self.spec.params["window"]:
        total -= historyBuffer.popleft()
    return total

  def aggregate(self, accumulatedError, historyBuffer, steps):
    """Return mean accuracy over the window (or over all steps)."""
    if historyBuffer is not None:
      count = len(historyBuffer)
    else:
      count = steps
    return accumulatedError / float(count)
class MetricAveError(AggregateMetric):
  """
  Simply the inverse of the Accuracy metric. More consistent with scalar
  metrics because they all report an error to be minimized.
  """
  def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
    """Score 1.0 for a mismatch, 0.0 for a match, maintaining the window."""
    miss = 1.0 if groundTruth != prediction else 0.0
    total = accumulatedError + miss
    if historyBuffer is not None:
      historyBuffer.append(miss)
      if len(historyBuffer) > self.spec.params["window"]:
        total -= historyBuffer.popleft()
    return total

  def aggregate(self, accumulatedError, historyBuffer, steps):
    """Return mean error rate over the window (or over all steps)."""
    if historyBuffer is not None:
      count = len(historyBuffer)
    else:
      count = steps
    return accumulatedError / float(count)
class MetricNegAUC(AggregateMetric):
  """
  Computes -1 * AUC (Area Under the Curve) of the ROC (Receiver Operator
  Characteristics) curve. We compute -1 * AUC because metrics are optimized to
  be LOWER when swarming.

  For this, we assuming that category 1 is the "positive" category and we are
  generating an ROC curve with the TPR (True Positive Rate) of category 1 on the
  y-axis and the FPR (False Positive Rate) on the x-axis.
  """
  def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):
    """ Accumulate history of groundTruth and "prediction" values.

    For this metric, groundTruth is the actual category and "prediction" is a
    dict containing one top-level item with a key of 0 (meaning this is the
    0-step classificaton) and a value which is another dict, which contains the
    probability for each category as output from the classifier. For example,
    this is what "prediction" would be if the classifier said that category 0
    had a 0.6 probability and category 1 had a 0.4 probability: {0:0.6, 1: 0.4}
    """

    # We disable it within aggregate() if we find that the classifier classes
    # are not compatible with AUC calculations.
    if self.disabled:
      return 0

    # Just store the groundTruth, probability into our history buffer. We will
    # wait until aggregate gets called to actually compute AUC.
    if historyBuffer is not None:
      historyBuffer.append((groundTruth, prediction[0]))
      if len(historyBuffer) > self.spec.params["window"] :
        historyBuffer.popleft()

    # accumulatedError not used in this metric
    return 0

  def aggregate(self, accumulatedError, historyBuffer, steps):

    # If disabled, do nothing.
    if self.disabled:
      return 0.0

    if historyBuffer is not None:
      n = len(historyBuffer)
    else:
      # AUC requires a history window; without one there is nothing to score.
      return 0.0

    # For performance reasons, only re-compute this every 'computeEvery' steps
    frequency = self.spec.params.get('computeEvery', 1)
    if ((steps+1) % frequency) != 0:
      # Off-cycle: return the previously computed value.
      return self.aggregateError

    # Compute the ROC curve and the area underneath it
    actuals = [gt for (gt, probs) in historyBuffer]
    classes = np.unique(actuals)

    # We can only compute ROC when we have at least 1 sample of each category
    if len(classes) < 2:
      # Undecidable: report the chance-level AUC of 0.5 (negated).
      return -1 * 0.5

    # Print warning the first time this metric is asked to be computed on a
    # problem with more than 2 classes
    if sorted(classes) != [0,1]:
      print "WARNING: AUC only implemented for binary classifications where " \
            "the categories are category 0 and 1. In this network, the " \
            "categories are: %s" % (classes)
      print "WARNING: Computation of this metric is disabled for the remainder of " \
            "this experiment."
      self.disabled = True
      return 0.0

    # Compute the ROC and AUC. Note that because we are online, there's a
    # chance that some of the earlier classification probabilities don't
    # have the True class (category 1) yet because it hasn't been seen yet.
    # Therefore, we use probs.get() with a default value of 0.
    scores = [probs.get(1, 0) for (gt, probs) in historyBuffer]
    (fpr, tpr, thresholds) = roc.ROCCurve(actuals, scores)
    auc = roc.AreaUnderCurve(fpr, tpr)

    # Debug? (dead branch kept for manual debugging; flip to True locally)
    if False:
      print
      print "AUC metric debug info (%d steps):" % (steps)
      print " actuals:", actuals
      print " probabilities:", ["%.2f" % x for x in scores]
      print " fpr:", fpr
      print " tpr:", tpr
      print " thresholds:", thresholds
      print " AUC:", auc

    # Negate so that "lower is better" holds for swarming.
    return -1 * auc
class MetricMultiStep(AggregateMetric):
"""
This is an "uber" metric which is used to apply one of the other basic
metrics to a specific step in a multi-step prediction.
The specParams are expected to contain:
- ``errorMetric``: name of basic metric to apply
- ``steps``: compare prediction['steps'] to the current ground truth.
Note that the metrics manager has already performed the time shifting
for us - it passes us the prediction element from 'steps' steps ago
and asks us to compare that to the current ground truth.
When multiple steps of prediction are requested, we average the results of
the underlying metric for each step.
"""
def __init__(self, metricSpec):
super(MetricMultiStep, self).__init__(metricSpec)
assert self._subErrorMetrics is not None
def getMetric(self):
return {'value': self.aggregateError, "stats" : {"steps" : self.steps}}
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self.aggregateError
# Get the prediction for this time step
aggErrSum = 0
try:
for step, subErrorMetric in \
zip(self._predictionSteps, self._subErrorMetrics):
stepPrediction = prediction[step]
# Unless this is a custom_error_metric, when we have a dict of
# probabilities, get the most probable one. For custom error metrics,
# we pass the probabilities in so that it can decide how best to deal with
# them.
if isinstance(stepPrediction, dict) \
and not isinstance(subErrorMetric, CustomErrorMetric):
predictions = [(prob,value) for (value, prob) in \
stepPrediction.iteritems()]
predictions.sort()
stepPrediction = predictions[-1][1]
# Get sum of the errors
aggErr = subErrorMetric.addInstance(groundTruth, stepPrediction, record, result)
if self.verbosity >= 2:
print "MetricMultiStep %s: aggErr for stepSize %d: %s" % \
(self._predictionSteps, step, aggErr)
aggErrSum += aggErr
except:
pass
# Return average aggregate error across all step sizes
self.aggregateError = aggErrSum / len(self._subErrorMetrics)
if self.verbosity >= 2:
print "MetricMultiStep %s: aggErrAvg: %s" % (self._predictionSteps,
self.aggregateError)
self.steps += 1
if self.verbosity >= 1:
print "\nMetricMultiStep %s: \n groundTruth: %s\n Predictions: %s" \
"\n Metric: %s" % (self._predictionSteps, groundTruth, prediction,
self.getMetric())
return self.aggregateError
class MetricMultiStepProbability(AggregateMetric):
"""
This is an "uber" metric which is used to apply one of the other basic
metrics to a specific step in a multi-step prediction.
The specParams are expected to contain:
- ``errorMetric``: name of basic metric to apply
- ``steps``: compare prediction['steps'] to the current ground truth.
Note that the metrics manager has already performed the time shifting
for us - it passes us the prediction element from 'steps' steps ago
and asks us to compare that to the current ground truth.
"""
def __init__(self, metricSpec):
# Default window should be 1
if not 'window' in metricSpec.params:
metricSpec.params['window'] = 1
super(MetricMultiStepProbability, self).__init__(metricSpec)
# Must have a suberror metric
assert self._subErrorMetrics is not None, "This metric requires that you" \
+ " specify the name of another base metric via the 'errorMetric' " \
+ " parameter."
# Force all subErrorMetric windows to 1. This is necessary because by
# default they each do their own history averaging assuming that their
# addInstance() gets called once per interation. But, in this metric
# we actually call into each subErrorMetric multiple times per iteration
for subErrorMetric in self._subErrorMetrics:
subErrorMetric.window = 1
subErrorMetric.spec.params['window'] = 1
self._movingAverage = MovingAverage(self.window)
def getMetric(self):
return {'value': self.aggregateError, "stats" :
{"steps" : self.steps}}
def addInstance(self, groundTruth, prediction, record = None, result = None):
# If missing data,
if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA:
return self.aggregateError
if self.verbosity >= 1:
print "\nMetricMultiStepProbability %s: \n groundTruth: %s\n " \
"Predictions: %s" % (self._predictionSteps, groundTruth,
prediction)
# Get the aggregateErrors for all requested step sizes and average them
aggErrSum = 0
for step, subErrorMetric in \
zip(self._predictionSteps, self._subErrorMetrics):
stepPrediction = prediction[step]
# If it's a dict of probabilities, get the expected value
error = 0
if isinstance(stepPrediction, dict):
expectedValue = 0
# For every possible prediction multiply its error by its probability
for (pred, prob) in stepPrediction.iteritems():
error += subErrorMetric.addInstance(groundTruth, pred, record) \
* prob
else:
error += subErrorMetric.addInstance(groundTruth, stepPrediction,
record)
if self.verbosity >= 2:
print ("MetricMultiStepProbability %s: aggErr for stepSize %d: %s" %
(self._predictionSteps, step, error))
aggErrSum += error
# Return aggregate error
avgAggErr = aggErrSum / len(self._subErrorMetrics)
self.aggregateError = self._movingAverage(avgAggErr)
if self.verbosity >= 2:
print ("MetricMultiStepProbability %s: aggErr over all steps, this "
"iteration (%d): %s" % (self._predictionSteps, self.steps, avgAggErr))
print ("MetricMultiStepProbability %s: aggErr moving avg: %s" %
(self._predictionSteps, self.aggregateError))
self.steps += 1
if self.verbosity >= 1:
print "MetricMultiStepProbability %s: \n Error: %s\n Metric: %s" % \
(self._predictionSteps, avgAggErr, self.getMetric())
return self.aggregateError
class MetricMulti(MetricsIface):
"""
Multi metric can combine multiple other (sub)metrics and weight them to
provide combined score.
"""
def __init__(self, metricSpec):
"""MetricMulti constructor using metricSpec is not allowed."""
raise ValueError("MetricMulti cannot be constructed from metricSpec string! "
"Use MetricMulti(weights,metrics) constructor instead.")
def __init__(self, weights, metrics, window=None):
"""MetricMulti
@param weights - [list of floats] used as weights
@param metrics - [list of submetrics]
@param window - (opt) window size for moving average, or None when disabled
"""
if (weights is None or not isinstance(weights, list) or
not len(weights) > 0 or
not isinstance(weights[0], float)):
raise ValueError("MetricMulti requires 'weights' parameter as a [list of floats]")
self.weights = weights
if (metrics is None or not isinstance(metrics, list) or
not len(metrics) > 0 or
not isinstance(metrics[0], MetricsIface)):
raise ValueError("MetricMulti requires 'metrics' parameter as a [list of Metrics]")
self.metrics = metrics
if window is not None:
self.movingAvg = MovingAverage(windowSize=window)
else:
self.movingAvg = None
def addInstance(self, groundTruth, prediction, record = None, result = None):
err = 0.0
subResults = [m.addInstance(groundTruth, prediction, record) for m in self.metrics]
for i in xrange(len(self.weights)):
if subResults[i] is not None:
err += subResults[i]*self.weights[i]
else: # submetric returned None, propagate
self.err = None
return None
if self.verbosity > 2:
print "IN=",groundTruth," pred=",prediction,": w=",self.weights[i]," metric=",self.metrics[i]," value=",m," err=",err
if self.movingAvg is not None:
err=self.movingAvg(err)
self.err = err
return err
def __repr__(self):
return "MetricMulti(weights=%s, metrics=%s)" % (self.weights, self.metrics)
def getMetric(self):
return {'value': self.err, "stats" : {"weights" : self.weights}}
| 57,390 | Python | .py | 1,258 | 39.011924 | 123 | 0.68748 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,965 | htm_prediction_model_callbacks.py | numenta_nupic-legacy/src/nupic/frameworks/opf/htm_prediction_model_callbacks.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# HTMPredictionModel-specific experiment task callbacks that may be used
# in setup, postIter, and finish callback lists
import os
from nupic.support.fs_helpers import makeDirectoryFromAbsolutePath
from htm_prediction_model import HTMPredictionModel
def htmPredictionModelControlEnableSPLearningCb(htmPredictionModel):
""" Enables learning in the HTMPredictionModel's Spatial Pooler
See also htmPredictionModelControlDisableSPLearningCb.
htmPredictionModel: pointer to a HTMPredictionModel instance
Returns: nothing
"""
assert isinstance(htmPredictionModel, HTMPredictionModel)
htmPredictionModel._getSPRegion().setParameter('learningMode', True)
return
def htmPredictionModelControlDisableSPLearningCb(htmPredictionModel):
""" Disables learning in the HTMPredictionModel's Spatial Pooler, while
retaining the ability to re-enable SP learning in the future.
See also: htmPredictionModelControlEnableSPLearningCb.
See also: model_callbacks.modelControlFinishLearningCb.
htmPredictionModel: pointer to a HTMPredictionModel instance
Returns: nothing
"""
assert isinstance(htmPredictionModel, HTMPredictionModel)
htmPredictionModel._getSPRegion().setParameter('learningMode', False)
return
def htmPredictionModelControlEnableTPLearningCb(htmPredictionModel):
""" Enables learning in the HTMPredictionModel's Temporal Pooler
See also htmPredictionModelControlDisableTPLearningCb.
htmPredictionModel: pointer to a HTMPredictionModel instance
Returns: nothing
"""
assert isinstance(htmPredictionModel, HTMPredictionModel)
htmPredictionModel._getTPRegion().setParameter('learningMode', True)
return
def htmPredictionModelControlDisableTPLearningCb(htmPredictionModel):
""" Disables learning in the HTMPredictionModel's Temporal Pooler, while
retaining the ability to re-enable TM learning in the future.
See also: htmPredictionModelControlEnableTPLearningCb.
See also: model_callbacks.modelControlFinishLearningCb.
htmPredictionModel: pointer to a HTMPredictionModel instance
Returns: nothing
"""
assert isinstance(htmPredictionModel, HTMPredictionModel)
htmPredictionModel._getTPRegion().setParameter('learningMode', False)
return
class HTMPredictionModelPickleSPInitArgs(object):
""" Saves SP initialization args
"""
def __init__(self, filePath):
"""
filePath: path of file where SP __init__ args are to be saved
"""
self.__filePath = filePath
return
def __call__(self, htmPredictionModel):
import pickle
# Get the SP args dictionary
assert isinstance(htmPredictionModel, HTMPredictionModel)
spRegion = htmPredictionModel._getSPRegion().getSelf()
sfdr = spRegion._sfdr
initArgsDict = sfdr._initArgsDict
# Write it out to a file as json
absFilePath = os.path.abspath(self.__filePath)
absDir = os.path.dirname(absFilePath)
makeDirectoryFromAbsolutePath(absDir)
with open(absFilePath, 'wb') as pickleFile:
pickle.dump(initArgsDict, pickleFile)
return
class HTMPredictionModelPickleTPInitArgs(object):
""" Saves BacktrackingTMCPP initialization args
"""
def __init__(self, filePath):
"""
filePath: path of file where TM __init__ args are to be saved
"""
self.__filePath = filePath
return
def __call__(self, htmPredictionModel):
import pickle
# Get the TM args dictionary
assert isinstance(htmPredictionModel, HTMPredictionModel)
tpRegion = htmPredictionModel._getTPRegion().getSelf()
tfdr = tpRegion._tfdr
initArgsDict = tfdr._initArgsDict
# Write it out to a file as json
absFilePath = os.path.abspath(self.__filePath)
absDir = os.path.dirname(absFilePath)
makeDirectoryFromAbsolutePath(absDir)
with open(absFilePath, 'wb') as pickleFile:
pickle.dump(initArgsDict, pickleFile)
return
| 4,861 | Python | .py | 111 | 40.036036 | 74 | 0.767949 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,966 | __init__.py | numenta_nupic-legacy/src/nupic/frameworks/opf/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,967 | previous_value_model.py | numenta_nupic-legacy/src/nupic/frameworks/opf/previous_value_model.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Module containing the trivial predictor OPF model implementation. """
import itertools
from nupic.data import field_meta
from nupic.frameworks.opf import model
from nupic.frameworks.opf import opf_utils
from opf_utils import InferenceType
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.frameworks.opf.previous_value_model_capnp import (
PreviousValueModelProto)
class PreviousValueModel(model.Model):
"""
Previous value model.
:param inferenceType: (:class:`nupic.frameworks.opf.opf_utils.InferenceType`)
:param fieldNames: a list of field names
:param fieldTypes: a list of the types for the fields mentioned in
``fieldNames``
:param predictedField: the field from ``fieldNames`` which is to be predicted
:param predictionSteps: a list of steps for which a prediction is made. This is
only needed in the case of multi step predictions. For example, to get
predictions 1, 5, and 10 steps ahead: ``[1,5,10]``.
"""
def __init__(self, inferenceType=InferenceType.TemporalNextStep,
fieldNames=[],
fieldTypes=[],
predictedField=None,
predictionSteps=[]):
super(PreviousValueModel, self).__init__(inferenceType)
self._logger = opf_utils.initLogger(self)
self._predictedField = predictedField
self._fieldNames = fieldNames
self._fieldTypes = fieldTypes
# only implement multistep and temporalnextstep
if inferenceType == InferenceType.TemporalNextStep:
self._predictionSteps = [1]
elif inferenceType == InferenceType.TemporalMultiStep:
self._predictionSteps = predictionSteps
else:
assert False, "Previous Value Model only works for next step or multi-step."
def run(self, inputRecord):
# set the results. note that there is no translation to sensorInput
results = super(PreviousValueModel, self).run(inputRecord)
results.sensorInput = opf_utils.SensorInput(dataRow= \
[inputRecord[fn] for fn in self._fieldNames])
# select the current value for the prediction with probablity of 1
results.inferences = {opf_utils.InferenceElement.multiStepBestPredictions : \
dict((steps, inputRecord[self._predictedField]) \
for steps in self._predictionSteps),
opf_utils.InferenceElement.multiStepPredictions : \
dict((steps, {inputRecord[self._predictedField] : 1}) \
for steps in self._predictionSteps)
}
# set the next step prediction if step of 1 is selected
if 1 in self._predictionSteps:
results.inferences[opf_utils.InferenceElement.prediction] = \
inputRecord[self._predictedField]
return results
def finishLearning(self):
"""
The PVM does not learn, so this function has no effect.
"""
pass
def setFieldStatistics(self,fieldStats):
"""
Since the PVM has no use for this information, this is a no-op
"""
pass
def getFieldInfo(self):
return tuple(field_meta.FieldMetaInfo(*args) for args in
itertools.izip(
self._fieldNames, self._fieldTypes,
itertools.repeat(field_meta.FieldMetaSpecial.none)))
def getRuntimeStats(self):
# TODO: Add debugging stats.
# > what sort of stats are we supposed to return?
return dict()
def _getLogger(self):
return self._logger
def resetSequenceStates(self):
self._reset = True
@staticmethod
def getSchema():
return PreviousValueModelProto
def write(self, proto):
""" Serialize via capnp
:param proto: capnp PreviousValueModelProto message builder
"""
super(PreviousValueModel, self).writeBaseToProto(proto.modelBase)
proto.fieldNames = self._fieldNames
proto.fieldTypes = self._fieldTypes
if self._predictedField:
proto.predictedField = self._predictedField
proto.predictionSteps = self._predictionSteps
@classmethod
def read(cls, proto):
"""Deserialize via capnp
:param proto: capnp PreviousValueModelProto message reader
:returns: new instance of PreviousValueModel deserialized from the given
proto
"""
instance = object.__new__(cls)
super(PreviousValueModel, instance).__init__(proto=proto.modelBase)
instance._logger = opf_utils.initLogger(instance)
if len(proto.predictedField):
instance._predictedField = proto.predictedField
else:
instance._predictedField = None
instance._fieldNames = list(proto.fieldNames)
instance._fieldTypes = list(proto.fieldTypes)
instance._predictionSteps = list(proto.predictionSteps)
return instance
def __getstate__(self):
# NOTE This deletion doesn't seem to make sense, as someone might want to
# serialize and then continue to use the model instance.
del self._logger
return self.__dict__
def __setstate__(self):
self._logger = opf_utils.initLogger(self)
| 6,009 | Python | .py | 141 | 36.695035 | 82 | 0.697048 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,968 | htm_prediction_model.py | numenta_nupic-legacy/src/nupic/frameworks/opf/htm_prediction_model.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Encapsulation of HTM network that implements the base
:class:`~nupic.frameworks.opf.model.Model` to perform temporal prediction.
"""
import copy
import math
import os
import json
import itertools
import logging
import traceback
from collections import deque
from operator import itemgetter
from functools import wraps
import numpy
from nupic.frameworks.opf.model import Model
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.field_meta import FieldMetaSpecial, FieldMetaInfo
from nupic.encoders import MultiEncoder, DeltaEncoder
from nupic.engine import Network
from nupic.support.fs_helpers import makeDirectoryFromAbsolutePath
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement,
SensorInput,
ClassifierInput,
initLogger)
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.frameworks.opf.HTMPredictionModelProto_capnp \
import HTMPredictionModelProto
DEFAULT_LIKELIHOOD_THRESHOLD = 0.0001
DEFAULT_MAX_PREDICTIONS_PER_STEP = 8
DEFAULT_ANOMALY_TRAINRECORDS = 4000
DEFAULT_ANOMALY_THRESHOLD = 1.1
DEFAULT_ANOMALY_CACHESIZE = 10000
EPSILON_ROUND = 7
def requireAnomalyModel(func):
"""
Decorator for functions that require anomaly models.
"""
@wraps(func)
def _decorator(self, *args, **kwargs):
if not self.getInferenceType() == InferenceType.TemporalAnomaly:
raise RuntimeError("Method required a TemporalAnomaly model.")
if self._getAnomalyClassifier() is None:
raise RuntimeError("Model does not support this command. Model must"
"be an active anomalyDetector model.")
return func(self, *args, **kwargs)
return _decorator
class NetworkInfo(object):
""" Data type used as return value type by
HTMPredictionModel.__createHTMNetwork()
"""
def __init__(self, net, statsCollectors):
"""
net: The CLA Network instance
statsCollectors:
Sequence of 0 or more CLAStatistic-based instances
"""
self.net = net
self.statsCollectors = statsCollectors
return
def __repr__(self):
return "NetworkInfo(net=%r, statsCollectors=%r)" % (
self.net, self.statsCollectors)
def __eq__(self, other):
return self.net == other.net and \
self.statsCollectors == other.statsCollectors
def __ne__(self, other):
return not self.__eq__(other)
class HTMPredictionModel(Model):
"""
This model is for temporal predictions multiple steps ahead. After creating
this model, you must call
:meth:`~nupic.frameworks.opf.model.Model.enableInference` to specify a
predicted field, like this:
.. code-block:: python
model.enableInference({"predictedField": "myPredictedField"})
Where ``myPredictedField`` is the field name in your data input that should be
predicted.
:param inferenceType: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)
:param sensorParams: (dict) specifying the sensor parameters.
:param spEnable: (bool) Whether or not to use a spatial pooler.
:param spParams: (dict) specifying the spatial pooler parameters. These are
passed to the spatial pooler.
:param trainSPNetOnlyIfRequested: (bool) If set, don't create an SP network
unless the user requests SP metrics.
:param tmEnable: (bool) Whether to use a temporal memory.
:param tmParams: (dict) specifying the temporal memory parameters. These are
passed to the temporal memory.
:param clEnable: (bool) Whether to use the classifier. If false, the
classifier will not be created and no predictions will be generated.
:param clParams: (dict) specifying the classifier parameters. These are passed
to the classifier.
:param anomalyParams: (dict) Anomaly detection parameters
:param minLikelihoodThreshold: (float) The minimum likelihood value to include
in inferences. Currently only applies to multistep inferences.
:param maxPredictionsPerStep: (int) Maximum number of predictions to include
for each step in inferences. The predictions with highest likelihood are
included.
"""
__supportedInferenceKindSet = set((InferenceType.TemporalNextStep,
InferenceType.TemporalClassification,
InferenceType.NontemporalClassification,
InferenceType.NontemporalAnomaly,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep))
__myClassName = "HTMPredictionModel"
def __init__(self,
sensorParams={},
inferenceType=InferenceType.TemporalNextStep,
spEnable=True,
spParams={},
# TODO: We can't figure out what this is. Remove?
trainSPNetOnlyIfRequested=False,
tmEnable=True,
tmParams={},
clEnable=True,
clParams={},
anomalyParams={},
minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD,
maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP,
network=None,
baseProto=None):
"""
:param network: if not None, the deserialized nupic.engine.Network instance
to use instead of creating a new Network
:param baseProto: if not None, capnp ModelProto message reader for
deserializing; supersedes inferenceType
"""
if not inferenceType in self.__supportedInferenceKindSet:
raise ValueError("{0} received incompatible inference type: {1}"\
.format(self.__class__, inferenceType))
# Call super class constructor
if baseProto is None:
super(HTMPredictionModel, self).__init__(inferenceType)
else:
super(HTMPredictionModel, self).__init__(proto=baseProto)
# self.__restoringFromState is set to True by our __setstate__ method
# and back to False at completion of our _deSerializeExtraData() method.
self.__restoringFromState = False
self.__restoringFromV1 = False
# Intitialize logging
self.__logger = initLogger(self)
self.__logger.debug("Instantiating %s." % self.__myClassName)
self._minLikelihoodThreshold = minLikelihoodThreshold
self._maxPredictionsPerStep = maxPredictionsPerStep
# set up learning parameters (note: these may be replaced via
# enable/disable//SP/TM//Learning methods)
self.__spLearningEnabled = bool(spEnable)
self.__tpLearningEnabled = bool(tmEnable)
# Explicitly exclude the TM if this type of inference doesn't require it
if not InferenceType.isTemporal(self.getInferenceType()) \
or self.getInferenceType() == InferenceType.NontemporalMultiStep:
tmEnable = False
self._netInfo = None
self._hasSP = spEnable
self._hasTP = tmEnable
self._hasCL = clEnable
self._classifierInputEncoder = None
self._predictedFieldIdx = None
self._predictedFieldName = None
self._numFields = None
# init anomaly
# -----------------------------------------------------------------------
if network is not None:
# Most likely in the scope of deserialization
self._netInfo = NetworkInfo(net=network, statsCollectors=[])
else:
# Create the network
self._netInfo = self.__createHTMNetwork(
sensorParams, spEnable, spParams, tmEnable, tmParams, clEnable,
clParams, anomalyParams)
# Initialize Spatial Anomaly detection parameters
if self.getInferenceType() == InferenceType.NontemporalAnomaly:
self._getSPRegion().setParameter("anomalyMode", True)
# Initialize Temporal Anomaly detection parameters
if self.getInferenceType() == InferenceType.TemporalAnomaly:
self._getTPRegion().setParameter("anomalyMode", True)
# -----------------------------------------------------------------------
# This flag, if present tells us not to train the SP network unless
# the user specifically asks for the SP inference metric
self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequested
self.__numRunCalls = 0
# Tracks whether finishedLearning() has been called
self.__finishedLearning = False
self.__logger.debug("Instantiated %s" % self.__class__.__name__)
self._input = None
return
def getParameter(self, paramName):
"""
Currently only supports a parameter named ``__numRunCalls``.
:param paramName: (string) name of parameter to get. If not
``__numRunCalls`` an exception is thrown.
:returns: (int) the value of ``self.__numRunCalls``
"""
if paramName == '__numRunCalls':
return self.__numRunCalls
else:
raise RuntimeError("'%s' parameter is not exposed by htm_prediction_model." % \
(paramName))
def resetSequenceStates(self):
if self._hasTP:
# Reset TM's sequence states
self._getTPRegion().executeCommand(['resetSequenceStates'])
self.__logger.debug("HTMPredictionModel.resetSequenceStates(): reset temporal "
"pooler's sequence states")
return
def finishLearning(self):
assert not self.__finishedLearning
if self._hasSP:
# Finish SP learning
self._getSPRegion().executeCommand(['finishLearning'])
self.__logger.debug(
"HTMPredictionModel.finishLearning(): finished SP learning")
if self._hasTP:
# Finish temporal network's TM learning
self._getTPRegion().executeCommand(['finishLearning'])
self.__logger.debug(
"HTMPredictionModel.finishLearning(): finished TM learning")
self.__spLearningEnabled = self.__tpLearningEnabled = False
self.__finishedLearning = True
return
def setFieldStatistics(self, fieldStats):
encoder = self._getEncoder()
# Set the stats for the encoders. The first argument to setFieldStats
# is the field name of the encoder. Since we are using a multiencoder
# we leave it blank, the multiencoder will propagate the field names to the
# underlying encoders
encoder.setFieldStats('',fieldStats)
def enableInference(self, inferenceArgs=None):
super(HTMPredictionModel, self).enableInference(inferenceArgs)
if inferenceArgs is not None and "predictedField" in inferenceArgs:
self._getSensorRegion().setParameter("predictedField",
str(inferenceArgs["predictedField"]))
def enableLearning(self):
super(HTMPredictionModel, self).enableLearning()
self.setEncoderLearning(True)
def disableLearning(self):
super(HTMPredictionModel, self).disableLearning()
self.setEncoderLearning(False)
def setEncoderLearning(self,learningEnabled):
self._getEncoder().setLearning(learningEnabled)
# Anomaly Accessor Methods
@requireAnomalyModel
def setAnomalyParameter(self, param, value):
"""
Set a parameter of the anomaly classifier within this model.
:param param: (string) name of parameter to set
:param value: (object) value to set
"""
self._getAnomalyClassifier().setParameter(param, value)
@requireAnomalyModel
def getAnomalyParameter(self, param):
"""
Get a parameter of the anomaly classifier within this model by key.
:param param: (string) name of parameter to retrieve
"""
return self._getAnomalyClassifier().getParameter(param)
@requireAnomalyModel
def anomalyRemoveLabels(self, start, end, labelFilter):
"""
Remove labels from the anomaly classifier within this model. Removes all
records if ``labelFilter==None``, otherwise only removes the labels equal to
``labelFilter``.
:param start: (int) index to start removing labels
:param end: (int) index to end removing labels
:param labelFilter: (string) If specified, only removes records that match
"""
self._getAnomalyClassifier().getSelf().removeLabels(start, end, labelFilter)
@requireAnomalyModel
def anomalyAddLabel(self, start, end, labelName):
"""
Add labels from the anomaly classifier within this model.
:param start: (int) index to start label
:param end: (int) index to end label
:param labelName: (string) name of label
"""
self._getAnomalyClassifier().getSelf().addLabel(start, end, labelName)
@requireAnomalyModel
def anomalyGetLabels(self, start, end):
"""
Get labels from the anomaly classifier within this model.
:param start: (int) index to start getting labels
:param end: (int) index to end getting labels
"""
return self._getAnomalyClassifier().getSelf().getLabels(start, end)
def run(self, inputRecord):
assert not self.__restoringFromState
assert inputRecord
results = super(HTMPredictionModel, self).run(inputRecord)
self.__numRunCalls += 1
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug("HTMPredictionModel.run() inputRecord=%s", (inputRecord))
results.inferences = {}
self._input = inputRecord
# -------------------------------------------------------------------------
# Turn learning on or off?
if '_learning' in inputRecord:
if inputRecord['_learning']:
self.enableLearning()
else:
self.disableLearning()
###########################################################################
# Predictions and Learning
###########################################################################
self._sensorCompute(inputRecord)
self._spCompute()
self._tpCompute()
results.sensorInput = self._getSensorInputRecord(inputRecord)
inferences = {}
# TODO: Reconstruction and temporal classification not used. Remove
if self._isReconstructionModel():
inferences = self._reconstructionCompute()
elif self._isMultiStepModel():
inferences = self._multiStepCompute(rawInput=inputRecord)
# For temporal classification. Not used, and might not work anymore
elif self._isClassificationModel():
inferences = self._classificationCompute()
results.inferences.update(inferences)
inferences = self._anomalyCompute()
results.inferences.update(inferences)
# -----------------------------------------------------------------------
# Store the index and name of the predictedField
results.predictedFieldIdx = self._predictedFieldIdx
results.predictedFieldName = self._predictedFieldName
results.classifierInput = self._getClassifierInputRecord(inputRecord)
# =========================================================================
# output
assert (not self.isInferenceEnabled() or results.inferences is not None), \
"unexpected inferences: %r" % results.inferences
#self.__logger.setLevel(logging.DEBUG)
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug("inputRecord: %r, results: %r" % (inputRecord,
results))
return results
def _getSensorInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
Return a 'SensorInput' object, which represents the 'parsed'
representation of the input record
"""
sensor = self._getSensorRegion()
dataRow = copy.deepcopy(sensor.getSelf().getOutputValues('sourceOut'))
dataDict = copy.deepcopy(inputRecord)
inputRecordEncodings = sensor.getSelf().getOutputValues('sourceEncodings')
inputRecordCategory = int(sensor.getOutputData('categoryOut')[0])
resetOut = sensor.getOutputData('resetOut')[0]
return SensorInput(dataRow=dataRow,
dataDict=dataDict,
dataEncodings=inputRecordEncodings,
sequenceReset=resetOut,
category=inputRecordCategory)
def _getClassifierInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
Return a 'ClassifierInput' object, which contains the mapped
bucket index for input Record
"""
absoluteValue = None
bucketIdx = None
if self._predictedFieldName is not None and self._classifierInputEncoder is not None:
absoluteValue = inputRecord[self._predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
return ClassifierInput(dataRow=absoluteValue,
bucketIndex=bucketIdx)
def _sensorCompute(self, inputRecord):
sensor = self._getSensorRegion()
self._getDataSource().push(inputRecord)
sensor.setParameter('topDownMode', False)
sensor.prepareInputs()
try:
sensor.compute()
except StopIteration as e:
raise Exception("Unexpected StopIteration", e,
"ACTUAL TRACEBACK: %s" % traceback.format_exc())
def _spCompute(self):
sp = self._getSPRegion()
if sp is None:
return
sp.setParameter('topDownMode', False)
sp.setParameter('inferenceMode', self.isInferenceEnabled())
sp.setParameter('learningMode', self.isLearningEnabled())
sp.prepareInputs()
sp.compute()
def _tpCompute(self):
tm = self._getTPRegion()
if tm is None:
return
if (self.getInferenceType() == InferenceType.TemporalAnomaly or
self._isReconstructionModel()):
topDownCompute = True
else:
topDownCompute = False
tm = self._getTPRegion()
tm.setParameter('topDownMode', topDownCompute)
tm.setParameter('inferenceMode', self.isInferenceEnabled())
tm.setParameter('learningMode', self.isLearningEnabled())
tm.prepareInputs()
tm.compute()
def _isReconstructionModel(self):
inferenceType = self.getInferenceType()
inferenceArgs = self.getInferenceArgs()
if inferenceType == InferenceType.TemporalNextStep:
return True
if inferenceArgs:
return inferenceArgs.get('useReconstruction', False)
return False
def _isMultiStepModel(self):
return self.getInferenceType() in (InferenceType.NontemporalMultiStep,
InferenceType.NontemporalClassification,
InferenceType.TemporalMultiStep,
InferenceType.TemporalAnomaly)
def _isClassificationModel(self):
return self.getInferenceType() in InferenceType.TemporalClassification
def _multiStepCompute(self, rawInput):
patternNZ = None
if self._getTPRegion() is not None:
tm = self._getTPRegion()
tpOutput = tm.getSelf()._tfdr.infActiveState['t']
patternNZ = tpOutput.reshape(-1).nonzero()[0]
elif self._getSPRegion() is not None:
sp = self._getSPRegion()
spOutput = sp.getOutputData('bottomUpOut')
patternNZ = spOutput.nonzero()[0]
elif self._getSensorRegion() is not None:
sensor = self._getSensorRegion()
sensorOutput = sensor.getOutputData('dataOut')
patternNZ = sensorOutput.nonzero()[0]
else:
raise RuntimeError("Attempted to make multistep prediction without"
"TM, SP, or Sensor regions")
inputTSRecordIdx = rawInput.get('_timestampRecordIdx')
return self._handleSDRClassifierMultiStep(
patternNZ=patternNZ,
inputTSRecordIdx=inputTSRecordIdx,
rawInput=rawInput)
def _classificationCompute(self):
inference = {}
classifier = self._getClassifierRegion()
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', self.isLearningEnabled())
classifier.prepareInputs()
classifier.compute()
# What we get out is the score for each category. The argmax is
# then the index of the winning category
classificationDist = classifier.getOutputData('categoriesOut')
classification = classificationDist.argmax()
probabilities = classifier.getOutputData('categoryProbabilitiesOut')
numCategories = classifier.getParameter('activeOutputCount')
classConfidences = dict(zip(xrange(numCategories), probabilities))
inference[InferenceElement.classification] = classification
inference[InferenceElement.classConfidences] = {0: classConfidences}
return inference
def _reconstructionCompute(self):
if not self.isInferenceEnabled():
return {}
sp = self._getSPRegion()
sensor = self._getSensorRegion()
#--------------------------------------------------
# SP Top-down flow
sp.setParameter('topDownMode', True)
sp.prepareInputs()
sp.compute()
#--------------------------------------------------
# Sensor Top-down flow
sensor.setParameter('topDownMode', True)
sensor.prepareInputs()
sensor.compute()
# Need to call getOutputValues() instead of going through getOutputData()
# because the return values may contain strings, which cannot be passed
# through the Region.cpp code.
# predictionRow is a list of values, one for each field. The value is
# in the same type as the original input to the encoder and may be a
# string for category fields for example.
predictionRow = copy.copy(sensor.getSelf().getOutputValues('temporalTopDownOut'))
predictionFieldEncodings = sensor.getSelf().getOutputValues('temporalTopDownEncodings')
inferences = {}
inferences[InferenceElement.prediction] = tuple(predictionRow)
inferences[InferenceElement.encodings] = tuple(predictionFieldEncodings)
return inferences
def _anomalyCompute(self):
"""
Compute Anomaly score, if required
"""
inferenceType = self.getInferenceType()
inferences = {}
sp = self._getSPRegion()
score = None
if inferenceType == InferenceType.NontemporalAnomaly:
score = sp.getOutputData("anomalyScore")[0] #TODO move from SP to Anomaly ?
elif inferenceType == InferenceType.TemporalAnomaly:
tm = self._getTPRegion()
if sp is not None:
activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0]
else:
sensor = self._getSensorRegion()
activeColumns = sensor.getOutputData('dataOut').nonzero()[0]
if not self._predictedFieldName in self._input:
raise ValueError(
"Expected predicted field '%s' in input row, but was not found!"
% self._predictedFieldName
)
# Calculate the anomaly score using the active columns
# and previous predicted columns.
score = tm.getOutputData("anomalyScore")[0]
# Calculate the classifier's output and use the result as the anomaly
# label. Stores as string of results.
# TODO: make labels work with non-SP models
if sp is not None:
self._getAnomalyClassifier().setParameter(
"activeColumnCount", len(activeColumns))
self._getAnomalyClassifier().prepareInputs()
self._getAnomalyClassifier().compute()
labels = self._getAnomalyClassifier().getSelf().getLabelResults()
inferences[InferenceElement.anomalyLabel] = "%s" % labels
inferences[InferenceElement.anomalyScore] = score
return inferences
  def _handleSDRClassifierMultiStep(self, patternNZ,
                                    inputTSRecordIdx,
                                    rawInput):
    """ Handle the CLA Classifier compute logic when implementing multi-step
    prediction. This is where the patternNZ is associated with one of the
    other fields from the dataset 0 to N steps in the future. This method is
    used by each type of network (encoder only, SP only, SP +TM) to handle the
    compute logic through the CLA Classifier. It fills in the inference dict with
    the results of the compute.

    Parameters:
    -------------------------------------------------------------------
    patternNZ: The input to the CLA Classifier as a list of active input indices
    inputTSRecordIdx: The index of the record as computed from the timestamp
                  and aggregation interval. This normally increments by 1
                  each time unless there are missing records. If there is no
                  aggregation interval or timestamp in the data, this will be
                  None.
    rawInput: The raw input to the sensor, as a dict.

    Returns: dict mapping InferenceElement.multiStepPredictions,
             multiStepBestPredictions and multiStepBucketLikelihoods to
             per-step result dicts; {} when the model has no classifier.
    Raises: ValueError when no predicted field was enabled or the input row
            lacks a value for it.
    """
    inferenceArgs = self.getInferenceArgs()
    predictedFieldName = inferenceArgs.get('predictedField', None)
    if predictedFieldName is None:
      raise ValueError(
        "No predicted field was enabled! Did you call enableInference()?"
      )
    self._predictedFieldName = predictedFieldName

    classifier = self._getClassifierRegion()
    if not self._hasCL or classifier is None:
      # No classifier so return an empty dict for inferences.
      return {}

    sensor = self._getSensorRegion()
    minLikelihoodThreshold = self._minLikelihoodThreshold
    maxPredictionsPerStep = self._maxPredictionsPerStep
    needLearning = self.isLearningEnabled()
    inferences = {}

    # Get the classifier input encoder, if we don't have it already
    # (resolved lazily on the first call; cached on self afterwards)
    if self._classifierInputEncoder is None:
      if predictedFieldName is None:
        raise RuntimeError("This experiment description is missing "
              "the 'predictedField' in its config, which is required "
              "for multi-step prediction inference.")

      encoderList = sensor.getSelf().encoder.getEncoderList()
      self._numFields = len(encoderList)

      # This is getting index of predicted field if being fed to CLA.
      fieldNames = sensor.getSelf().encoder.getScalarNames()
      if predictedFieldName in fieldNames:
        self._predictedFieldIdx = fieldNames.index(predictedFieldName)
      else:
        # Predicted field was not fed into the network, only to the classifier
        self._predictedFieldIdx = None

      # In a multi-step model, the classifier input encoder is separate from
      # the other encoders and always disabled from going into the bottom of
      # the network.
      if sensor.getSelf().disabledEncoder is not None:
        encoderList = sensor.getSelf().disabledEncoder.getEncoderList()
      else:
        encoderList = []
      if len(encoderList) >= 1:
        fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()
        self._classifierInputEncoder = encoderList[fieldNames.index(
                                                        predictedFieldName)]
      else:
        # Legacy multi-step networks don't have a separate encoder for the
        # classifier, so use the one that goes into the bottom of the network
        encoderList = sensor.getSelf().encoder.getEncoderList()
        self._classifierInputEncoder = encoderList[self._predictedFieldIdx]

    # Get the actual value and the bucket index for this sample. The
    # predicted field may not be enabled for input to the network, so we
    # explicitly encode it outside of the sensor
    # TODO: All this logic could be simpler if in the encoder itself
    if not predictedFieldName in rawInput:
      raise ValueError("Input row does not contain a value for the predicted "
                       "field configured for this model. Missing value for '%s'"
                       % predictedFieldName)
    absoluteValue = rawInput[predictedFieldName]
    bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]

    # Convert the absolute values to deltas if necessary
    # The bucket index should be handled correctly by the underlying delta encoder
    if isinstance(self._classifierInputEncoder, DeltaEncoder):
      # Make the delta before any values have been seen 0 so that we do not mess up the
      # range for the adaptive scalar encoder.
      if not hasattr(self,"_ms_prevVal"):
        self._ms_prevVal = absoluteValue
      prevValue = self._ms_prevVal
      self._ms_prevVal = absoluteValue
      actualValue = absoluteValue - prevValue
    else:
      actualValue = absoluteValue

    # NaN values in the predicted field are classified as "missing data"
    if isinstance(actualValue, float) and math.isnan(actualValue):
      actualValue = SENTINEL_VALUE_FOR_MISSING_DATA

    # Pass this information to the classifier's custom compute method
    # so that it can assign the current classification to possibly
    # multiple patterns from the past and current, and also provide
    # the expected classification for some time step(s) in the future.
    classifier.setParameter('inferenceMode', True)
    classifier.setParameter('learningMode', needLearning)
    classificationIn = {'bucketIdx': bucketIdx,
                        'actValue': actualValue}

    # Handle missing records
    if inputTSRecordIdx is not None:
      recordNum = inputTSRecordIdx
    else:
      recordNum = self.__numRunCalls
    clResults = classifier.getSelf().customCompute(recordNum=recordNum,
                                           patternNZ=patternNZ,
                                           classification=classificationIn)

    # ---------------------------------------------------------------
    # Get the prediction for every step ahead learned by the classifier
    predictionSteps = classifier.getParameter('steps')
    predictionSteps = [int(x) for x in predictionSteps.split(',')]

    # We will return the results in this dict. The top level keys
    # are the step number, the values are the relative likelihoods for
    # each classification value in that time step, represented as
    # another dict where the keys are the classification values and
    # the values are the relative likelihoods.
    inferences[InferenceElement.multiStepPredictions] = dict()
    inferences[InferenceElement.multiStepBestPredictions] = dict()
    inferences[InferenceElement.multiStepBucketLikelihoods] = dict()

    # ======================================================================
    # Plug in the predictions for each requested time step.
    for steps in predictionSteps:
      # From the clResults, compute the predicted actual value. The
      # SDRClassifier classifies the bucket index and returns a list of
      # relative likelihoods for each bucket. Let's find the max one
      # and then look up the actual value from that bucket index
      likelihoodsVec = clResults[steps]
      bucketValues = clResults['actualValues']

      # Create a dict of value:likelihood pairs. We can't simply use
      # dict(zip(bucketValues, likelihoodsVec)) because there might be
      # duplicate bucketValues (this happens early on in the model when
      # it doesn't have actual values for each bucket so it returns
      # multiple buckets with the same default actual value).
      likelihoodsDict = dict()
      bestActValue = None
      bestProb = None
      for (actValue, prob) in zip(bucketValues, likelihoodsVec):
        if actValue in likelihoodsDict:
          likelihoodsDict[actValue] += prob
        else:
          likelihoodsDict[actValue] = prob
        # Keep track of best
        if bestProb is None or likelihoodsDict[actValue] > bestProb:
          bestProb = likelihoodsDict[actValue]
          bestActValue = actValue

      # Remove entries with 0 likelihood or likelihood less than
      # minLikelihoodThreshold, but don't leave an empty dict.
      likelihoodsDict = HTMPredictionModel._removeUnlikelyPredictions(
          likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)

      # calculate likelihood for each bucket
      bucketLikelihood = {}
      for k in likelihoodsDict.keys():
        bucketLikelihood[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
                                                                likelihoodsDict[k])

      # ---------------------------------------------------------------------
      # If we have a delta encoder, we have to shift our predicted output value
      # by the sum of the deltas
      if isinstance(self._classifierInputEncoder, DeltaEncoder):
        # Get the prediction history for this number of timesteps.
        # The prediction history is a store of the previous best predicted values.
        # This is used to get the final shift from the current absolute value.
        if not hasattr(self, '_ms_predHistories'):
          self._ms_predHistories = dict()
        predHistories = self._ms_predHistories
        if not steps in predHistories:
          predHistories[steps] = deque()
        predHistory = predHistories[steps]

        # Find the sum of the deltas for the steps and use this to generate
        # an offset from the current absolute value
        sumDelta = sum(predHistory)
        offsetDict = dict()
        for (k, v) in likelihoodsDict.iteritems():
          if k is not None:
            # Reconstruct the absolute value based on the current actual value,
            # the best predicted values from the previous iterations,
            # and the current predicted delta
            offsetDict[absoluteValue+float(k)+sumDelta] = v

        # calculate likelihood for each bucket
        bucketLikelihoodOffset = {}
        for k in offsetDict.keys():
          bucketLikelihoodOffset[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
                                                                    offsetDict[k])

        # Push the current best delta to the history buffer for reconstructing the final delta
        if bestActValue is not None:
          predHistory.append(bestActValue)
        # If we don't need any more values in the predictionHistory, pop off
        # the earliest one.
        if len(predHistory) >= steps:
          predHistory.popleft()

        # Provide the offsetDict as the return value
        if len(offsetDict)>0:
          inferences[InferenceElement.multiStepPredictions][steps] = offsetDict
          inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodOffset
        else:
          inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDict
          inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihood

        if bestActValue is None:
          inferences[InferenceElement.multiStepBestPredictions][steps] = None
        else:
          inferences[InferenceElement.multiStepBestPredictions][steps] = (
            absoluteValue + sumDelta + bestActValue)

      # ---------------------------------------------------------------------
      # Normal case, no delta encoder. Just plug in all our multi-step predictions
      # with likelihoods as well as our best prediction
      else:
        # The multiStepPredictions element holds the probabilities for each
        # bucket
        inferences[InferenceElement.multiStepPredictions][steps] = (
                                                      likelihoodsDict)
        inferences[InferenceElement.multiStepBestPredictions][steps] = (
                                                      bestActValue)
        inferences[InferenceElement.multiStepBucketLikelihoods][steps] = (
                                                      bucketLikelihood)

    return inferences
@classmethod
def _removeUnlikelyPredictions(cls, likelihoodsDict, minLikelihoodThreshold,
maxPredictionsPerStep):
"""Remove entries with 0 likelihood or likelihood less than
minLikelihoodThreshold, but don't leave an empty dict.
"""
maxVal = (None, None)
for (k, v) in likelihoodsDict.items():
if len(likelihoodsDict) <= 1:
break
if maxVal[0] is None or v >= maxVal[1]:
if maxVal[0] is not None and maxVal[1] < minLikelihoodThreshold:
del likelihoodsDict[maxVal[0]]
maxVal = (k, v)
elif v < minLikelihoodThreshold:
del likelihoodsDict[k]
# Limit the number of predictions to include.
likelihoodsDict = dict(sorted(likelihoodsDict.iteritems(),
key=itemgetter(1),
reverse=True)[:maxPredictionsPerStep])
return likelihoodsDict
def getRuntimeStats(self):
"""
Only returns data for a stat called ``numRunCalls``.
:return:
"""
ret = {"numRunCalls" : self.__numRunCalls}
#--------------------------------------------------
# Query temporal network stats
temporalStats = dict()
if self._hasTP:
for stat in self._netInfo.statsCollectors:
sdict = stat.getStats()
temporalStats.update(sdict)
ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats
return ret
def getFieldInfo(self, includeClassifierOnlyField=False):
encoder = self._getEncoder()
fieldNames = encoder.getScalarNames()
fieldTypes = encoder.getDecoderOutputFieldTypes()
assert len(fieldNames) == len(fieldTypes)
# Also include the classifierOnly field?
encoder = self._getClassifierOnlyEncoder()
if includeClassifierOnlyField and encoder is not None:
addFieldNames = encoder.getScalarNames()
addFieldTypes = encoder.getDecoderOutputFieldTypes()
assert len(addFieldNames) == len(addFieldTypes)
fieldNames = list(fieldNames) + addFieldNames
fieldTypes = list(fieldTypes) + addFieldTypes
fieldMetaList = map(FieldMetaInfo._make,
zip(fieldNames,
fieldTypes,
itertools.repeat(FieldMetaSpecial.none)))
return tuple(fieldMetaList)
def _getLogger(self):
""" Get the logger for this object. This is a protected method that is used
by the Model to access the logger created by the subclass
return:
A logging.Logger object. Should not be None
"""
return self.__logger
def _getSPRegion(self):
"""
Returns reference to the network's SP region
"""
return self._netInfo.net.regions.get('SP', None)
def _getTPRegion(self):
"""
Returns reference to the network's TM region
"""
return self._netInfo.net.regions.get('TM', None)
def _getSensorRegion(self):
"""
Returns reference to the network's Sensor region
"""
return self._netInfo.net.regions['sensor']
def _getClassifierRegion(self):
"""
Returns reference to the network's Classifier region
"""
if (self._netInfo.net is not None and
"Classifier" in self._netInfo.net.regions):
return self._netInfo.net.regions["Classifier"]
else:
return None
def _getAnomalyClassifier(self):
return self._netInfo.net.regions.get("AnomalyClassifier", None)
def _getEncoder(self):
"""
Returns: sensor region's encoder for the given network
"""
return self._getSensorRegion().getSelf().encoder
def _getClassifierOnlyEncoder(self):
"""
Returns: sensor region's encoder that is sent only to the classifier,
not to the bottom of the network
"""
return self._getSensorRegion().getSelf().disabledEncoder
def _getDataSource(self):
"""
Returns: data source that we installed in sensor region
"""
return self._getSensorRegion().getSelf().dataSource
def __createHTMNetwork(self, sensorParams, spEnable, spParams, tmEnable,
tmParams, clEnable, clParams, anomalyParams):
""" Create a CLA network and return it.
description: HTMPredictionModel description dictionary (TODO: define schema)
Returns: NetworkInfo instance;
"""
#--------------------------------------------------
# Create the network
n = Network()
#--------------------------------------------------
# Add the Sensor
n.addRegion("sensor", "py.RecordSensor", json.dumps(dict(verbosity=sensorParams['verbosity'])))
sensor = n.regions['sensor'].getSelf()
enabledEncoders = copy.deepcopy(sensorParams['encoders'])
for name, params in enabledEncoders.items():
if params is not None:
classifierOnly = params.pop('classifierOnly', False)
if classifierOnly:
enabledEncoders.pop(name)
# Disabled encoders are encoders that are fed to SDRClassifierRegion but not
# SP or TM Regions. This is to handle the case where the predicted field
# is not fed through the SP/TM. We typically just have one of these now.
disabledEncoders = copy.deepcopy(sensorParams['encoders'])
for name, params in disabledEncoders.items():
if params is None:
disabledEncoders.pop(name)
else:
classifierOnly = params.pop('classifierOnly', False)
if not classifierOnly:
disabledEncoders.pop(name)
encoder = MultiEncoder(enabledEncoders)
sensor.encoder = encoder
sensor.disabledEncoder = MultiEncoder(disabledEncoders)
sensor.dataSource = DataBuffer()
prevRegion = "sensor"
prevRegionWidth = encoder.getWidth()
# SP is not enabled for spatial classification network
if spEnable:
spParams = spParams.copy()
spParams['inputWidth'] = prevRegionWidth
self.__logger.debug("Adding SPRegion; spParams: %r" % spParams)
n.addRegion("SP", "py.SPRegion", json.dumps(spParams))
# Link SP region
n.link("sensor", "SP", "UniformLink", "")
n.link("sensor", "SP", "UniformLink", "", srcOutput="resetOut",
destInput="resetIn")
n.link("SP", "sensor", "UniformLink", "", srcOutput="spatialTopDownOut",
destInput="spatialTopDownIn")
n.link("SP", "sensor", "UniformLink", "", srcOutput="temporalTopDownOut",
destInput="temporalTopDownIn")
prevRegion = "SP"
prevRegionWidth = spParams['columnCount']
if tmEnable:
tmParams = tmParams.copy()
if prevRegion == 'sensor':
tmParams['inputWidth'] = tmParams['columnCount'] = prevRegionWidth
else:
assert tmParams['columnCount'] == prevRegionWidth
tmParams['inputWidth'] = tmParams['columnCount']
self.__logger.debug("Adding TMRegion; tmParams: %r" % tmParams)
n.addRegion("TM", "py.TMRegion", json.dumps(tmParams))
# Link TM region
n.link(prevRegion, "TM", "UniformLink", "")
if prevRegion != "sensor":
n.link("TM", prevRegion, "UniformLink", "", srcOutput="topDownOut",
destInput="topDownIn")
else:
n.link("TM", prevRegion, "UniformLink", "", srcOutput="topDownOut",
destInput="temporalTopDownIn")
n.link("sensor", "TM", "UniformLink", "", srcOutput="resetOut",
destInput="resetIn")
prevRegion = "TM"
prevRegionWidth = tmParams['inputWidth']
if clEnable and clParams is not None:
clParams = clParams.copy()
clRegionName = clParams.pop('regionName')
self.__logger.debug("Adding %s; clParams: %r" % (clRegionName,
clParams))
n.addRegion("Classifier", "py.%s" % str(clRegionName), json.dumps(clParams))
# SDR Classifier-specific links
if str(clRegionName) == "SDRClassifierRegion":
n.link("sensor", "Classifier", "UniformLink", "", srcOutput="actValueOut",
destInput="actValueIn")
n.link("sensor", "Classifier", "UniformLink", "", srcOutput="bucketIdxOut",
destInput="bucketIdxIn")
# This applies to all (SDR and KNN) classifiers
n.link("sensor", "Classifier", "UniformLink", "", srcOutput="categoryOut",
destInput="categoryIn")
n.link(prevRegion, "Classifier", "UniformLink", "")
if self.getInferenceType() == InferenceType.TemporalAnomaly:
anomalyClParams = dict(
trainRecords=anomalyParams.get('autoDetectWaitRecords', None),
cacheSize=anomalyParams.get('anomalyCacheRecords', None)
)
self._addAnomalyClassifierRegion(n, anomalyClParams, spEnable, tmEnable)
#--------------------------------------------------
# NuPIC doesn't initialize the network until you try to run it
# but users may want to access components in a setup callback
n.initialize()
return NetworkInfo(net=n, statsCollectors=[])
  def __getstate__(self):
    """
    Return serializable state. This function will return a version of the
    __dict__ with data that shouldn't be pickled stripped out. In particular,
    the CLA Network is stripped out because it has it's own serialization
    mechanism)

    See also: _serializeExtraData()
    """
    # Remove ephemeral member variables from state
    state = self.__dict__.copy()
    # Replace the NetworkInfo with one whose net is None: the Network itself
    # is saved separately in _serializeExtraData()
    state["_netInfo"] = NetworkInfo(net=None,
                        statsCollectors=self._netInfo.statsCollectors)
    # Drop name-mangled ephemerals (restore flag and logger); they are
    # recreated during __setstate__ / _deSerializeExtraData()
    for ephemeral in [self.__manglePrivateMemberName("__restoringFromState"),
                      self.__manglePrivateMemberName("__logger")]:
      state.pop(ephemeral)
    return state
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
See also: _deSerializeExtraData
"""
self.__dict__.update(state)
# Mark beginning of restoration.
#
# self.__restoringFromState will be reset to False upon completion of
# object restoration in _deSerializeExtraData()
self.__restoringFromState = True
# set up logging
self.__logger = initLogger(self)
# =========================================================================
# TODO: Temporary migration solution
if not hasattr(self, "_Model__inferenceType"):
self.__restoringFromV1 = True
self._hasSP = True
if self.__temporalNetInfo is not None:
self._Model__inferenceType = InferenceType.TemporalNextStep
self._netInfo = self.__temporalNetInfo
self._hasTP = True
else:
raise RuntimeError("The Nontemporal inference type is not supported")
self._Model__inferenceArgs = {}
self._Model__learningEnabled = True
self._Model__inferenceEnabled = True
# Remove obsolete members
self.__dict__.pop("_HTMPredictionModel__encoderNetInfo", None)
self.__dict__.pop("_HTMPredictionModel__nonTemporalNetInfo", None)
self.__dict__.pop("_HTMPredictionModel__temporalNetInfo", None)
# -----------------------------------------------------------------------
# Migrate from v2
if not hasattr(self, "_netInfo"):
self._hasSP = False
self._hasTP = False
if self.__encoderNetInfo is not None:
self._netInfo = self.__encoderNetInfo
elif self.__nonTemporalNetInfo is not None:
self._netInfo = self.__nonTemporalNetInfo
self._hasSP = True
else:
self._netInfo = self.__temporalNetInfo
self._hasSP = True
self._hasTP = True
# Remove obsolete members
self.__dict__.pop("_HTMPredictionModel__encoderNetInfo", None)
self.__dict__.pop("_HTMPredictionModel__nonTemporalNetInfo", None)
self.__dict__.pop("_HTMPredictionModel__temporalNetInfo", None)
# This gets filled in during the first infer because it can only be
# determined at run-time
self._classifierInputEncoder = None
if not hasattr(self, '_minLikelihoodThreshold'):
self._minLikelihoodThreshold = DEFAULT_LIKELIHOOD_THRESHOLD
if not hasattr(self, '_maxPredictionsPerStep'):
self._maxPredictionsPerStep = DEFAULT_MAX_PREDICTIONS_PER_STEP
if not hasattr(self, '_hasCL'):
self._hasCL = (self._getClassifierRegion() is not None)
self.__logger.debug("Restoring %s from state..." % self.__class__.__name__)
@staticmethod
def getSchema():
return HTMPredictionModelProto
  def write(self, proto):
    """Write this model's serializable state into a capnp builder.

    :param proto: capnp HTMPredictionModelProto message builder
    """
    super(HTMPredictionModel, self).writeBaseToProto(proto.modelBase)

    proto.numRunCalls = self.__numRunCalls
    proto.minLikelihoodThreshold = self._minLikelihoodThreshold
    proto.maxPredictionsPerStep = self._maxPredictionsPerStep

    # The Network serializes itself into the nested `network` message
    self._netInfo.net.write(proto.network)
    proto.spLearningEnabled = self.__spLearningEnabled
    proto.tpLearningEnabled = self.__tpLearningEnabled
    # The following three fields are capnp unions: setting `none` marks an
    # unset value, otherwise `value` carries it
    if self._predictedFieldIdx is None:
      proto.predictedFieldIdx.none = None
    else:
      proto.predictedFieldIdx.value = self._predictedFieldIdx
    if self._predictedFieldName is None:
      proto.predictedFieldName.none = None
    else:
      proto.predictedFieldName.value = self._predictedFieldName
    if self._numFields is None:
      proto.numFields.none = None
    else:
      proto.numFields.value = self._numFields
    proto.trainSPNetOnlyIfRequested = self.__trainSPNetOnlyIfRequested
    proto.finishedLearning = self.__finishedLearning
@classmethod
def read(cls, proto):
"""
:param proto: capnp HTMPredictionModelProto message reader
"""
obj = object.__new__(cls)
# model.capnp
super(HTMPredictionModel, obj).__init__(proto=proto.modelBase)
# HTMPredictionModelProto.capnp
obj._minLikelihoodThreshold = round(proto.minLikelihoodThreshold,
EPSILON_ROUND)
obj._maxPredictionsPerStep = proto.maxPredictionsPerStep
network = Network.read(proto.network)
obj._hasSP = ("SP" in network.regions)
obj._hasTP = ("TM" in network.regions)
obj._hasCL = ("Classifier" in network.regions)
obj._netInfo = NetworkInfo(net=network, statsCollectors=[])
obj.__spLearningEnabled = bool(proto.spLearningEnabled)
obj.__tpLearningEnabled = bool(proto.tpLearningEnabled)
obj.__numRunCalls = proto.numRunCalls
obj._classifierInputEncoder = None
if proto.predictedFieldIdx.which() == "none":
obj._predictedFieldIdx = None
else:
obj._predictedFieldIdx = proto.predictedFieldIdx.value
if proto.predictedFieldName.which() == "none":
obj._predictedFieldName = None
else:
obj._predictedFieldName = proto.predictedFieldName.value
obj._numFields = proto.numFields
if proto.numFields.which() == "none":
obj._numFields = None
else:
obj._numFields = proto.numFields.value
obj.__trainSPNetOnlyIfRequested = proto.trainSPNetOnlyIfRequested
obj.__finishedLearning = proto.finishedLearning
obj._input = None
sensor = network.regions['sensor'].getSelf()
sensor.dataSource = DataBuffer()
network.initialize()
obj.__logger = initLogger(obj)
obj.__logger.debug("Instantiating %s." % obj.__myClassName)
# Mark end of restoration from state
obj.__restoringFromState = False
obj.__restoringFromV1 = False
return obj
def _serializeExtraData(self, extraDataDir):
""" [virtual method override] This method is called during serialization
with an external directory path that can be used to bypass pickle for saving
large binary states.
extraDataDir:
Model's extra data directory path
"""
makeDirectoryFromAbsolutePath(extraDataDir)
#--------------------------------------------------
# Save the network
outputDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
self.__logger.debug("Serializing network...")
self._netInfo.net.save(outputDir)
self.__logger.debug("Finished serializing network")
return
def _deSerializeExtraData(self, extraDataDir):
""" [virtual method override] This method is called during deserialization
(after __setstate__) with an external directory path that can be used to
bypass pickle for loading large binary states.
extraDataDir:
Model's extra data directory path
"""
assert self.__restoringFromState
#--------------------------------------------------
# Check to make sure that our Network member wasn't restored from
# serialized data
assert (self._netInfo.net is None), "Network was already unpickled"
#--------------------------------------------------
# Restore the network
stateDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
self.__logger.debug(
"(%s) De-serializing network...", self)
self._netInfo.net = Network(stateDir)
self.__logger.debug(
"(%s) Finished de-serializing network", self)
# NuPIC doesn't initialize the network until you try to run it
# but users may want to access components in a setup callback
self._netInfo.net.initialize()
# Used for backwards compatibility for anomaly classification models.
# Previous versions used the HTMPredictionModelClassifierHelper class for utilizing
# the KNN classifier. Current version uses KNNAnomalyClassifierRegion to
# encapsulate all the classifier functionality.
if self.getInferenceType() == InferenceType.TemporalAnomaly:
classifierType = self._getAnomalyClassifier().getSelf().__class__.__name__
if classifierType is 'KNNClassifierRegion':
anomalyClParams = dict(
trainRecords=self._classifier_helper._autoDetectWaitRecords,
cacheSize=self._classifier_helper._history_length,
)
spEnable = (self._getSPRegion() is not None)
tmEnable = True
# Store original KNN region
knnRegion = self._getAnomalyClassifier().getSelf()
# Add new KNNAnomalyClassifierRegion
self._addAnomalyClassifierRegion(self._netInfo.net, anomalyClParams,
spEnable, tmEnable)
# Restore state
self._getAnomalyClassifier().getSelf()._iteration = self.__numRunCalls
self._getAnomalyClassifier().getSelf()._recordsCache = (
self._classifier_helper.saved_states)
self._getAnomalyClassifier().getSelf().saved_categories = (
self._classifier_helper.saved_categories)
self._getAnomalyClassifier().getSelf()._knnclassifier = knnRegion
# Set TM to output neccessary information
self._getTPRegion().setParameter('anomalyMode', True)
# Remove old classifier_helper
del self._classifier_helper
self._netInfo.net.initialize()
#--------------------------------------------------
# Mark end of restoration from state
self.__restoringFromState = False
self.__logger.debug("(%s) Finished restoring from state", self)
return
def _addAnomalyClassifierRegion(self, network, params, spEnable, tmEnable):
  """
  Attaches an 'AnomalyClassifier' region to the network. Will remove current
  'AnomalyClassifier' region if it exists.

  Parameters
  -----------
  network - network to add the AnomalyClassifier region
  params - parameters to pass to the region; must contain 'trainRecords' and
           'cacheSize' keys (either may be None to fall back to defaults)
  spEnable - True if network has an SP region
  tmEnable - True if network has a TM region; Currently requires True

  Raises RuntimeError when tmEnable is False.
  """
  # Deep-copy so the caller's params dict is never mutated
  allParams = copy.deepcopy(params)
  # KNN settings force exact binary-overlap matching: single nearest
  # neighbor, raw bit-overlap distance, binarized inputs, bounded storage.
  knnParams = dict(k=1,
                   distanceMethod='rawOverlap',
                   distanceNorm=1,
                   doBinarization=1,
                   replaceDuplicates=0,
                   maxStoredPatterns=1000)
  allParams.update(knnParams)

  # Set defaults if not set
  if allParams['trainRecords'] is None:
    allParams['trainRecords'] = DEFAULT_ANOMALY_TRAINRECORDS
  if allParams['cacheSize'] is None:
    allParams['cacheSize'] = DEFAULT_ANOMALY_CACHESIZE

  # Remove current instance if already created (used for deserializing)
  if self._netInfo is not None and self._netInfo.net is not None \
      and self._getAnomalyClassifier() is not None:
    self._netInfo.net.removeRegion('AnomalyClassifier')

  # Region params travel as a JSON string per the network API
  network.addRegion("AnomalyClassifier",
                    "py.KNNAnomalyClassifierRegion",
                    json.dumps(allParams))

  # Attach link to SP
  if spEnable:
    network.link("SP", "AnomalyClassifier", "UniformLink", "",
                 srcOutput="bottomUpOut", destInput="spBottomUpOut")
  else:
    # No SP present: feed raw sensor output into the classifier's SP input
    network.link("sensor", "AnomalyClassifier", "UniformLink", "",
                 srcOutput="dataOut", destInput="spBottomUpOut")

  # Attach link to TM
  if tmEnable:
    network.link("TM", "AnomalyClassifier", "UniformLink", "",
                 srcOutput="topDownOut", destInput="tpTopDownOut")
    network.link("TM", "AnomalyClassifier", "UniformLink", "",
                 srcOutput="lrnActiveStateT", destInput="tpLrnActiveStateT")
  else:
    raise RuntimeError("TemporalAnomaly models require a TM region.")
def __getNetworkStateDirectory(self, extraDataDir):
  """ Compute the absolute directory path used for saving the CLA network.

  extraDataDir:
    Model's extra data directory path

  Returns: Absolute directory path for saving CLA Network
  """
  suffix = "-network.nta"
  if self.__restoringFromV1:
    # Legacy (v1) checkpoints only distinguished temporal vs. non-temporal
    if self.getInferenceType() == InferenceType.TemporalNextStep:
      prefix = 'temporal'
    else:
      prefix = 'nonTemporal'
  else:
    prefix = InferenceType.getLabel(self.getInferenceType())
  return os.path.abspath(os.path.join(extraDataDir, prefix + suffix))
def __manglePrivateMemberName(self, privateMemberName, skipCheck=False):
  """ Apply Python name-mangling to a private member name; a mangled member
  name begins with two or more underscores and ends with at most one
  underscore.

  privateMemberName:
    The private member name (e.g., "__logger")

  skipCheck: Pass True to skip test for presence of the demangled member
    in our instance.

  Returns: The demangled member name (e.g., "_HTMPredictionModel__logger")
  """
  assert privateMemberName.startswith("__"), \
      "%r doesn't start with __" % privateMemberName
  assert not privateMemberName.startswith("___"), \
      "%r starts with ___" % privateMemberName
  assert not privateMemberName.endswith("__"), \
      "%r ends with more than one underscore" % privateMemberName

  demangled = "_%s%s" % (self.__myClassName.lstrip("_"), privateMemberName)
  if not skipCheck:
    # getattr raises AttributeError when the member is missing
    getattr(self, demangled)
  return demangled
class DataBuffer(object):
  """
  A minimal one-slot FIFO. Data is pushed when available, and
  getNextRecordDict() lets DataBuffer serve as a DataSource in a CLA
  Network.

  At most one record may be buffered at a time; the asserts enforce this
  invariant and could be removed to relax it.
  """

  def __init__(self):
    self.stack = []

  def push(self, data):
    assert len(self.stack) == 0

    # Store a shallow copy: the sensor's pre-encoding filters (e.g.,
    # AutoResetFilter) may mutate the record, and callers expect their
    # input record to stay untouched.
    buffered = data.__class__(data)
    self.stack.append(buffered)

  def getNextRecordDict(self):
    assert len(self.stack) > 0
    return self.stack.pop()
| 59,523 | Python | .py | 1,284 | 38.813084 | 99 | 0.672761 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,969 | model_callbacks.py | numenta_nupic-legacy/src/nupic/frameworks/opf/model_callbacks.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Generic model experiment task callbacks that may be used
# in setup, postIter, and finish callback lists
def modelControlFinishLearningCb(model):
  """ Passes the "finish learning" command to the model. NOTE: Upon completion
  of this command, learning may not be resumed on the given instance of
  the model (e.g., the implementation may prune data structures that are
  necessary for learning)

  model: pointer to the Model instance

  Returns: nothing
  """
  model.finishLearning()
| 1,482 | Python | .py | 32 | 44.5 | 79 | 0.704577 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,970 | opf_utils.py | numenta_nupic-legacy/src/nupic/frameworks/opf/opf_utils.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This file contains utility functions that are used
# internally by the prediction framework. It should not be
# imported by description files. (see helpers.py)
import os
import inspect
import logging
import re
from collections import namedtuple
import nupic.data.json_helpers as jsonhelpers
from nupic.support.enum import Enum
class InferenceElement(Enum(
                prediction="prediction",
                encodings="encodings",
                classification="classification",
                anomalyScore="anomalyScore",
                anomalyLabel="anomalyLabel",
                classConfidences="classConfidences",
                multiStepPredictions="multiStepPredictions",
                multiStepBestPredictions="multiStepBestPredictions",
                multiStepBucketLikelihoods="multiStepBucketLikelihoods",
                multiStepBucketValues="multiStepBucketValues",
              )):
  """
  The concept of InferenceElements is a key part of the OPF. A model's inference
  may have multiple parts to it. For example, a model may output both a
  prediction and an anomaly score. Models output their set of inferences as a
  dictionary that is keyed by the enumerated type InferenceElement. Each entry
  in an inference dictionary is considered a separate inference element, and is
  handled independently by the OPF.
  """

  # Maps inference element name -> name of the SensorInput attribute that
  # carries the corresponding ground truth. Elements with no sensor-input
  # counterpart are simply absent from this map (getInputElement -> None).
  __inferenceInputMap = {
    "prediction": "dataRow",
    "encodings": "dataEncodings",
    "classification": "category",
    "classConfidences": "category",
    "multiStepPredictions": "dataDict",
    "multiStepBestPredictions": "dataDict",
  }

  # Lazily-built cache used by isTemporal(); populated on first call
  __temporalInferenceElements = None

  @staticmethod
  def getInputElement(inferenceElement):
    """
    Get the sensor input element that corresponds to the given inference
    element. This is mainly used for metrics and prediction logging

    :param inferenceElement: (:class:`.InferenceElement`)
    :return: (string) name of sensor input element, or None when the element
             has no sensor-input counterpart
    """
    return InferenceElement.__inferenceInputMap.get(inferenceElement, None)

  @staticmethod
  def isTemporal(inferenceElement):
    """
    .. note:: This should only be checked IF THE MODEL'S INFERENCE TYPE IS ALSO
       TEMPORAL. That is, a temporal model CAN have non-temporal inference
       elements, but a non-temporal model CANNOT have temporal inference
       elements.

    :param inferenceElement: (:class:`.InferenceElement`)
    :return: (bool) ``True`` if the inference from this time step is predicted
             the input for the NEXT time step.
    """
    # Build the cached set on first use; only "prediction" is temporal here
    if InferenceElement.__temporalInferenceElements is None:
      InferenceElement.__temporalInferenceElements = \
          set([InferenceElement.prediction])

    return inferenceElement in InferenceElement.__temporalInferenceElements

  @staticmethod
  def getTemporalDelay(inferenceElement, key=None):
    """
    :param inferenceElement: (:class:`.InferenceElement`) value being delayed
    :param key: (string) If the inference is a dictionary type, this specifies
           key for the sub-inference that is being delayed; must be convertible
           to int for the multi-step elements.
    :return: (int) the number of records that elapse between when an inference
             is made and when the corresponding input record will appear. For
             example, a multistep prediction for 3 timesteps out will have a
             delay of 3.
    """
    # -----------------------------------------------------------------------
    # For next step prediction, we shift by 1
    if inferenceElement in (InferenceElement.prediction,
                            InferenceElement.encodings):
      return 1
    # -----------------------------------------------------------------------
    # For classification, anomaly scores, the inferences immediately succeed the
    # inputs
    if inferenceElement in (InferenceElement.anomalyScore,
                            InferenceElement.anomalyLabel,
                            InferenceElement.classification,
                            InferenceElement.classConfidences):
      return 0
    # -----------------------------------------------------------------------
    # For multistep prediction, the delay is based on the key in the inference
    # dictionary (the key is the step count, e.g. "3" -> delay of 3)
    if inferenceElement in (InferenceElement.multiStepPredictions,
                            InferenceElement.multiStepBestPredictions,
                            InferenceElement.multiStepBucketLikelihoods):
      return int(key)

    # -----------------------------------------------------------------------
    # default: return 0
    return 0

  @staticmethod
  def getMaxDelay(inferences):
    """
    :param inferences: (dict) where the keys are :class:`.InferenceElement`
           objects.
    :return: (int) the maximum delay for the :class:`.InferenceElement` objects
             in the inference dictionary.
    """
    maxDelay = 0
    # NOTE: iteritems/iterkeys are Python 2 dict iterators
    for inferenceElement, inference in inferences.iteritems():
      if isinstance(inference, dict):
        # Dict-valued inferences (multi-step): delay depends on each sub-key
        for key in inference.iterkeys():
          maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement,
                                                           key),
                         maxDelay)
      else:
        maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement),
                       maxDelay)
    return maxDelay
class InferenceType(Enum("TemporalNextStep",
                         "TemporalClassification",
                         "NontemporalClassification",
                         "TemporalAnomaly",
                         "NontemporalAnomaly",
                         "TemporalMultiStep",
                         "NontemporalMultiStep")):
  """
  Enum: one of the following:

  - ``TemporalNextStep``
  - ``TemporalClassification``
  - ``NontemporalClassification``
  - ``TemporalAnomaly``
  - ``NontemporalAnomaly``
  - ``TemporalMultiStep``
  - ``NontemporalMultiStep``
  """

  # Lazily-built cache used by isTemporal(); populated on first call
  __temporalInferenceTypes = None

  @staticmethod
  def isTemporal(inferenceType):
    """
    :param inferenceType: (:class:`.InferenceType`)
    :return: (bool) `True` if the inference type is 'temporal', i.e. requires a
             temporal memory in the network.
    """
    if InferenceType.__temporalInferenceTypes is None:
      InferenceType.__temporalInferenceTypes = \
          set([InferenceType.TemporalNextStep,
               InferenceType.TemporalClassification,
               InferenceType.TemporalAnomaly,
               InferenceType.TemporalMultiStep,
               # NOTE(review): NontemporalMultiStep is included despite its
               # "Nontemporal" name -- presumably it still uses a temporal
               # memory internally; confirm before relying on this.
               InferenceType.NontemporalMultiStep])

    return inferenceType in InferenceType.__temporalInferenceTypes
class SensorInput(object):
  """
  Represents the mapping of a given inputRecord by the sensor region's encoder:
  the input record as it appears right before encoding. It may differ from
  the raw input because some input fields (e.g. DateTime fields) can be split
  into several encoded fields.

  :param dataRow: A data row that is the sensor's ``sourceOut`` mapping of the
         supplied inputRecord.
  :param dataEncodings: A list of the corresponding bit-array encodings of each
         value in "dataRow"
  :param sequenceReset: The sensor's "resetOut" signal (0 or 1) emitted by the
         sensor's compute logic on the supplied inputRecord; provided
         for analysis and diagnostics.
  :param dataDict: The raw encoded input to the sensor
  :param category: the categoryOut on the sensor region
  """

  __slots__ = (
    "dataRow", "dataDict", "dataEncodings", "sequenceReset", "category"
  )

  def __init__(self, dataRow=None, dataDict=None, dataEncodings=None,
               sequenceReset=None, category=None):
    # Assign each slot from the matching constructor argument
    values = (dataRow, dataDict, dataEncodings, sequenceReset, category)
    for name, value in zip(self.__slots__, values):
      setattr(self, name, value)

  def __repr__(self):
    template = ("SensorInput("
                "\tdataRow={0}\n"
                "\tdataDict={1}\n"
                "\tdataEncodings={2}\n"
                "\tsequenceReset={3}\n"
                "\tcategory={4}\n"
                ")")
    return template.format(self.dataRow,
                           self.dataDict,
                           self.dataEncodings,
                           self.sequenceReset,
                           self.category)

  def _asdict(self):
    # Dict view of all slot attributes (parallels namedtuple._asdict)
    return {name: getattr(self, name) for name in self.__slots__}
class ClassifierInput(object):
  """
  Represents the mapping of a given inputRecord by the classifier input encoder.

  :param dataRow: A data row that is the sensor's "sourceOut" mapping of the
         supplied inputRecord. See :class:`.SensorInput` class for
         additional details.
  :param bucketIndex: (int) the classifier input encoder's mapping of the
         dataRow.
  """

  __slots__ = ("dataRow", "bucketIndex")

  def __init__(self, dataRow=None, bucketIndex=None):
    self.dataRow = dataRow
    self.bucketIndex = bucketIndex

  def __repr__(self):
    template = ("ClassifierInput("
                "\tdataRow={0}\n"
                "\tbucketIndex={1}\n"
                ")")
    return template.format(self.dataRow, self.bucketIndex)

  def _asdict(self):
    # Dict view of all slot attributes (parallels namedtuple._asdict)
    return {name: getattr(self, name) for name in self.__slots__}
class ModelResult(object):
  """
  A structure that contains the input to a model and the resulting predictions
  as well as any related information related to the predictions.

  All params below are accesses as properties of the ModelResult object.

  :param predictionNumber: (int) This should start at 0 and increase with each
         new ModelResult.
  :param rawInput: (object) The input record, as input by the user. This is a
         dictionary-like object which has attributes whose names are the same
         as the input field names
  :param sensorInput: (:class:`~.SensorInput`) object that represents the input
         record
  :param inferences: (dict) Each key is a :class:`~.InferenceType` constant
         which corresponds to the type of prediction being made. Each value is
         a an element that corresponds to the actual prediction by the model,
         including auxillary information.
  :param metrics: (:class:`~nupic.frameworks.opf.metrics.MetricsIface`)
         The metrics corresponding to the most-recent prediction/ground truth
         pair
  :param predictedFieldIdx: (int) predicted field index
  :param predictedFieldName: (string) predicted field name
  :param classifierInput: (:class:`.ClassifierInput`) input from classifier
  """

  __slots__ = ("predictionNumber", "rawInput", "sensorInput", "inferences",
               "metrics", "predictedFieldIdx", "predictedFieldName",
               "classifierInput")

  def __init__(self,
               predictionNumber=None,
               rawInput=None,
               sensorInput=None,
               inferences=None,
               metrics=None,
               predictedFieldIdx=None,
               predictedFieldName=None,
               classifierInput=None):
    # Assign each slot from the matching constructor argument (same order)
    values = (predictionNumber, rawInput, sensorInput, inferences, metrics,
              predictedFieldIdx, predictedFieldName, classifierInput)
    for name, value in zip(self.__slots__, values):
      setattr(self, name, value)

  def __repr__(self):
    template = ("ModelResult("
                "\tpredictionNumber={0}\n"
                "\trawInput={1}\n"
                "\tsensorInput={2}\n"
                "\tinferences={3}\n"
                "\tmetrics={4}\n"
                "\tpredictedFieldIdx={5}\n"
                "\tpredictedFieldName={6}\n"
                "\tclassifierInput={7}\n"
                ")")
    return template.format(*[getattr(self, name) for name in self.__slots__])
def validateOpfJsonValue(value, opfJsonSchemaFilename):
  """
  Validate a python object against an OPF json schema file

  :param value: target python object to validate (typically a dictionary)
  :param opfJsonSchemaFilename: (string) OPF json schema filename containing
         the json schema object. (e.g., opfTaskControlSchema.json)
  :raises: jsonhelpers.ValidationError when value fails json validation
  """
  # Schemas live in the "jsonschema" directory next to this module
  schemaPath = os.path.join(os.path.dirname(__file__),
                            "jsonschema",
                            opfJsonSchemaFilename)
  jsonhelpers.validate(value, schemaPath=schemaPath)
def initLogger(obj):
  """
  Helper function to create a logger object for the current object with
  the standard Numenta prefix.

  :param obj: (object or class) to derive the logger name from
  :returns: (logging.Logger) logger named com.numenta.<module>.<class>
  """
  # Accept either a class or an instance
  cls = obj if inspect.isclass(obj) else obj.__class__
  loggerName = ".".join(['com.numenta', cls.__module__, cls.__name__])
  return logging.getLogger(loggerName)
def matchPatterns(patterns, keys):
  """
  Returns a subset of the keys that match any of the given patterns

  :param patterns: (list) regular expressions to match (anchored at the start
         of each key, as with ``re.match``)
  :param keys: (list) keys to search for matches
  :returns: (list) matching keys (a key may appear once per pattern that
            matches it), or None when ``patterns`` is empty or None
  """
  if not patterns:
    return None

  matched = []
  for expr in patterns:
    compiled = re.compile(expr)
    matched.extend(key for key in keys if compiled.match(key))
  return matched
| 14,839 | Python | .py | 340 | 34.973529 | 80 | 0.641763 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,971 | exceptions.py | numenta_nupic-legacy/src/nupic/frameworks/opf/exceptions.py |
class HTMPredictionModelException(Exception):
  """
  Base exception class for
  :class:`~nupic.frameworks.opf.htm_prediction_model.HTMPredictionModel`
  exceptions.

  :param errorString: (string) Error code/msg: e.g., "Invalid request
         object."
  :param debugInfo: (object) An optional sequence of debug information; must
         be convertible to JSON; pass None to ignore.
  """

  def __init__(self, errorString, debugInfo=None):
    # Forward both values to Exception so they appear in self.args / str()
    super(HTMPredictionModelException, self).__init__(errorString, debugInfo)
    self.errorString = errorString
    self.debugInfo = debugInfo
class HTMPredictionModelInvalidArgument(HTMPredictionModelException):
  """
  Raised when a supplied value to a method is invalid. Carries the same
  errorString/debugInfo attributes as the base class.
  """
  pass
class HTMPredictionModelInvalidRangeError(HTMPredictionModelException):
  """
  Raised when supplied ranges to a method are invalid. Carries the same
  errorString/debugInfo attributes as the base class.
  """
  pass
| 919 | Python | .py | 25 | 31.84 | 77 | 0.742081 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,972 | opf_environment.py | numenta_nupic-legacy/src/nupic/frameworks/opf/opf_environment.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file describes the interfaces for adapting
:class:`~nupic.frameworks.opf.opf_task_driver.OPFTaskDriver` to specific
environments.
These interfaces encapsulate external specifics, such as
data source (e.g., .csv file or database, etc.), prediction sink (.csv file or
databse, etc.), report and serialization destination, etc.
"""
from abc import ABCMeta, abstractmethod
from collections import namedtuple
class PredictionLoggerIface(object):
  """
  This class defines the interface for OPF prediction logger implementations.

  All methods are abstract; concrete subclasses must implement each one.
  """

  __metaclass__ = ABCMeta


  @abstractmethod
  def close(self):
    """ Closes connect to output store and cleans up any resources associated
    with writing.
    """

  @abstractmethod
  def writeRecord(self, modelResult):
    """ Emits a set of inputs data, inferences, and metrics from a model
    resulting from a single record.

    :param modelResult: (:class:`nupic.frameworks.opf.opf_utils.ModelResult`)
           contains the model input and output for the current timestep.
    """

  @abstractmethod
  def writeRecords(self, modelResults, progressCB=None):
    """
    Same as :meth:`writeRecord`, but emits multiple rows in one shot.

    :param modelResults: (list) of
           :class:`nupic.frameworks.opf.opf_utils.ModelResult` objects, each
           represents one record.
    :param progressCB: (func) optional callback method that will be called
           after each batch of records is written.
    """

  @abstractmethod
  def setLoggedMetrics(self, metricNames):
    """ Sets which metrics should be written to the prediction log.

    :param metricNames: (list) metric names that match the labels of the
           metrics that should be written to the prediction log
    """

  @abstractmethod
  def checkpoint(self, checkpointSink, maxRows):
    """ Save a checkpoint of the prediction output stream. The checkpoint
    comprises up to maxRows of the most recent inference records.

    :param checkpointSink: A File-like object where predictions checkpoint
           data, if any, will be stored.
    :param maxRows: (int) Maximum number of most recent inference rows to
           checkpoint.
    """
# One element in the sequence of predictions handed to
# PredictionLoggerIface.emit():
#
#   predictionKind: A PredictionKind constant describing this prediction
#   predictionRow:  A sequence (list, tuple, or nupic array) of field values
#                   comprising the prediction; field order matches the
#                   inputRecordSensorMappings arg described for
#                   PredictionLoggerIface.__call__
#   classification: classification data associated with the prediction
PredictionLoggingElement = namedtuple(
    "PredictionLoggingElement",
    ["predictionKind", "predictionRow", "classification"])
| 3,900 | Python | .py | 85 | 41.411765 | 80 | 0.703137 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,973 | model.py | numenta_nupic-legacy/src/nupic/frameworks/opf/model.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Module defining the OPF Model base class."""
import cPickle as pickle
import json
import os
import shutil
from abc import ABCMeta, abstractmethod
from nupic.frameworks.opf.opf_utils import InferenceType
import nupic.frameworks.opf.opf_utils as opf_utils
from nupic.serializable import Serializable
# Capnp reader traveral limit (see capnp::ReaderOptions)
_TRAVERSAL_LIMIT_IN_WORDS = 1 << 63
class Model(Serializable):
""" This is the base class that all OPF Model implementations should
subclass.
It includes a number of virtual methods, to be overridden by subclasses,
as well as some shared functionality for saving/loading models
:param inferenceType: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)
A value that specifies the type of inference.
"""
__metaclass__ = ABCMeta
def __init__(self, inferenceType=None, proto=None):
  """
  :param opf_utils.InferenceType inferenceType: mutually-exclusive with proto
    arg
  :param proto: capnp ModelProto message reader for deserializing;
    mutually-exclusive with the other constructor args.
  """
  # Exactly one of inferenceType / proto must be supplied (XOR); note that
  # `and` binds tighter than `or`, so the expression parses as
  # (inferenceType and not proto) or (not inferenceType and proto)
  assert inferenceType is not None and proto is None or (
    inferenceType is None and proto is not None), (
      "proto and other constructor args are mutually exclusive")

  if proto is None:
    # Fresh model: counters off at zero, learning and inference on
    self._numPredictions = 0
    self.__inferenceType = inferenceType
    self.__learningEnabled = True
    self.__inferenceEnabled = True
    self.__inferenceArgs = {}
  else:
    # Restore state from a capnp ModelProto reader
    self._numPredictions = proto.numPredictions
    inferenceType = str(proto.inferenceType)
    # upper-case first letter to be compatible with enum InferenceType naming
    inferenceType = inferenceType[:1].upper() + inferenceType[1:]
    self.__inferenceType = InferenceType.getValue(inferenceType)

    self.__learningEnabled = proto.learningEnabled
    self.__inferenceEnabled = proto.inferenceEnabled
    # inferenceArgs round-trips through JSON (see writeBaseToProto)
    self.__inferenceArgs = json.loads(proto.inferenceArgs)
def run(self, inputRecord):
  """
  Run one iteration of this model.

  :param inputRecord: (object)
         A record object formatted according to
         :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
         :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
         result format.
  :returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
         A ModelResult namedtuple. The contents of ModelResult.inferences
         depends on the the specific inference type of this model, which
         can be queried by :meth:`.getInferenceType`.
  """
  # Hand out the current 0-based prediction index, then advance the counter
  thisPredictionNum = self._numPredictions
  self._numPredictions += 1
  return opf_utils.ModelResult(predictionNumber=thisPredictionNum,
                               rawInput=inputRecord)
@abstractmethod
def finishLearning(self):
  """ Place the model in a permanent "finished learning" mode.
  In such a mode the model will not be able to learn from subsequent input
  records.

  .. note:: Upon completion of this command, learning may not be resumed on
     the given instance of the model (e.g., the implementation may optimize
     itself by pruning data structures that are necessary for learning).
  """
@abstractmethod
def resetSequenceStates(self):
  """
  Signal that the input record is the start of a new sequence. Normally called
  to force the delineation of a sequence, such as between OPF tasks.
  Implemented by subclasses.
  """
@abstractmethod
def getFieldInfo(self, includeClassifierOnlyField=False):
  """
  Return the sequence of :class:`~nupic.data.field_meta.FieldMetaInfo` objects
  specifying the format of Model's output.

  This may be different than the list of
  :class:`~nupic.data.field_meta.FieldMetaInfo` objects supplied at
  initialization (e.g., due to the transcoding of some input fields into
  meta-fields, such as datetime -> dayOfWeek, timeOfDay, etc.).

  :param includeClassifierOnlyField: (bool)
         If True, any field which is only sent to the classifier (i.e. not
         sent in to the bottom of the network) is also included
  :returns: (list) of :class:`~nupic.data.field_meta.FieldMetaInfo` objects.
  """
@abstractmethod
def setFieldStatistics(self, fieldStats):
  """ Propagate field statistics to the model in case some of its machinery
  needs it.

  :param fieldStats: (dict)
         A dict of dicts with first key being the fieldname and the second
         key is min,max or other supported statistics.
  """
@abstractmethod
def getRuntimeStats(self):
  """ Get runtime statistics specific to this model, i.e.
  ``activeCellOverlapAvg``.

  :returns: (dict) A {statistic names: stats} dictionary
  """
@abstractmethod
def _getLogger(self):
  """ Get the logger for this object.
  This is a protected method that is used by the ModelBase to access the
  logger created by the subclass.

  :returns: (Logger) A Logger object, it should not be None.
  """
###############################################################################
# Common learning/inference methods
###############################################################################
def getInferenceType(self):
  """ Return the InferenceType of this model.
  This is immutable -- it is fixed at construction time.

  :returns: :class:`~nupic.frameworks.opf.opf_utils.InferenceType`
  """
  return self.__inferenceType
def enableLearning(self):
  """ Enable learning for this model instance. """
  self.__learningEnabled = True
def disableLearning(self):
  """ Disable learning for this model instance. """
  self.__learningEnabled = False
def isLearningEnabled(self):
  """ Return the Learning state of the current model.

  :returns: (bool) The learning state
  """
  return self.__learningEnabled
def enableInference(self, inferenceArgs=None):
  """ Enable inference for this model.

  :param inferenceArgs: (dict)
         A dictionary of arguments required for inference. These depend on
         the InferenceType of the current model
  """
  # Record the args first, then flip the flag
  self.__inferenceArgs = inferenceArgs
  self.__inferenceEnabled = True
def getInferenceArgs(self):
  """ Return the dict of arguments for the current inference mode
  (as passed to :meth:`enableInference`).

  :returns: (dict) The arguments of the inference mode
  """
  return self.__inferenceArgs
def disableInference(self):
  """ Turn Inference off for the current model. Inference args are retained. """
  self.__inferenceEnabled = False
def isInferenceEnabled(self):
  """ Return the inference state of the current model.

  :returns: (bool) The inference state
  """
  return self.__inferenceEnabled
@staticmethod
def getSchema():
  """Return the pycapnp proto type that the class uses for serialization.

  This is used to convert the proto into the proper type before passing it
  into the read or write method of the subclass.

  Subclasses must override; the base implementation always raises.
  """
  raise NotImplementedError()
@staticmethod
def _getModelCheckpointFilePath(checkpointDir):
  """ Return the absolute path of the model's checkpoint file.

  :param checkpointDir: (string)
         Directory of where the experiment is to be or was saved
  :returns: (string) An absolute path.
  """
  # Checkpoint data always lives in "model.data" inside the directory
  return os.path.abspath(os.path.join(checkpointDir, "model.data"))
def writeToCheckpoint(self, checkpointDir):
  """Serializes model using capnproto and writes data to ``checkpointDir``

  Any pre-existing checkpoint directory is removed first, but only after
  verifying it actually looks like a model checkpoint (a directory that
  contains the expected model.data file) -- this guards against deleting
  an unrelated path by accident.
  """
  proto = self.getSchema().new_message()

  self.write(proto)

  checkpointPath = self._getModelCheckpointFilePath(checkpointDir)

  # Clean up old saved state, if any
  if os.path.exists(checkpointDir):
    if not os.path.isdir(checkpointDir):
      raise Exception(("Existing filesystem entry <%s> is not a model"
                       " checkpoint -- refusing to delete (not a directory)") \
                      % checkpointDir)
    if not os.path.isfile(checkpointPath):
      raise Exception(("Existing filesystem entry <%s> is not a model"
                       " checkpoint -- refusing to delete"\
                       " (%s missing or not a file)") % \
                      (checkpointDir, checkpointPath))

    shutil.rmtree(checkpointDir)

  # Create a new directory for saving state
  self.__makeDirectoryFromAbsolutePath(checkpointDir)

  # capnp data is binary, hence 'wb'
  with open(checkpointPath, 'wb') as f:
    proto.write(f)
@classmethod
def readFromCheckpoint(cls, checkpointDir):
  """Deserializes model from checkpointDir using capnproto

  :param checkpointDir: (string) directory a model was previously
         checkpointed to via :meth:`writeToCheckpoint`
  :returns: deserialized model instance (of the concrete subclass ``cls``)
  """
  checkpointPath = cls._getModelCheckpointFilePath(checkpointDir)

  # Open in binary mode: the checkpoint holds raw capnp data written with
  # 'wb' by writeToCheckpoint. Text mode ('r') corrupts the stream on
  # Windows (newline translation) and fails under Python 3.
  with open(checkpointPath, 'rb') as f:
    proto = cls.getSchema().read(f,
                                 traversal_limit_in_words=_TRAVERSAL_LIMIT_IN_WORDS)

  model = cls.read(proto)
  return model
def writeBaseToProto(self, proto):
"""Save the state maintained by the Model base class
:param proto: capnp ModelProto message builder
"""
inferenceType = self.getInferenceType()
# lower-case first letter to be compatible with capnproto enum naming
inferenceType = inferenceType[:1].lower() + inferenceType[1:]
proto.inferenceType = inferenceType
proto.numPredictions = self._numPredictions
proto.learningEnabled = self.__learningEnabled
proto.inferenceEnabled = self.__inferenceEnabled
proto.inferenceArgs = json.dumps(self.__inferenceArgs)
def write(self, proto):
"""Write state to proto object.
The type of proto is determined by :meth:`getSchema`.
"""
raise NotImplementedError()
@classmethod
def read(cls, proto):
"""Read state from proto object.
The type of proto is determined by :meth:`getSchema`.
"""
raise NotImplementedError()
###############################################################################
# Implementation of common save/load functionality
###############################################################################
def save(self, saveModelDir):
""" Save the model in the given directory.
:param saveModelDir: (string)
Absolute directory path for saving the model. This directory should
only be used to store a saved model. If the directory does not exist,
it will be created automatically and populated with model data. A
pre-existing directory will only be accepted if it contains previously
saved model data. If such a directory is given, the full contents of
the directory will be deleted and replaced with current model data.
"""
logger = self._getLogger()
logger.debug("(%s) Creating local checkpoint in %r...",
self, saveModelDir)
modelPickleFilePath = self._getModelPickleFilePath(saveModelDir)
# Clean up old saved state, if any
if os.path.exists(saveModelDir):
if not os.path.isdir(saveModelDir):
raise Exception(("Existing filesystem entry <%s> is not a model"
" checkpoint -- refusing to delete (not a directory)") \
% saveModelDir)
if not os.path.isfile(modelPickleFilePath):
raise Exception(("Existing filesystem entry <%s> is not a model"
" checkpoint -- refusing to delete"\
" (%s missing or not a file)") % \
(saveModelDir, modelPickleFilePath))
shutil.rmtree(saveModelDir)
# Create a new directory for saving state
self.__makeDirectoryFromAbsolutePath(saveModelDir)
with open(modelPickleFilePath, 'wb') as modelPickleFile:
logger.debug("(%s) Pickling Model instance...", self)
pickle.dump(self, modelPickleFile, protocol=pickle.HIGHEST_PROTOCOL)
logger.debug("(%s) Finished pickling Model instance", self)
# Tell the model to save extra data, if any, that's too big for pickling
self._serializeExtraData(extraDataDir=self._getModelExtraDataDir(saveModelDir))
logger.debug("(%s) Finished creating local checkpoint", self)
return
def _serializeExtraData(self, extraDataDir):
""" Protected method that is called during serialization with an external
directory path. It can be overridden by subclasses to bypass pickle for
saving large binary states.
This is called by ModelBase only.
:param extraDataDir: (string) Model's extra data directory path
"""
pass
@classmethod
def load(cls, savedModelDir):
""" Load saved model.
:param savedModelDir: (string)
Directory of where the experiment is to be or was saved
:returns: (:class:`Model`) The loaded model instance
"""
logger = opf_utils.initLogger(cls)
logger.debug("Loading model from local checkpoint at %r...", savedModelDir)
# Load the model
modelPickleFilePath = Model._getModelPickleFilePath(savedModelDir)
with open(modelPickleFilePath, 'rb') as modelPickleFile:
logger.debug("Unpickling Model instance...")
model = pickle.load(modelPickleFile)
logger.debug("Finished unpickling Model instance")
# Tell the model to load extra data, if any, that was too big for pickling
model._deSerializeExtraData(
extraDataDir=Model._getModelExtraDataDir(savedModelDir))
logger.debug("Finished Loading model from local checkpoint")
return model
def _deSerializeExtraData(self, extraDataDir):
""" Protected method that is called during deserialization
(after __setstate__) with an external directory path.
It can be overridden by subclasses to bypass pickle for loading large
binary states.
This is called by ModelBase only.
:param extraDataDir: (string) Model's extra data directory path
"""
pass
@staticmethod
def _getModelPickleFilePath(saveModelDir):
""" Return the absolute path of the model's pickle file.
:param saveModelDir: (string)
Directory of where the experiment is to be or was saved
:returns: (string) An absolute path.
"""
path = os.path.join(saveModelDir, "model.pkl")
path = os.path.abspath(path)
return path
@staticmethod
def _getModelExtraDataDir(saveModelDir):
""" Return the absolute path to the directory where the model's own
"extra data" are stored (i.e., data that's too big for pickling).
:param saveModelDir: (string)
Directory of where the experiment is to be or was saved
:returns: (string) An absolute path.
"""
path = os.path.join(saveModelDir, "modelextradata")
path = os.path.abspath(path)
return path
@staticmethod
def __makeDirectoryFromAbsolutePath(absDirPath):
""" Make directory for the given directory path if it doesn't already
exist in the filesystem.
:param absDirPath: (string) Absolute path of the directory to create
@exception (Exception) OSError if directory creation fails
"""
assert os.path.isabs(absDirPath)
# Create the experiment directory
# TODO Is default mode (0777) appropriate?
try:
os.makedirs(absDirPath)
except OSError as e:
if e.errno != os.errno.EEXIST:
raise
return
| 16,243 | Python | .py | 360 | 38.825 | 86 | 0.686478 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,974 | safe_interpreter.py | numenta_nupic-legacy/src/nupic/frameworks/opf/safe_interpreter.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Safe Python interpreter for user-submitted code."""
import asteval
class SafeInterpreter(asteval.Interpreter):
blacklisted_nodes = set(('while', 'for', ))
def __init__(self, *args, **kwargs):
"""Initialize interpreter with blacklisted nodes removed from supported
nodes.
"""
self.supported_nodes = tuple(set(self.supported_nodes) -
self.blacklisted_nodes)
asteval.Interpreter.__init__(self, *args, **kwargs)
| 1,453 | Python | .py | 31 | 43.741935 | 75 | 0.666667 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,975 | helpers.py | numenta_nupic-legacy/src/nupic/frameworks/opf/helpers.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file contains utility functions that may be imported
by clients of the framework. Functions that are used only by
the prediction framework should be in opf_utils.py
TODO: Rename as helpers.py once we're ready to replace the legacy
helpers.py
"""
import imp
import os
import exp_description_api
def loadExperiment(path):
"""Loads the experiment description file from the path.
:param path: (string) The path to a directory containing a description.py file
or the file itself.
:returns: (config, control)
"""
if not os.path.isdir(path):
path = os.path.dirname(path)
descriptionPyModule = loadExperimentDescriptionScriptFromDir(path)
expIface = getExperimentDescriptionInterfaceFromModule(descriptionPyModule)
return expIface.getModelDescription(), expIface.getModelControl()
def loadExperimentDescriptionScriptFromDir(experimentDir):
""" Loads the experiment description python script from the given experiment
directory.
:param experimentDir: (string) experiment directory path
:returns: module of the loaded experiment description scripts
"""
descriptionScriptPath = os.path.join(experimentDir, "description.py")
module = _loadDescriptionFile(descriptionScriptPath)
return module
def getExperimentDescriptionInterfaceFromModule(module):
"""
:param module: imported description.py module
:returns: (:class:`nupic.frameworks.opf.exp_description_api.DescriptionIface`)
represents the experiment description
"""
result = module.descriptionInterface
assert isinstance(result, exp_description_api.DescriptionIface), \
"expected DescriptionIface-based instance, but got %s" % type(result)
return result
g_descriptionImportCount = 0
def _loadDescriptionFile(descriptionPyPath):
"""Loads a description file and returns it as a module.
descriptionPyPath: path of description.py file to load
"""
global g_descriptionImportCount
if not os.path.isfile(descriptionPyPath):
raise RuntimeError(("Experiment description file %s does not exist or " + \
"is not a file") % (descriptionPyPath,))
mod = imp.load_source("pf_description%d" % g_descriptionImportCount,
descriptionPyPath)
g_descriptionImportCount += 1
if not hasattr(mod, "descriptionInterface"):
raise RuntimeError("Experiment description file %s does not define %s" % \
(descriptionPyPath, "descriptionInterface"))
if not isinstance(mod.descriptionInterface, exp_description_api.DescriptionIface):
raise RuntimeError(("Experiment description file %s defines %s but it " + \
"is not DescriptionIface-based") % \
(descriptionPyPath, name))
return mod
| 3,750 | Python | .py | 80 | 42.5625 | 84 | 0.727098 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,976 | opf_task_driver.py | numenta_nupic-legacy/src/nupic/frameworks/opf/opf_task_driver.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This script is part of the Online Prediction Framework (OPF) suite.
It implements the TaskDriver for an OPF experiment.
It's used by OPF RunExperiment and may also be useful for swarming's
HyperSearch Worker
The TaskDriver is a simple state machine that:
1. Accepts incoming records from the client one data row at a time.
2. Cycles through phases in the requested iteration cycle; the phases may
include any combination of learnOnly, inferOnly, and learnAndInfer.
3. For each data row, generates an OPF Model workflow that corresponds to
the current phase in the iteration cycle and the requested inference types.
4. Emits inference results via user-supplied PredictionLogger
5. Gathers requested inference metrics.
6. Invokes user-proved callbaks (setup, postIter, finish)
.. note:: For the purposes of testing predictions and generating metrics, it
assumes that all incoming dataset records are "sensor data" - i.e., ground
truth. However, if you're using OPFTaskDriver only to generate predictions,
and not for testing predictions/generating metrics, they don't need to be
"ground truth" records.
"""
from abc import (
ABCMeta,
abstractmethod)
from collections import defaultdict
import itertools
import logging
from prediction_metrics_manager import (
MetricsManager,
)
class IterationPhaseSpecLearnOnly(object):
""" This class represents the Learn-only phase of the Iteration Cycle in
the TaskControl block of description.py
:param nIters: (int) iterations to remain in this phase. An iteration
corresponds to a single :meth:`OPFTaskDriver.handleInputRecord`
call.
"""
def __init__(self, nIters):
assert nIters > 0, "nIter=%s" % nIters
self.__nIters = nIters
return
def __repr__(self):
s = "%s(nIters=%r)" % (self.__class__.__name__, self.__nIters)
return s
def _getImpl(self, model):
""" Creates and returns the _IterationPhase-based instance corresponding
to this phase specification
model: Model instance
"""
impl = _IterationPhaseLearnOnly(model=model,
nIters=self.__nIters)
return impl
class IterationPhaseSpecInferOnly(object):
""" This class represents the Infer-only phase of the Iteration Cycle in
the TaskControl block of description.py
:param nIters: (int) Number of iterations to remain in this phase. An
iteration corresponds to a single
:meth:`OPFTaskDriver.handleInputRecord` call.
:param inferenceArgs: (dict) A dictionary of arguments required for inference.
These depend on the
:class:`~nupic.frameworks.opf.opf_utils.InferenceType` of the current
model.
"""
def __init__(self, nIters, inferenceArgs=None):
assert nIters > 0, "nIters=%s" % nIters
self.__nIters = nIters
self.__inferenceArgs = inferenceArgs
return
def __repr__(self):
s = "%s(nIters=%r)" % (self.__class__.__name__, self.__nIters)
return s
def _getImpl(self, model):
""" Creates and returns the _IterationPhase-based instance corresponding
to this phase specification
model: Model instance
"""
impl = _IterationPhaseInferOnly(model=model,
nIters=self.__nIters,
inferenceArgs=self.__inferenceArgs)
return impl
class IterationPhaseSpecLearnAndInfer(object):
""" This class represents the Learn-and-Infer phase of the Iteration Cycle in
the TaskControl block of description.py
:param nIters: (int) Number of iterations to remain in this phase. An
iteration corresponds to a single
:meth:`OPFTaskDriver.handleInputRecord` call.
:param inferenceArgs: (dict) A dictionary of arguments required for inference.
These depend on the
:class:`~nupic.frameworks.opf.opf_utils.InferenceType` of the current
model.
"""
def __init__(self, nIters, inferenceArgs=None):
assert nIters > 0, "nIters=%s" % nIters
self.__nIters = nIters
self.__inferenceArgs = inferenceArgs
return
def __repr__(self):
s = "%s(nIters=%r)" % (self.__class__.__name__, self.__nIters)
return s
def _getImpl(self, model):
""" Creates and returns the _IterationPhase-based instance corresponding
to this phase specification
model: Model instance
"""
impl = _IterationPhaseLearnAndInfer(model=model,
nIters=self.__nIters,
inferenceArgs=self.__inferenceArgs)
return impl
class OPFTaskDriver(object):
"""
Task Phase Driver implementation
Conceptually, the client injects input records, one at a time, into
an OPFTaskDriver instance for execution according to the
current IterationPhase as maintained by the OPFTaskDriver instance.
:param taskControl: (dict) conforming to opfTaskControlSchema.json that
defines the actions to be performed on the given model.
:param model: (:class:`nupic.frameworks.opf.model.Model`) that this
OPFTaskDriver instance will drive.
"""
def __init__(self, taskControl, model):
#validateOpfJsonValue(taskControl, "opfTaskControlSchema.json")
self.__reprstr = ("%s(" + \
"taskControl=%r, " + \
"model=%r)") % \
(self.__class__.__name__,
taskControl,
model)
# Init logging
#
self.logger = logging.getLogger(".".join(
['com.numenta', self.__class__.__module__, self.__class__.__name__]))
self.logger.debug(("Instantiating %s; %r.") % \
(self.__class__.__name__,
self.__reprstr))
# -----------------------------------------------------------------------
# Save args of interest
#
self.__taskControl = taskControl
self.__model = model
# -----------------------------------------------------------------------
# Create Metrics Manager.
#
self.__metricsMgr = None
metrics = taskControl.get('metrics', None)
self.__metricsMgr = MetricsManager(metricSpecs=metrics,
inferenceType=model.getInferenceType(),
fieldInfo=model.getFieldInfo())
# -----------------------------------------------------------------------
# Figure out which metrics should be logged
#
# The logged metrics won't within the current task
self.__loggedMetricLabels = set([])
loggedMetricPatterns = taskControl.get('loggedMetrics', None)
# -----------------------------------------------------------------------
# Create our phase manager
#
self.__phaseManager = _PhaseManager(
model=model,
phaseSpecs=taskControl.get('iterationCycle', []))
# -----------------------------------------------------------------------
# Initialize the callbacks container
#
self.__userCallbacks = defaultdict(list, taskControl.get('callbacks', {}))
return
def __repr__(self):
return self.__reprstr
def replaceIterationCycle(self, phaseSpecs):
""" Replaces the Iteration Cycle phases
:param phaseSpecs: Iteration cycle description consisting of a sequence of
IterationPhaseSpecXXXXX elements that are performed in the
given order
"""
# -----------------------------------------------------------------------
# Replace our phase manager
#
self.__phaseManager = _PhaseManager(
model=self.__model,
phaseSpecs=phaseSpecs)
return
def setup(self):
""" Performs initial setup activities, including 'setup' callbacks. This
method MUST be called once before the first call to
:meth:`handleInputRecord`.
"""
# Execute task-setup callbacks
for cb in self.__userCallbacks['setup']:
cb(self.__model)
return
def finalize(self):
""" Perform final activities, including 'finish' callbacks. This
method MUST be called once after the last call to :meth:`handleInputRecord`.
"""
# Execute task-finish callbacks
for cb in self.__userCallbacks['finish']:
cb(self.__model)
return
def handleInputRecord(self, inputRecord):
"""
Processes the given record according to the current iteration cycle phase
:param inputRecord: (object) record expected to be returned from
:meth:`nupic.data.record_stream.RecordStreamIface.getNextRecord`.
:returns: :class:`nupic.frameworks.opf.opf_utils.ModelResult`
"""
assert inputRecord, "Invalid inputRecord: %r" % inputRecord
results = self.__phaseManager.handleInputRecord(inputRecord)
metrics = self.__metricsMgr.update(results)
# Execute task-postIter callbacks
for cb in self.__userCallbacks['postIter']:
cb(self.__model)
results.metrics = metrics
# Return the input and predictions for this record
return results
def getMetrics(self):
""" Gets the current metric values
:returns: A dictionary of metric values. The key for each entry is the label
for the metric spec, as generated by
:meth:`nupic.frameworks.opf.metrics.MetricSpec.getLabel`. The
value for each entry is a dictionary containing the value of the
metric as returned by
:meth:`nupic.frameworks.opf.metrics.MetricsIface.getMetric`.
"""
return self.__metricsMgr.getMetrics()
def getMetricLabels(self):
"""
:returns: (list) labels for the metrics that are being calculated
"""
return self.__metricsMgr.getMetricLabels()
class _PhaseManager(object):
""" Manages iteration cycle phase drivers
"""
def __init__(self, model, phaseSpecs):
"""
model: Model instance
phaseSpecs: Iteration period description consisting of a sequence of
IterationPhaseSpecXXXXX elements that are performed in the
given order
"""
self.__model = model
# Instantiate Iteration Phase drivers
self.__phases = tuple(map(lambda x: x._getImpl(model=model),
phaseSpecs))
# Init phase-management structures
if self.__phases:
self.__phaseCycler = itertools.cycle(self.__phases)
self.__advancePhase()
return
def __repr__(self):
return "%s(phases=%r)" % \
(self.__class__.__name__,
self.__phases)
def __advancePhase(self):
""" Advance to the next iteration cycle phase
"""
self.__currentPhase = self.__phaseCycler.next()
self.__currentPhase.enterPhase()
return
def handleInputRecord(self, inputRecord):
""" Processes the given record according to the current phase
inputRecord: record object formatted according to
nupic.data.FileSource.getNext() result format.
Returns: An opf_utils.ModelResult object with the inputs and inferences
after the current record is processed by the model
"""
results = self.__model.run(inputRecord)
shouldContinue = self.__currentPhase.advance()
if not shouldContinue:
self.__advancePhase()
return results
###############################################################################
# Iteration cycle phase drivers
###############################################################################
class _IterationPhase(object):
""" Interface for IterationPhaseXXXXX classes
"""
__metaclass__ = ABCMeta
def __init__(self, nIters):
"""
nIters: Number of iterations; MUST be greater than 0
"""
assert nIters > 0, "nIters=%s" % nIters
self.__nIters = nIters
return
@abstractmethod
def enterPhase(self):
"""
Performs initialization that is necessary upon entry to the phase. Must
be called before handleInputRecord() at the beginning of each phase
"""
self.__iter = iter(xrange(self.__nIters))
# Prime the iterator
self.__iter.next()
def advance(self):
""" Advances the iteration;
Returns: True if more iterations remain; False if this is the final
iteration.
"""
hasMore = True
try:
self.__iter.next()
except StopIteration:
self.__iter = None
hasMore = False
return hasMore
class _IterationPhaseLearnOnly(_IterationPhase):
""" This class implements the "learn-only" phase of the Iteration Cycle
"""
def __init__(self, model, nIters):
"""
model: Model instance
nIters: Number of iterations; MUST be greater than 0
"""
super(_IterationPhaseLearnOnly, self).__init__(nIters=nIters)
self.__model = model
return
def enterPhase(self):
""" [_IterationPhase method implementation]
Performs initialization that is necessary upon entry to the phase. Must
be called before handleInputRecord() at the beginning of each phase
"""
super(_IterationPhaseLearnOnly, self).enterPhase()
self.__model.enableLearning()
self.__model.disableInference()
return
class _IterationPhaseInferCommon(_IterationPhase):
""" Basic class providing common implementation for
_IterationPhaseInferOnly and _IterationPhaseLearnAndInfer classes
"""
def __init__(self, model, nIters, inferenceArgs):
"""
model: Model instance
nIters: Number of iterations; MUST be greater than 0
inferenceArgs:
A dictionary of arguments required for inference. These
depend on the InferenceType of the current model
"""
super(_IterationPhaseInferCommon, self).__init__(nIters=nIters)
self._model = model
self._inferenceArgs = inferenceArgs
return
def enterPhase(self):
""" [_IterationPhase method implementation]
Performs initialization that is necessary upon entry to the phase. Must
be called before handleInputRecord() at the beginning of each phase
"""
super(_IterationPhaseInferCommon, self).enterPhase()
self._model.enableInference(inferenceArgs=self._inferenceArgs)
return
class _IterationPhaseInferOnly(_IterationPhaseInferCommon):
""" This class implements the "infer-only" phase of the Iteration Cycle
"""
def __init__(self, model, nIters, inferenceArgs):
"""
model: Model instance
nIters: Number of iterations; MUST be greater than 0
inferenceArgs:
A dictionary of arguments required for inference. These
depend on the InferenceType of the current model
"""
super(_IterationPhaseInferOnly, self).__init__(
model=model,
nIters=nIters,
inferenceArgs=inferenceArgs)
return
def enterPhase(self):
""" [_IterationPhase method implementation]
Performs initialization that is necessary upon entry to the phase. Must
be called before handleInputRecord() at the beginning of each phase
"""
super(_IterationPhaseInferOnly, self).enterPhase()
self._model.disableLearning()
return
class _IterationPhaseLearnAndInfer(_IterationPhaseInferCommon):
""" This class implements the "learn-and-infer" phase of the Iteration Cycle
"""
def __init__(self, model, nIters, inferenceArgs):
"""
model: Model instance
nIters: Number of iterations; MUST be greater than 0
inferenceArgs:
A dictionary of arguments required for inference. These
depend on the InferenceType of the current model
"""
super(_IterationPhaseLearnAndInfer, self).__init__(
model=model,
nIters=nIters,
inferenceArgs=inferenceArgs)
return
def enterPhase(self):
""" [_IterationPhase method implementation]
Performs initialization that is necessary upon entry to the phase. Must
be called before handleInputRecord() at the beginning of each phase
"""
super(_IterationPhaseLearnAndInfer, self).enterPhase()
self._model.enableLearning()
return
| 17,039 | Python | .py | 414 | 34.722222 | 80 | 0.652591 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,977 | cluster_params.py | numenta_nupic-legacy/src/nupic/frameworks/opf/common_models/cluster_params.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import numpy as np
import os
from pkg_resources import resource_stream
def getScalarMetricWithTimeOfDayAnomalyParams(metricData,
minVal=None,
maxVal=None,
minResolution=None,
tmImplementation = "cpp"):
"""
Return a dict that can be used to create an anomaly model via
:meth:`nupic.frameworks.opf.model_factory.ModelFactory.create`.
Example:
.. code-block:: python
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.common_models.cluster_params import (
getScalarMetricWithTimeOfDayAnomalyParams)
params = getScalarMetricWithTimeOfDayAnomalyParams(
metricData=[0],
tmImplementation="cpp",
minVal=0.0,
maxVal=100.0)
model = ModelFactory.create(modelConfig=params["modelConfig"])
model.enableLearning()
model.enableInference(params["inferenceArgs"])
:param metricData: numpy array of metric data. Used to calculate ``minVal``
and ``maxVal`` if either is unspecified
:param minVal: minimum value of metric. Used to set up encoders. If ``None``
will be derived from ``metricData``.
:param maxVal: maximum value of metric. Used to set up input encoders. If
``None`` will be derived from ``metricData``
:param minResolution: minimum resolution of metric. Used to set up
encoders. If ``None``, will use default value of ``0.001``.
:param tmImplementation: (string) specifying type of temporal memory
implementation. Valid strings : ``["cpp", "tm_cpp"]``
:returns: (dict) containing ``modelConfig`` and ``inferenceArgs`` top-level
properties. The value of the ``modelConfig`` property is for passing to
:meth:`~nupic.frameworks.opf.model_factory.ModelFactory.create` method as
the ``modelConfig`` parameter. The ``inferenceArgs`` property is for passing
to the resulting model's
:meth:`~nupic.frameworks.opf.model.Model.enableInference` method as the
``inferenceArgs`` parameter.
.. note:: The timestamp field corresponds to input ``c0``; the predicted
field corresponds to input ``c1``.
"""
# Default values
if minResolution is None:
minResolution = 0.001
# Compute min and/or max from the data if not specified
if minVal is None or maxVal is None:
compMinVal, compMaxVal = _rangeGen(metricData)
if minVal is None:
minVal = compMinVal
if maxVal is None:
maxVal = compMaxVal
# Handle the corner case where the incoming min and max are the same
if minVal == maxVal:
maxVal = minVal + 1
# Load model parameters and update encoder params
if (tmImplementation is "cpp"):
paramFileRelativePath = os.path.join(
"anomaly_params_random_encoder",
"best_single_metric_anomaly_params_cpp.json")
elif (tmImplementation is "tm_cpp"):
paramFileRelativePath = os.path.join(
"anomaly_params_random_encoder",
"best_single_metric_anomaly_params_tm_cpp.json")
else:
raise ValueError("Invalid string for tmImplementation. Try cpp or tm_cpp")
with resource_stream(__name__, paramFileRelativePath) as infile:
paramSet = json.load(infile)
_fixupRandomEncoderParams(paramSet, minVal, maxVal, minResolution)
return paramSet
def _rangeGen(data, std=1):
"""
Return reasonable min/max values to use given the data.
"""
dataStd = np.std(data)
if dataStd == 0:
dataStd = 1
minval = np.min(data) - std * dataStd
maxval = np.max(data) + std * dataStd
return minval, maxval
def _fixupRandomEncoderParams(params, minVal, maxVal, minResolution):
"""
Given model params, figure out the correct parameters for the
RandomDistributed encoder. Modifies params in place.
"""
encodersDict = (
params["modelConfig"]["modelParams"]["sensorParams"]["encoders"]
)
for encoder in encodersDict.itervalues():
if encoder is not None:
if encoder["type"] == "RandomDistributedScalarEncoder":
resolution = max(minResolution,
(maxVal - minVal) / encoder.pop("numBuckets")
)
encodersDict["c1"]["resolution"] = resolution
| 5,244 | Python | .py | 118 | 38.711864 | 80 | 0.686813 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,978 | __init__.py | numenta_nupic-legacy/src/nupic/frameworks/opf/common_models/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 979 | Python | .py | 20 | 47.95 | 73 | 0.668405 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,979 | category.py | numenta_nupic-legacy/src/nupic/encoders/category.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
from nupic.data.field_meta import FieldMetaType
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.base import Encoder, EncoderResult
from nupic.encoders.scalar import ScalarEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.category_capnp import CategoryEncoderProto
UNKNOWN = "<UNKNOWN>"
class CategoryEncoder(Encoder):
  """
  Encodes a list of discrete categories (described by strings), that aren't
  related to each other, so we never emit a mixture of categories.

  The value of zero is reserved for "unknown category".

  Internally we use a :class:`.ScalarEncoder` with a radius of 1, but since we
  only encode integers, we never get mixture outputs.

  The :class:`.SDRCategoryEncoder` uses a different method to encode categories.

  :param w: (int) number of ON bits used per category
  :param categoryList: list of discrete string categories
  :param name: (string) name of this encoder, used in decode descriptions
  :param verbosity: (int) 0 = silent; >= 2 prints encode/decode diagnostics
  :param forced: if True, skip checks for parameters' settings; see
                 :class:`.ScalarEncoder` for details. (default False)
  """

  def __init__(self, w, categoryList, name="category", verbosity=0, forced=False):
    # No sub-encoders: this encoder produces its output directly.
    self.encoders = None
    self.verbosity = verbosity

    # number of categories includes "unknown"
    self.ncategories = len(categoryList) + 1

    # Bidirectional mapping between category strings and their scalar
    # indices; index 0 is reserved for the "<UNKNOWN>" category.
    self.categoryToIndex = dict()
    self.indexToCategory = dict()
    self.indexToCategory[0] = UNKNOWN
    for i in xrange(len(categoryList)):
      self.categoryToIndex[categoryList[i]] = i+1
      self.indexToCategory[i+1] = categoryList[i]

    # radius=1 on integer inputs guarantees each category gets its own
    # non-overlapping block of w bits, so outputs are never mixtures.
    self.encoder = ScalarEncoder(w, minval=0, maxval=self.ncategories - 1,
                                 radius=1, periodic=False, forced=forced)
    self.width = w * self.ncategories
    assert self.encoder.getWidth() == self.width

    self.description = [(name, 0)]
    self.name = name

    # These are used to support the topDownCompute method
    self._topDownMappingM = None

    # This gets filled in by getBucketValues
    self._bucketValues = None

  def getDecoderOutputFieldTypes(self):
    """ [Encoder class virtual method override]
    """
    # TODO: change back to string meta-type after the decoding logic is fixed
    # to output strings instead of internal index values.
    #return (FieldMetaType.string,)
    return (FieldMetaType.integer,)

  def getWidth(self):
    """Return the total bit width of the encoded output."""
    return self.width

  def getDescription(self):
    """Return the (name, offset) description list for this encoder."""
    return self.description

  def getScalars(self, input):
    """ See method description in base.py

    Missing data maps to [None]; unknown categories map to index 0.
    """
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return numpy.array([None])
    else:
      return numpy.array([self.categoryToIndex.get(input, 0)])

  def getBucketIndices(self, input):
    """ See method description in base.py """

    # Get the bucket index from the underlying scalar encoder
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return [None]
    else:
      # Unknown categories fall back to index 0 ("<UNKNOWN>")
      return self.encoder.getBucketIndices(self.categoryToIndex.get(input, 0))

  def encodeIntoArray(self, input, output):
    """Encode ``input`` into ``output`` in place; missing data encodes as
    all zeros."""
    # if not found, we encode category 0
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      output[0:] = 0
      val = "<missing>"
    else:
      val = self.categoryToIndex.get(input, 0)
      self.encoder.encodeIntoArray(val, output)

    if self.verbosity >= 2:
      print "input:", input, "va:", val, "output:", output
      print "decoded:", self.decodedToStr(self.decode(output))

  def decode(self, encoded, parentFieldName=''):
    """ See the function description in base.py
    """

    # Get the scalar values from the underlying scalar encoder
    (fieldsDict, fieldNames) = self.encoder.decode(encoded)
    if len(fieldsDict) == 0:
      return (fieldsDict, fieldNames)

    # Expect only 1 field
    assert(len(fieldsDict) == 1)

    # Get the list of categories the scalar values correspond to and
    # generate the description from the category name(s).
    (inRanges, inDesc) = fieldsDict.values()[0]
    outRanges = []
    desc = ""
    for (minV, maxV) in inRanges:
      # Round scalar range endpoints to the nearest category indices and
      # list every category name covered by the range.
      minV = int(round(minV))
      maxV = int(round(maxV))
      outRanges.append((minV, maxV))
      while minV <= maxV:
        if len(desc) > 0:
          desc += ", "
        desc += self.indexToCategory[minV]
        minV += 1

    # Return result
    if parentFieldName != '':
      fieldName = "%s.%s" % (parentFieldName, self.name)
    else:
      fieldName = self.name
    return ({fieldName: (outRanges, desc)}, [fieldName])

  def closenessScores(self, expValues, actValues, fractional=True,):
    """ See the function description in base.py

    kwargs will have the keyword "fractional", which is ignored by this encoder

    Categories either match exactly (1.0) or not at all (0.0); with
    fractional=False the score is inverted (0.0 for a match).
    """
    expValue = expValues[0]
    actValue = actValues[0]

    if expValue == actValue:
      closeness = 1.0
    else:
      closeness = 0.0

    if not fractional:
      closeness = 1.0 - closeness

    return numpy.array([closeness])

  def getBucketValues(self):
    """ See the function description in base.py """

    # Lazily build and cache the list of category values, one per bucket.
    if self._bucketValues is None:
      numBuckets = len(self.encoder.getBucketValues())
      self._bucketValues = []
      for bucketIndex in range(numBuckets):
        self._bucketValues.append(self.getBucketInfo([bucketIndex])[0].value)

    return self._bucketValues

  def getBucketInfo(self, buckets):
    """ See the function description in base.py
    """
    # For the category encoder, the bucket index is the category index
    bucketInfo = self.encoder.getBucketInfo(buckets)[0]

    categoryIndex = int(round(bucketInfo.value))
    category = self.indexToCategory[categoryIndex]

    return [EncoderResult(value=category, scalar=categoryIndex,
                          encoding=bucketInfo.encoding)]

  def topDownCompute(self, encoded):
    """ See the function description in base.py
    """
    # Delegate to the scalar encoder, then translate the scalar result back
    # to its category string.
    encoderResult = self.encoder.topDownCompute(encoded)[0]
    value = encoderResult.value
    categoryIndex = int(round(value))
    category = self.indexToCategory[categoryIndex]

    return EncoderResult(value=category, scalar=categoryIndex,
                         encoding=encoderResult.encoding)

  @classmethod
  def getSchema(cls):
    """Return the capnp schema used by read()/write()."""
    return CategoryEncoderProto

  @classmethod
  def read(cls, proto):
    """Deserialize an encoder instance from a capnp proto (bypasses
    __init__)."""
    encoder = object.__new__(cls)

    encoder.verbosity = proto.verbosity
    encoder.encoder = ScalarEncoder.read(proto.encoder)
    encoder.width = proto.width
    encoder.description = [(proto.name, 0)]
    encoder.name = proto.name
    encoder.indexToCategory = {x.index: x.category
                               for x in proto.indexToCategory}
    # The reverse mapping excludes UNKNOWN, mirroring __init__ (UNKNOWN is
    # never a user-supplied category).
    encoder.categoryToIndex = {category: index
                               for index, category
                               in encoder.indexToCategory.items()
                               if category != UNKNOWN}
    encoder._topDownMappingM = None
    encoder.ncategories = len(proto.indexToCategory)
    encoder._bucketValues = None
    encoder.encoders = None

    return encoder

  def write(self, proto):
    """Serialize this encoder into a capnp proto (inverse of read())."""
    proto.width = self.width
    proto.indexToCategory = [
      {"index": index, "category": category}
      for index, category in self.indexToCategory.items()
    ]
    proto.name = self.name
    proto.verbosity = self.verbosity
    self.encoder.write(proto.encoder)
| 8,166 | Python | .py | 199 | 35.311558 | 82 | 0.688354 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,980 | scalar_space.py | numenta_nupic-legacy/src/nupic/encoders/scalar_space.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.encoders.base import Encoder
from nupic.encoders import (DeltaEncoder,
AdaptiveScalarEncoder)
class ScalarSpaceEncoder(Encoder):
  """
  A factory encoder that can be used to permute the encodings through
  different spaces. These include absolute value, delta, log space, etc.

  Instantiating this class never yields a ScalarSpaceEncoder instance:
  ``__new__`` constructs and returns one of the concrete encoders below.

  :param space: (string) if "absolute", an :class:`.AdaptiveScalarEncoder` is
         returned. Otherwise, a :class:`.DeltaEncoder` is returned.

  All other parameters are forwarded unchanged to the chosen encoder's
  constructor; see :class:`.AdaptiveScalarEncoder` / :class:`.DeltaEncoder`.
  """

  SPACE_ABSOLUTE = "absolute"
  SPACE_DELTA = "delta"

  def __init__(self):
    # Never invoked: __new__ returns an instance of a *different* class, so
    # Python skips ScalarSpaceEncoder.__init__ entirely. Kept for interface
    # compatibility.
    pass

  def __new__(cls, w, minval=None, maxval=None, periodic=False, n=0, radius=0,
              resolution=0, name=None, verbosity=0, clipInput=False,
              space="absolute", forced=False):
    # NOTE: the first parameter of __new__ is the class itself; the original
    # code named it ``self`` and assigned ``self._encoder = None``, which
    # mutated the class object to no effect. That dead assignment is removed.
    if space == cls.SPACE_ABSOLUTE:
      return AdaptiveScalarEncoder(w, minval, maxval, periodic, n, radius,
                                   resolution, name, verbosity, clipInput,
                                   forced=forced)
    # Any other value of ``space`` (including SPACE_DELTA) selects the
    # delta encoder, matching the original behavior.
    return DeltaEncoder(w, minval, maxval, periodic, n, radius, resolution,
                        name, verbosity, clipInput, forced=forced)
| 2,154 | Python | .py | 46 | 40.956522 | 79 | 0.647928 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,981 | adaptive_scalar.py | numenta_nupic-legacy/src/nupic/encoders/adaptive_scalar.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numpy as np
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.scalar import ScalarEncoder
from nupic.utils import MovingAverage
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.adaptive_scalar_capnp import AdaptiveScalarEncoderProto
class AdaptiveScalarEncoder(ScalarEncoder):
"""
This is an implementation of the scalar encoder that adapts the min and
max of the scalar encoder dynamically. This is essential to the streaming
model of the online prediction framework.
Initialization of an adapive encoder using resolution or radius is not supported;
it must be intitialized with n. This n is kept constant while the min and max of the
encoder changes.
The adaptive encoder must be have periodic set to false.
The adaptive encoder may be initialized with a minval and maxval or with `None`
for each of these. In the latter case, the min and max are set as the 1st and 99th
percentile over a window of the past 100 records.
**Note:** the sliding window may record duplicates of the values in the dataset,
and therefore does not reflect the statistical distribution of the input data
and may not be used to calculate the median, mean etc.
For params, see :class:`~.nupic.encoders.scalar.ScalarEncoder`.
:raises: Exception if input is periodic.
"""
def __init__(self, w, minval=None, maxval=None, periodic=False, n=0, radius=0,
resolution=0, name=None, verbosity=0, clipInput=True, forced=False):
self._learningEnabled = True
if periodic:
#Adaptive scalar encoders take non-periodic inputs only
raise Exception('Adaptive scalar encoder does not encode periodic inputs')
assert n!=0 #An adaptive encoder can only be intialized using n
super(AdaptiveScalarEncoder, self).__init__(w=w, n=n, minval=minval, maxval=maxval,
clipInput=True, name=name, verbosity=verbosity, forced=forced)
self.recordNum=0 #how many inputs have been sent to the encoder?
self.slidingWindow = MovingAverage(300)
def _setEncoderParams(self):
"""
Set the radius, resolution and range. These values are updated when minval
and/or maxval change.
"""
self.rangeInternal = float(self.maxval - self.minval)
self.resolution = float(self.rangeInternal) / (self.n - self.w)
self.radius = self.w * self.resolution
self.range = self.rangeInternal + self.resolution
# nInternal represents the output area excluding the possible padding on each side
self.nInternal = self.n - 2 * self.padding
# Invalidate the bucket values cache so that they get recomputed
self._bucketValues = None
def setFieldStats(self, fieldName, fieldStats):
"""
TODO: document
"""
#If the stats are not fully formed, ignore.
if fieldStats[fieldName]['min'] == None or \
fieldStats[fieldName]['max'] == None:
return
self.minval = fieldStats[fieldName]['min']
self.maxval = fieldStats[fieldName]['max']
if self.minval == self.maxval:
self.maxval+=1
self._setEncoderParams()
def _setMinAndMax(self, input, learn):
"""
Potentially change the minval and maxval using input.
**The learn flag is currently not supported by cla regions.**
"""
self.slidingWindow.next(input)
if self.minval is None and self.maxval is None:
self.minval = input
self.maxval = input+1 #When the min and max and unspecified and only one record has been encoded
self._setEncoderParams()
elif learn:
sorted = self.slidingWindow.getSlidingWindow()
sorted.sort()
minOverWindow = sorted[0]
maxOverWindow = sorted[len(sorted)-1]
if minOverWindow < self.minval:
#initialBump = abs(self.minval-minOverWindow)*(1-(min(self.recordNum, 200.0)/200.0))*2 #decrement minval more aggressively in the beginning
if self.verbosity >= 2:
print "Input %s=%.2f smaller than minval %.2f. Adjusting minval to %.2f"\
% (self.name, input, self.minval, minOverWindow)
self.minval = minOverWindow #-initialBump
self._setEncoderParams()
if maxOverWindow > self.maxval:
#initialBump = abs(self.maxval-maxOverWindow)*(1-(min(self.recordNum, 200.0)/200.0))*2 #decrement maxval more aggressively in the beginning
if self.verbosity >= 2:
print "Input %s=%.2f greater than maxval %.2f. Adjusting maxval to %.2f" \
% (self.name, input, self.maxval, maxOverWindow)
self.maxval = maxOverWindow #+initialBump
self._setEncoderParams()
def getBucketIndices(self, input, learn=None):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.getBucketIndices]
"""
self.recordNum +=1
if learn is None:
learn = self._learningEnabled
if type(input) is float and math.isnan(input):
input = SENTINEL_VALUE_FOR_MISSING_DATA
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
return [None]
else:
self._setMinAndMax(input, learn)
return super(AdaptiveScalarEncoder, self).getBucketIndices(input)
def encodeIntoArray(self, input, output,learn=None):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.encodeIntoArray]
"""
self.recordNum +=1
if learn is None:
learn = self._learningEnabled
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
output[0:self.n] = 0
elif not math.isnan(input):
self._setMinAndMax(input, learn)
super(AdaptiveScalarEncoder, self).encodeIntoArray(input, output)
def getBucketInfo(self, buckets):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.getBucketInfo]
"""
if self.minval is None or self.maxval is None:
return [EncoderResult(value=0, scalar=0,
encoding=numpy.zeros(self.n))]
return super(AdaptiveScalarEncoder, self).getBucketInfo(buckets)
def topDownCompute(self, encoded):
"""
[overrides nupic.encoders.scalar.ScalarEncoder.topDownCompute]
"""
if self.minval is None or self.maxval is None:
return [EncoderResult(value=0, scalar=0,
encoding=numpy.zeros(self.n))]
return super(AdaptiveScalarEncoder, self).topDownCompute(encoded)
def __str__(self):
string = "AdaptiveScalarEncoder:"
string += " min: {minval}".format(minval = self.minval)
string += " max: {maxval}".format(maxval = self.maxval)
string += " w: {w}".format(w = self.w)
string += " n: {n}".format(n = self.n)
string += " resolution: {resolution}".format(resolution = self.resolution)
string += " radius: {radius}".format(radius = self.radius)
string += " periodic: {periodic}".format(periodic = self.periodic)
string += " nInternal: {nInternal}".format(nInternal = self.nInternal)
string += " rangeInternal: {rangeInternal}".format(rangeInternal = self.rangeInternal)
string += " padding: {padding}".format(padding = self.padding)
return string
@classmethod
def getSchema(cls):
return AdaptiveScalarEncoderProto
@classmethod
def read(cls, proto):
encoder = super(AdaptiveScalarEncoder, cls).read(proto)
encoder.recordNum = proto.recordNum
encoder.slidingWindow = MovingAverage.read(proto.slidingWindow)
return encoder
def write(self, proto):
super(AdaptiveScalarEncoder, self).write(proto)
proto.recordNum = self.recordNum
self.slidingWindow.write(proto.slidingWindow)
| 8,535 | Python | .py | 183 | 41.120219 | 152 | 0.698518 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,982 | utils.py | numenta_nupic-legacy/src/nupic/encoders/utils.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from array import array
def bitsToString(arr):
  """Returns a string representing a numpy array of 0's and 1's.

  Bits equal to 1 render as '*', all other values as '.'.

  BUGFIX: the original built and returned an ``array('c', ...)`` object, not
  the string its docstring promised (and the 'c' typecode no longer exists in
  Python 3). Now returns a plain str.

  :param arr: any sequence of bit values (list, numpy array, ...)
  :returns: (str) one character per element of ``arr``
  """
  return ''.join('*' if bit == 1 else '.' for bit in arr)
| 1,200 | Python | .py | 28 | 41.035714 | 72 | 0.657241 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,983 | sdr_category.py | numenta_nupic-legacy/src/nupic/encoders/sdr_category.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import random
import numpy
from nupic.data.field_meta import FieldMetaType
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.base import Encoder, EncoderResult
from nupic.bindings.math import SM32, GetNTAReal, Random as NupicRandom
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.sdr_category_capnp import SDRCategoryEncoderProto
class SDRCategoryEncoder(Encoder):
"""
Encodes a list of discrete categories (described by strings), that aren't
related to each other.
Each encoding is an SDR in which w out of n bits are turned on.
Unknown categories are encoded as a single value.
Internally we use a :class:`.ScalarEncoder` with a radius of 1, but since we
only encode integers, we never get mixture outputs.
The :class:`.CategoryEncoder` uses a different method to encode categories
:param categoryList: list of discrete string categories, if ``None`` then
categories will automatically be added as they are
encountered
:param forced: if True, skip checks for parameters' settings; see
:class:`.ScalarEncoder` for details. (default False)
"""
def __init__(self, n, w, categoryList = None, name="category", verbosity=0,
encoderSeed=1, forced=False):
self.n = n
self.w = w
self._learningEnabled = True
# initialize the random number generators
self._seed(encoderSeed)
if not forced:
# -- this is just to catch bad parameter choices
if (self.n/self.w) < 2: # w is 50% of total len
raise ValueError("Number of ON bits in SDR (%d) must be much smaller than "
"the output width (%d)" % (self.w, self.n))
# Another arbitrary cutoff to catch likely mistakes
if self.w < 21:
raise ValueError("Number of bits in the SDR (%d) must be greater than 2, and should be >= 21, pass forced=True to init() to override this check"
% self.w)
self._initOverlap()
self.verbosity = verbosity
self.description = [(name, 0)]
self.name = name
self.categoryToIndex = dict()
self.ncategories = 0
self.categories = list()
self.sdrs = None
# Always include an 'unknown' category for
# edge cases
self._addCategory("<UNKNOWN>")
if categoryList is None:
self._learningEnabled = True
else:
self._learningEnabled = False
for category in categoryList:
self._addCategory(category)
assert self.ncategories == len(categoryList) + 1
# Not used by this class. Used for decoding (scalarsToStr())
self.encoders = None
# This matrix is used for the topDownCompute. We build it the first time
# topDownCompute is called
self._topDownMappingM = None
self._topDownValues = None
def _initOverlap(self):
# Calculate average overlap of SDRs for decoding
# Density is fraction of bits on, and it is also the
# probability that any individual bit is on.
density = float(self.w) / self.n
self.averageOverlap = self.w * density
# We can do a better job of calculating the threshold. For now, just
# something quick and dirty, which is the midway point between average
# and full overlap. averageOverlap is always < w, so the threshold
# is always < w.
self.thresholdOverlap = int((self.averageOverlap + self.w)/2)
# 1.25 -- too sensitive for decode test, so make it less sensitive
if self.thresholdOverlap < self.w - 3:
self.thresholdOverlap = self.w - 3
def __setstate__(self, state):
self.__dict__.update(state)
# Initialize self.random as an instance of NupicRandom derived from the
# previous numpy random state
randomState = state["random"]
if isinstance(randomState, numpy.random.mtrand.RandomState):
self.random = NupicRandom(randomState.randint(sys.maxint))
def _seed(self, seed=-1):
"""
Initialize the random seed
"""
if seed != -1:
self.random = NupicRandom(seed)
else:
self.random = NupicRandom()
def getDecoderOutputFieldTypes(self):
""" [Encoder class virtual method override]
"""
# TODO: change back to string meta-type after the decoding logic is fixed
# to output strings instead of internal index values.
return (FieldMetaType.string,)
#return (FieldMetaType.integer,)
def _addCategory(self, category):
if category in self.categories:
raise RuntimeError("Attempt to add add encoder category '%s' "
"that already exists" % category)
if self.sdrs is None:
assert self.ncategories == 0
assert len(self.categoryToIndex) == 0
# Initial allocation -- 16 rows
self.sdrs = numpy.zeros((16, self.n), dtype='uint8')
elif self.ncategories > self.sdrs.shape[0] - 2:
# Preallocated sdrs are used up. Double our size
currentMax = self.sdrs.shape[0]
newsdrs = numpy.zeros((currentMax * 2, self.n), dtype='uint8')
newsdrs[0:currentMax] = self.sdrs[0:currentMax]
self.sdrs = newsdrs
newrep = self._newRep()
self.sdrs[self.ncategories] = newrep
self.categories.append(category)
self.categoryToIndex[category] = self.ncategories
self.ncategories += 1
self._topDownMappingM = None
def _newRep(self):
"""Generate a new and unique representation. Returns a numpy array
of shape (n,). """
maxAttempts = 1000
for _ in xrange(maxAttempts):
foundUnique = True
population = numpy.arange(self.n, dtype=numpy.uint32)
choices = numpy.arange(self.w, dtype=numpy.uint32)
oneBits = sorted(self.random.sample(population, choices))
sdr = numpy.zeros(self.n, dtype='uint8')
sdr[oneBits] = 1
for i in xrange(self.ncategories):
if (sdr == self.sdrs[i]).all():
foundUnique = False
break
if foundUnique:
break;
if not foundUnique:
raise RuntimeError("Error, could not find unique pattern %d after "
"%d attempts" % (self.ncategories, maxAttempts))
return sdr
def getWidth(self):
return self.n
def getDescription(self):
return self.description
def getScalars(self, input):
""" See method description in base.py """
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
return numpy.array([0])
index = self.categoryToIndex.get(input, None)
if index is None:
if self._learningEnabled:
self._addCategory(input)
index = self.ncategories - 1
else:
# if not found, we encode category 0
index = 0
return numpy.array([index])
def getBucketIndices(self, input):
""" See method description in base.py """
# For category encoder, the "scalar" we map to each category is the
# bucket index
return self.getScalars(input)
def encodeIntoArray(self, input, output):
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
output[0:self.n] = 0
index = 0
else:
index = self.getBucketIndices(input)[0]
output[0:self.n] = self.sdrs[index,:]
if self.verbosity >= 2:
print "input:", input, "index:", index, "output:", output
print "decoded:", self.decodedToStr(self.decode(output))
def decode(self, encoded, parentFieldName=''):
""" See the function description in base.py
"""
assert (encoded[0:self.n] <= 1.0).all()
resultString = ""
resultRanges = []
overlaps = (self.sdrs * encoded[0:self.n]).sum(axis=1)
if self.verbosity >= 2:
print "Overlaps for decoding:"
for i in xrange(0, self.ncategories):
print "%d %s" % (overlaps[i], self.categories[i])
matchingCategories = (overlaps > self.thresholdOverlap).nonzero()[0]
for index in matchingCategories:
if resultString != "":
resultString += " "
resultString += str(self.categories[index])
resultRanges.append([int(index),int(index)])
if parentFieldName != '':
fieldName = "%s.%s" % (parentFieldName, self.name)
else:
fieldName = self.name
return ({fieldName: (resultRanges, resultString)}, [fieldName])
def _getTopDownMapping(self):
""" Return the interal _topDownMappingM matrix used for handling the
bucketInfo() and topDownCompute() methods. This is a matrix, one row per
category (bucket) where each row contains the encoded output for that
category.
"""
# -------------------------------------------------------------------------
# Do we need to build up our reverse mapping table?
if self._topDownMappingM is None:
# Each row represents an encoded output pattern
self._topDownMappingM = SM32(self.ncategories, self.n)
outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())
for i in xrange(self.ncategories):
self.encodeIntoArray(self.categories[i], outputSpace)
self._topDownMappingM.setRowFromDense(i, outputSpace)
return self._topDownMappingM
def getBucketValues(self):
""" See the function description in base.py """
return self.categories
def getBucketInfo(self, buckets):
""" See the function description in base.py
"""
if self.ncategories==0:
return 0
topDownMappingM = self._getTopDownMapping()
categoryIndex = buckets[0]
category = self.categories[categoryIndex]
encoding = topDownMappingM.getRow(categoryIndex)
return [EncoderResult(value=category, scalar=categoryIndex,
encoding=encoding)]
def topDownCompute(self, encoded):
""" See the function description in base.py
"""
if self.ncategories==0:
return 0
topDownMappingM = self._getTopDownMapping()
categoryIndex = topDownMappingM.rightVecProd(encoded).argmax()
category = self.categories[categoryIndex]
encoding = topDownMappingM.getRow(categoryIndex)
return EncoderResult(value=category, scalar=categoryIndex, encoding=encoding)
def closenessScores(self, expValues, actValues, fractional=True):
""" See the function description in base.py
kwargs will have the keyword "fractional", which is ignored by this encoder
"""
expValue = expValues[0]
actValue = actValues[0]
if expValue == actValue:
closeness = 1.0
else:
closeness = 0.0
if not fractional:
closeness = 1.0 - closeness
return numpy.array([closeness])
@classmethod
def getSchema(cls):
return SDRCategoryEncoderProto
@classmethod
def read(cls, proto):
encoder = object.__new__(cls)
encoder.n = proto.n
encoder.w = proto.w
encoder.random = NupicRandom()
encoder.random.read(proto.random)
encoder.verbosity = proto.verbosity
encoder.name = proto.name
encoder.description = [(proto.name, 0)]
encoder.categories = list(proto.categories)
encoder.sdrs = numpy.array(proto.sdrs, dtype=numpy.uint8)
encoder.categoryToIndex = {category:index
for index, category
in enumerate(encoder.categories)}
encoder.ncategories = len(encoder.categories)
encoder._learningEnabled = proto.learningEnabled
encoder._initOverlap()
encoder._topDownMappingM = None
encoder._topDownValues = None
encoder.encoders = None
return encoder
def write(self, proto):
proto.n = self.n
proto.w = self.w
self.random.write(proto.random)
proto.verbosity = self.verbosity
proto.name = self.name
proto.categories = self.categories
proto.sdrs = self.sdrs.tolist()
proto.learningEnabled = self._learningEnabled
| 12,607 | Python | .py | 306 | 35.189542 | 152 | 0.676774 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,984 | date.py | numenta_nupic-legacy/src/nupic/encoders/date.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.base import Encoder
from nupic.encoders.scalar import ScalarEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.date_capnp import DateEncoderProto
class DateEncoder(Encoder):
  """
  A date encoder encodes a date according to encoding parameters specified in
  its constructor. The input to a date encoder is a datetime.datetime object.
  The output is the concatenation of several sub-encodings, each of which
  encodes a different aspect of the date. Which sub-encodings are present, and
  details of those sub-encodings, are specified in the DateEncoder constructor.

  Each parameter describes one attribute to encode. By default, the attribute
  is not encoded.

  :param season: (int | tuple) Season of the year, where units = day.

    - (int) width of attribute; default radius = 91.5 days (1 season)
    - (tuple) season[0] = width; season[1] = radius

  :param dayOfWeek: (int | tuple) Day of week, where monday = 0, units = 1 day.

    - (int) width of attribute; default radius = 1 day
    - (tuple) dayOfWeek[0] = width; dayOfWeek[1] = radius

  :param weekend: (int) Is a weekend or not. A block of bits either 0s or 1s.

    - (int) width of attribute

  :param holiday: (int) Is a holiday or not, boolean: 0, 1

    - (int) width of attribute

  :param timeOfDay: (int | tuple) Time of day, where midnight = 0, units = hour.

    - (int) width of attribute: default radius = 4 hours
    - (tuple) timeOfDay[0] = width; timeOfDay[1] = radius

  :param customDays: (tuple) A way to custom encode specific days of the week.

    - [0] (int) Width of attribute
    - [1] (str | list) Either a string representing a day of the week like
      "Monday" or "mon", or a list of these strings.

  :param forced: (default True) if True, skip checks for parameters' settings.
    See :class:`~.nupic.encoders.scalar.ScalarEncoder` for details.

  :param holidays: (list) a list of tuples for holidays.

    - Each holiday is either (month, day) or (year, month, day).
      The former will use the same month day every year eg: (12, 25) for
      Christmas. The latter will be a one off holiday eg: (2018, 4, 1) for
      Easter Sunday 2018

  :raises ValueError: if any entry in ``holidays`` is not a sequence of
    length 2 or 3.
  """

  def __init__(self, season=0, dayOfWeek=0, weekend=0, holiday=0, timeOfDay=0,
               customDays=0, name='', forced=True, holidays=()):
    self.width = 0
    self.description = []
    self.name = name

    # This will contain a list of (name, encoder, offset) tuples for use by
    # the decode() method. Sub-encoders are appended in a fixed order; every
    # other method that walks the sub-fields must use this same order.
    self.encoders = []

    self.seasonEncoder = None
    if season != 0:
      # Ignore leapyear differences -- assume 366 days in a year
      # Radius = 91.5 days = length of season
      # Value is number of days since beginning of year (0 - 355)
      if hasattr(season, "__getitem__"):
        w = season[0]
        radius = season[1]
      else:
        w = season
        radius = 91.5

      self.seasonEncoder = ScalarEncoder(w=w, minval=0, maxval=366,
                                         radius=radius, periodic=True,
                                         name="season", forced=forced)
      self.seasonOffset = self.width
      self.width += self.seasonEncoder.getWidth()
      self.description.append(("season", self.seasonOffset))
      self.encoders.append(("season", self.seasonEncoder, self.seasonOffset))

    self.dayOfWeekEncoder = None
    if dayOfWeek != 0:
      # Value is day of week (floating point)
      # Radius is 1 day
      if hasattr(dayOfWeek, "__getitem__"):
        w = dayOfWeek[0]
        radius = dayOfWeek[1]
      else:
        w = dayOfWeek
        radius = 1

      self.dayOfWeekEncoder = ScalarEncoder(w=w, minval=0, maxval=7,
                                            radius=radius, periodic=True,
                                            name="day of week", forced=forced)
      self.dayOfWeekOffset = self.width
      self.width += self.dayOfWeekEncoder.getWidth()
      self.description.append(("day of week", self.dayOfWeekOffset))
      self.encoders.append(
        ("day of week", self.dayOfWeekEncoder, self.dayOfWeekOffset))

    self.weekendEncoder = None
    if weekend != 0:
      # Binary value. Not sure if this makes sense. Also is somewhat redundant
      # with dayOfWeek
      # Append radius if it was not provided
      if not hasattr(weekend, "__getitem__"):
        weekend = (weekend, 1)
      self.weekendEncoder = ScalarEncoder(w=weekend[0], minval=0, maxval=1,
                                          periodic=False, radius=weekend[1],
                                          name="weekend", forced=forced)
      self.weekendOffset = self.width
      self.width += self.weekendEncoder.getWidth()
      self.description.append(("weekend", self.weekendOffset))
      self.encoders.append(("weekend", self.weekendEncoder, self.weekendOffset))

    # Set up custom days encoder, first argument in tuple is width
    # second is either a single day of the week or a list of the days
    # you want encoded as ones.
    self.customDaysEncoder = None
    if customDays != 0:
      assert len(customDays) == 2, "Please provide a w and the desired days"
      if isinstance(customDays[1], list):
        # One trailing space per day, matching the historical name format.
        customDayEncoderName = "".join(str(day) + " " for day in customDays[1])
        daysToParse = customDays[1]
      elif isinstance(customDays[1], str):
        customDayEncoderName = customDays[1]
        daysToParse = [customDays[1]]
      else:
        assert False, "You must provide either a list of days or a single day"

      # Parse the textual day names into datetime weekday indices (Monday=0).
      dayIndices = {"mon": 0, "monday": 0,
                    "tue": 1, "tuesday": 1,
                    "wed": 2, "wednesday": 2,
                    "thu": 3, "thursday": 3,
                    "fri": 4, "friday": 4,
                    "sat": 5, "saturday": 5,
                    "sun": 6, "sunday": 6}
      self.customDays = []
      for day in daysToParse:
        index = dayIndices.get(day.lower())
        assert index is not None, \
            "Unable to understand %s as a day of week" % str(day)
        self.customDays.append(index)

      self.customDaysEncoder = ScalarEncoder(w=customDays[0], minval=0,
                                             maxval=1, periodic=False,
                                             radius=1,
                                             name=customDayEncoderName,
                                             forced=forced)
      self.customDaysOffset = self.width
      self.width += self.customDaysEncoder.getWidth()
      self.description.append(("customdays", self.customDaysOffset))
      self.encoders.append(
        ("customdays", self.customDaysEncoder, self.customDaysOffset))

    self.holidayEncoder = None
    if holiday != 0:
      # A "continuous" binary value. = 1 on the holiday itself and smooth ramp
      # 0->1 on the day before the holiday and 1->0 on the day after the
      # holiday.
      self.holidayEncoder = ScalarEncoder(w=holiday, minval=0, maxval=1,
                                          periodic=False, radius=1,
                                          name="holiday", forced=forced)
      self.holidayOffset = self.width
      self.width += self.holidayEncoder.getWidth()
      self.description.append(("holiday", self.holidayOffset))
      self.encoders.append(("holiday", self.holidayEncoder, self.holidayOffset))

    # Validate holidays: each entry must be a (month, day) or
    # (year, month, day) sequence. (BUG FIX: the original condition
    # `not (hasattr(h, "__getitem__") or len(h) not in [2,3])` was inverted
    # and never rejected sequences of the wrong length.)
    for h in holidays:
      if not hasattr(h, "__getitem__") or len(h) not in (2, 3):
        raise ValueError("Holidays must be an iterable of length 2 or 3")
    self.holidays = holidays

    self.timeOfDayEncoder = None
    if timeOfDay != 0:
      # Value is time of day in hours
      # Radius = 4 hours, e.g. morning, afternoon, evening, early night,
      # late night, etc.
      if hasattr(timeOfDay, "__getitem__"):
        w = timeOfDay[0]
        radius = timeOfDay[1]
      else:
        w = timeOfDay
        radius = 4

      self.timeOfDayEncoder = ScalarEncoder(w=w, minval=0, maxval=24,
                                            periodic=True, radius=radius,
                                            name="time of day", forced=forced)
      self.timeOfDayOffset = self.width
      self.width += self.timeOfDayEncoder.getWidth()
      self.description.append(("time of day", self.timeOfDayOffset))
      self.encoders.append(
        ("time of day", self.timeOfDayEncoder, self.timeOfDayOffset))

  def getWidth(self):
    return self.width

  def getScalarNames(self, parentFieldName=''):
    """ See method description in base.py """
    names = []

    # This forms a name which is the concatenation of the parentFieldName
    # passed in and the encoder's own name.
    def _formFieldName(encoder):
      if parentFieldName == '':
        return encoder.name
      else:
        return '%s.%s' % (parentFieldName, encoder.name)

    # -------------------------------------------------------------------------
    # Get the scalar names for each sub-field.
    # BUG FIX: the names must come out in the same order that sub-encoders were
    # registered in self.encoders (season, day of week, weekend, customdays,
    # holiday, time of day); the original listed customDays before weekend,
    # which misaligned names with the values from getScalars() whenever both
    # attributes were enabled.
    if self.seasonEncoder is not None:
      names.append(_formFieldName(self.seasonEncoder))
    if self.dayOfWeekEncoder is not None:
      names.append(_formFieldName(self.dayOfWeekEncoder))
    if self.weekendEncoder is not None:
      names.append(_formFieldName(self.weekendEncoder))
    if self.customDaysEncoder is not None:
      names.append(_formFieldName(self.customDaysEncoder))
    if self.holidayEncoder is not None:
      names.append(_formFieldName(self.holidayEncoder))
    if self.timeOfDayEncoder is not None:
      names.append(_formFieldName(self.timeOfDayEncoder))

    return names

  def getEncodedValues(self, input):
    """ See method description in base.py """
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return numpy.array([None])

    assert isinstance(input, datetime.datetime)
    values = []

    # -------------------------------------------------------------------------
    # Get the scalar values for each sub-field
    timetuple = input.timetuple()
    timeOfDay = timetuple.tm_hour + float(timetuple.tm_min) / 60.0

    if self.seasonEncoder is not None:
      dayOfYear = timetuple.tm_yday
      # input.timetuple() computes the day of year 1 based, so convert to
      # 0 based
      values.append(dayOfYear - 1)

    if self.dayOfWeekEncoder is not None:
      dayOfWeek = timetuple.tm_wday + timeOfDay / 24.0
      values.append(dayOfWeek)

    if self.weekendEncoder is not None:
      # saturday, sunday or friday evening
      if timetuple.tm_wday == 6 or timetuple.tm_wday == 5 \
          or (timetuple.tm_wday == 4 and timeOfDay > 18):
        weekend = 1
      else:
        weekend = 0
      values.append(weekend)

    if self.customDaysEncoder is not None:
      if timetuple.tm_wday in self.customDays:
        customDay = 1
      else:
        customDay = 0
      values.append(customDay)

    if self.holidayEncoder is not None:
      # A "continuous" binary value. = 1 on the holiday itself and smooth ramp
      # 0->1 on the day before the holiday and 1->0 on the day after the
      # holiday. Defaults to Christmas (12, 25) when no holidays were
      # configured.
      if len(self.holidays) == 0:
        holidays = [(12, 25)]
      else:
        holidays = self.holidays
      val = 0
      for h in holidays:
        # hdate is midnight on the holiday
        if len(h) == 3:
          hdate = datetime.datetime(h[0], h[1], h[2], 0, 0, 0)
        else:
          hdate = datetime.datetime(timetuple.tm_year, h[0], h[1], 0, 0, 0)
        if input > hdate:
          diff = input - hdate
          if diff.days == 0:
            # return 1 on the holiday itself
            val = 1
            break
          elif diff.days == 1:
            # ramp smoothly from 1 -> 0 on the next day
            val = 1.0 - (float(diff.seconds) / 86400)
            break
        else:
          diff = hdate - input
          if diff.days == 0:
            # ramp smoothly from 0 -> 1 on the previous day
            val = 1.0 - (float(diff.seconds) / 86400)
      values.append(val)

    if self.timeOfDayEncoder is not None:
      values.append(timeOfDay)

    return values

  def getScalars(self, input):
    """
    See method description in :meth:`~.nupic.encoders.base.Encoder.getScalars`.

    :param input: (datetime) representing the time being encoded

    :returns: A numpy array of the corresponding scalar values in the following
              order: season, dayOfWeek, weekend, customDays, holiday,
              timeOfDay. Some of these fields might be omitted if they were
              not specified in the encoder.
    """
    return numpy.array(self.getEncodedValues(input))

  def getBucketIndices(self, input):
    """ See method description in base.py """
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      # Missing data: one None bucket per sub-field.
      return [None] * len(self.encoders)
    else:
      assert isinstance(input, datetime.datetime)

      # Get the scalar values for each sub-field
      scalars = self.getScalars(input)

      # Encode each sub-field
      result = []
      for i, (name, encoder, offset) in enumerate(self.encoders):
        result.extend(encoder.getBucketIndices(scalars[i]))
      return result

  def encodeIntoArray(self, input, output):
    """ See method description in base.py """
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      output[0:] = 0
    else:
      if not isinstance(input, datetime.datetime):
        raise ValueError("Input is type %s, expected datetime. Value: %s" % (
            type(input), str(input)))

      # Get the scalar values for each sub-field
      scalars = self.getScalars(input)

      # Encode each sub-field into its slice of the output array.
      for i, (name, encoder, offset) in enumerate(self.encoders):
        encoder.encodeIntoArray(scalars[i], output[offset:])

  def getDescription(self):
    return self.description

  @classmethod
  def getSchema(cls):
    return DateEncoderProto

  @classmethod
  def read(cls, proto):
    # Rebuild an instance from a capnp proto without running __init__.
    encoder = object.__new__(cls)
    encoder.encoders = []
    encoder.description = []
    encoder.width = 0
    encoder.name = proto.name

    def addEncoder(encoderAttr, offsetAttr):
      # A sub-encoder with n == 0 in the proto means it was disabled.
      protoVal = getattr(proto, encoderAttr)
      if protoVal.n:
        setattr(encoder, encoderAttr, ScalarEncoder.read(protoVal))
        innerEncoder = getattr(encoder, encoderAttr)
        setattr(encoder, offsetAttr, encoder.width)
        innerOffset = getattr(encoder, offsetAttr)
        encoder.width += innerEncoder.getWidth()
        encoder.description.append((innerEncoder.name, innerOffset))
        encoder.encoders.append((innerEncoder.name, innerEncoder, innerOffset))
      else:
        setattr(encoder, encoderAttr, None)

    addEncoder("seasonEncoder", "seasonOffset")
    addEncoder("dayOfWeekEncoder", "dayOfWeekOffset")
    addEncoder("weekendEncoder", "weekendOffset")
    addEncoder("customDaysEncoder", "customDaysOffset")
    addEncoder("holidayEncoder", "holidayOffset")
    addEncoder("timeOfDayEncoder", "timeOfDayOffset")

    return encoder

  def write(self, proto):
    for name in ("seasonEncoder",
                 "dayOfWeekEncoder",
                 "weekendEncoder",
                 "customDaysEncoder",
                 "holidayEncoder",
                 "timeOfDayEncoder"):
      encoder = getattr(self, name)
      if encoder:
        encoder.write(getattr(proto, name))
| 16,569 | Python | .py | 370 | 36.856757 | 94 | 0.639263 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,985 | pass_through.py | numenta_nupic-legacy/src/nupic/encoders/pass_through.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy

from nupic.data.field_meta import FieldMetaType
# EncoderResult is required by getBucketInfo()/topDownCompute(); it was
# previously referenced without being imported (NameError at runtime).
from nupic.encoders.base import Encoder, EncoderResult

try:
  import capnp
except ImportError:
  capnp = None
if capnp:
  from nupic.encoders.pass_through_capnp import PassThroughEncoderProto
class PassThroughEncoder(Encoder):
"""
Pass an encoded SDR straight to the model.
Each encoding is an SDR in which w out of n bits are turned on.
The input should be a 1-D array or numpy.ndarray of length n
:param n: the total #bits in output
:param w: used to normalize the sparsity of the output, exactly w bits ON,
if None (default) - do not alter the input, just pass it further.
:param forced: if forced, encode will accept any data, and just return it back
"""
def __init__(self, n, w=None, name="pass_through", forced=False,
verbosity=0):
self.n = n
self.w = w
self.verbosity = verbosity
self.description = [(name, 0)]
self.name = name
self.encoders = None
self.forced = forced
def getDecoderOutputFieldTypes(self):
""" [Encoder class virtual method override]
"""
return (FieldMetaType.string,)
def getWidth(self):
return self.n
def getDescription(self):
return self.description
def getScalars(self, input):
""" See method description in base.py """
return numpy.array([0])
def getBucketIndices(self, input):
""" See method description in base.py """
return [0]
def encodeIntoArray(self, inputVal, outputVal):
"""See method description in base.py"""
if len(inputVal) != len(outputVal):
raise ValueError("Different input (%i) and output (%i) sizes." % (
len(inputVal), len(outputVal)))
if self.w is not None and sum(inputVal) != self.w:
raise ValueError("Input has %i bits but w was set to %i." % (
sum(inputVal), self.w))
outputVal[:] = inputVal[:]
if self.verbosity >= 2:
print "input:", inputVal, "output:", outputVal
print "decoded:", self.decodedToStr(self.decode(outputVal))
def decode(self, encoded, parentFieldName=""):
"""See the function description in base.py"""
if parentFieldName != "":
fieldName = "%s.%s" % (parentFieldName, self.name)
else:
fieldName = self.name
return ({fieldName: ([[0, 0]], "input")}, [fieldName])
def getBucketInfo(self, buckets):
"""See the function description in base.py"""
return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))]
def topDownCompute(self, encoded):
"""See the function description in base.py"""
return EncoderResult(value=0, scalar=0,
encoding=numpy.zeros(self.n))
def closenessScores(self, expValues, actValues, **kwargs):
"""
Does a bitwise compare of the two bitmaps and returns a fractonal
value between 0 and 1 of how similar they are.
- ``1`` => identical
- ``0`` => no overlaping bits
``kwargs`` will have the keyword "fractional", which is assumed by this
encoder.
"""
ratio = 1.0
esum = int(expValues.sum())
asum = int(actValues.sum())
if asum > esum:
diff = asum - esum
if diff < esum:
ratio = 1 - diff/float(esum)
else:
ratio = 1/float(diff)
olap = expValues & actValues
osum = int(olap.sum())
if esum == 0:
r = 0.0
else:
r = osum/float(esum)
r = r * ratio
return numpy.array([r])
@classmethod
def getSchema(cls):
return PassThroughEncoderProto
@classmethod
def read(cls, proto):
encoder = object.__new__(cls)
encoder.n = proto.n
encoder.w = proto.w if proto.w else None
encoder.verbosity = proto.verbosity
encoder.name = proto.name
encoder.description = [(encoder.name, 0)]
encoder.encoders = None
encoder.forced = proto.forced
return encoder
def write(self, proto):
proto.n = self.n
if self.w is not None:
proto.w = self.w
proto.verbosity = self.verbosity
proto.name = self.name
proto.forced = self.forced
| 5,011 | Python | .py | 135 | 32.407407 | 80 | 0.666115 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,986 | __init__.py | numenta_nupic-legacy/src/nupic/encoders/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from scalar import ScalarEncoder
from random_distributed_scalar import RandomDistributedScalarEncoder
from adaptive_scalar import AdaptiveScalarEncoder
from date import DateEncoder
from logarithm import LogEncoder
from category import CategoryEncoder
from sdr_category import SDRCategoryEncoder
from delta import DeltaEncoder
from scalar_space import ScalarSpaceEncoder
from coordinate import CoordinateEncoder
from geospatial_coordinate import GeospatialCoordinateEncoder
from pass_through import PassThroughEncoder
from sparse_pass_through import SparsePassThroughEncoder
# multiencoder must be imported last because it imports * from this module!
from multi import MultiEncoder
from utils import bitsToString
| 1,689 | Python | .py | 36 | 45.888889 | 75 | 0.764528 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,987 | base.py | numenta_nupic-legacy/src/nupic/encoders/base.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Classes for encoding different types into SDRs for HTM input."""
from collections import namedtuple
import numpy
from nupic.encoders.utils import bitsToString
from nupic.serializable import Serializable
# Dtype of the dense bit arrays produced by Encoder.encode().
defaultDtype = numpy.uint8
# Lightweight (value, scalar, encoding) record returned by encoder top-down /
# decode computations; each field is described by the string that follows.
EncoderResult = namedtuple("EncoderResult", ['value', 'scalar', 'encoding'])
""" Tuple to represent the results of computations in different forms.
.. py:attribute:: value
A representation of the encoded value in the same format as the input
(i.e. float for scalars, string for categories). This is the value for
the sub-field in a format that is consistent with the type specified by
:meth:`.getDecoderOutputFieldTypes`. Note that this value is not
necessarily numeric.
.. py:attribute:: scalar
The scalar representation of value (e.g. for categories, this is the
internal index used by the encoder). This number is consistent with what
is returned by :meth:`.getScalars`. This value is always an int or
float, and can be used for numeric comparisons.
.. py:attribute:: encoding
This is the encoded bit-array (numpy array) that represents ``value``.
That is, if ``value`` was passed to :meth:`.encode`, an identical
bit-array should be returned.
"""
def _isSequence(obj):
"""Helper function to determine if a function is a list or sequence."""
mType = type(obj)
return mType is list or mType is tuple
class Encoder(Serializable):
"""
An encoder converts a value to a sparse distributed representation.
This is the base class for encoders that are compatible with the OPF. The OPF
requires that values can be represented as a scalar value for use in places
like the SDR Classifier.
.. note:: The Encoder superclass implements:
- :func:`~nupic.encoders.base.Encoder.encode`
- :func:`~nupic.encoders.base.Encoder.pprintHeader`
- :func:`~nupic.encoders.base.Encoder.pprint`
.. warning:: The following methods and properties must be implemented by
subclasses:
- :func:`~nupic.encoders.base.Encoder.getDecoderOutputFieldTypes`
- :func:`~nupic.encoders.base.Encoder.getWidth`
- :func:`~nupic.encoders.base.Encoder.encodeIntoArray`
- :func:`~nupic.encoders.base.Encoder.getDescription`
"""
def getWidth(self):
"""Should return the output width, in bits.
:return: (int) output width in bits
"""
raise NotImplementedError()
def encodeIntoArray(self, inputData, output):
"""
Encodes inputData and puts the encoded value into the numpy output array,
which is a 1-D array of length returned by :meth:`.getWidth`.
.. note:: The numpy output array is reused, so clear it before updating it.
:param inputData: Data to encode. This should be validated by the encoder.
:param output: numpy 1-D array of same length returned by
:meth:`.getWidth`.
"""
raise NotImplementedError()
def setLearning(self, learningEnabled):
"""Set whether learning is enabled.
:param learningEnabled: (bool) whether learning should be enabled
"""
# TODO: (#1943) Make sure subclasses don't rely on this and remove it.
# Default behavior should be a noop.
if hasattr(self, "_learningEnabled"):
self._learningEnabled = learningEnabled
def setFieldStats(self, fieldName, fieldStatistics):
"""
This method is called by the model to set the statistics like min and
max for the underlying encoders if this information is available.
:param fieldName: name of the field this encoder is encoding, provided by
:class:`~.nupic.encoders.multi.MultiEncoder`.
:param fieldStatistics: dictionary of dictionaries with the first level being
the fieldname and the second index the statistic ie:
``fieldStatistics['pounds']['min']``
"""
pass
def encode(self, inputData):
"""Convenience wrapper for :meth:`.encodeIntoArray`.
This may be less efficient because it allocates a new numpy array every
call.
:param inputData: input data to be encoded
:return: a numpy array with the encoded representation of inputData
"""
output = numpy.zeros((self.getWidth(),), dtype=defaultDtype)
self.encodeIntoArray(inputData, output)
return output
def getScalarNames(self, parentFieldName=''):
"""
Return the field names for each of the scalar values returned by
getScalars.
:param parentFieldName: The name of the encoder which is our parent. This
name is prefixed to each of the field names within this encoder to
form the keys of the dict() in the retval.
:return: array of field names
"""
names = []
if self.encoders is not None:
for (name, encoder, offset) in self.encoders:
subNames = encoder.getScalarNames(parentFieldName=name)
if parentFieldName != '':
subNames = ['%s.%s' % (parentFieldName, name) for name in subNames]
names.extend(subNames)
else:
if parentFieldName != '':
names.append(parentFieldName)
else:
names.append(self.name)
return names
def getDecoderOutputFieldTypes(self):
"""
Returns a sequence of field types corresponding to the elements in the
decoded output field array. The types are defined by
:class:`~nupic.data.field_meta.FieldMetaType`.
:return: list of :class:`~nupic.data.field_meta.FieldMetaType` objects
"""
if hasattr(self, '_flattenedFieldTypeList') and \
self._flattenedFieldTypeList is not None:
return self._flattenedFieldTypeList
fieldTypes = []
# NOTE: we take care of the composites, but leaf encoders must override
# this method and return a list of one field_meta.FieldMetaType.XXXX
# element corresponding to the encoder's decoder output field type
for (name, encoder, offset) in self.encoders:
subTypes = encoder.getDecoderOutputFieldTypes()
fieldTypes.extend(subTypes)
self._flattenedFieldTypeList = fieldTypes
return fieldTypes
def setStateLock(self,lock):
"""
Setting this to true freezes the state of the encoder
This is separate from the learning state which affects changing parameters.
Implemented in subclasses.
"""
pass
def _getInputValue(self, obj, fieldName):
"""
Gets the value of a given field from the input record
"""
if isinstance(obj, dict):
if not fieldName in obj:
knownFields = ", ".join(
key for key in obj.keys() if not key.startswith("_")
)
raise ValueError(
"Unknown field name '%s' in input record. Known fields are '%s'.\n"
"This could be because input headers are mislabeled, or because "
"input data rows do not contain a value for '%s'." % (
fieldName, knownFields, fieldName
)
)
return obj[fieldName]
else:
return getattr(obj, fieldName)
def getEncoderList(self):
"""
:return: a reference to each sub-encoder in this encoder. They are
returned in the same order as they are for :meth:`.getScalarNames`
and :meth:`.getScalars`.
"""
if hasattr(self, '_flattenedEncoderList') and \
self._flattenedEncoderList is not None:
return self._flattenedEncoderList
encoders = []
if self.encoders is not None:
for (name, encoder, offset) in self.encoders:
subEncoders = encoder.getEncoderList()
encoders.extend(subEncoders)
else:
encoders.append(self)
self._flattenedEncoderList = encoders
return encoders
def getScalars(self, inputData):
"""
Returns a numpy array containing the sub-field scalar value(s) for
each sub-field of the ``inputData``. To get the associated field names for
each of the scalar values, call :meth:`.getScalarNames()`.
For a simple scalar encoder, the scalar value is simply the input unmodified.
For category encoders, it is the scalar representing the category string
that is passed in. For the datetime encoder, the scalar value is the
the number of seconds since epoch.
The intent of the scalar representation of a sub-field is to provide a
baseline for measuring error differences. You can compare the scalar value
of the inputData with the scalar value returned from :meth:`.topDownCompute`
on a top-down representation to evaluate prediction accuracy, for example.
:param inputData: The data from the source. This is typically an object with
members
:return: array of scalar values
"""
retVals = numpy.array([])
if self.encoders is not None:
for (name, encoder, offset) in self.encoders:
values = encoder.getScalars(self._getInputValue(inputData, name))
retVals = numpy.hstack((retVals, values))
else:
retVals = numpy.hstack((retVals, inputData))
return retVals
def getEncodedValues(self, inputData):
"""
Returns the input in the same format as is returned by
:meth:`.topDownCompute`. For most encoder types, this is the same as the
input data. For instance, for scalar and category types, this corresponds to
the numeric and string values, respectively, from the inputs. For datetime
encoders, this returns the list of scalars for each of the sub-fields
(timeOfDay, dayOfWeek, etc.)
This method is essentially the same as :meth:`.getScalars` except that it
returns strings.
:param inputData: The input data in the format it is received from the data
source
:return: A list of values, in the same format and in the same order as they
are returned by :meth:`.topDownCompute`.
"""
retVals = []
if self.encoders is not None:
for name, encoders, offset in self.encoders:
values = encoders.getEncodedValues(self._getInputValue(inputData, name))
if _isSequence(values):
retVals.extend(values)
else:
retVals.append(values)
else:
if _isSequence(inputData):
retVals.extend(inputData)
else:
retVals.append(inputData)
return tuple(retVals)
def getBucketIndices(self, inputData):
"""
Returns an array containing the sub-field bucket indices for each sub-field
of the inputData. To get the associated field names for each of the buckets,
call :meth:`.getScalarNames`.
:param inputData: The data from the source. This is typically an object with
members.
:return: array of bucket indices
"""
retVals = []
if self.encoders is not None:
for (name, encoder, offset) in self.encoders:
values = encoder.getBucketIndices(self._getInputValue(inputData, name))
retVals.extend(values)
else:
assert False, "Should be implemented in base classes that are not " \
"containers for other encoders"
return retVals
def scalarsToStr(self, scalarValues, scalarNames=None):
"""
Return a pretty print string representing the return values from
:meth:`.getScalars` and :meth:`.getScalarNames`.
:param scalarValues: input values to encode to string
:param scalarNames: optional input of scalar names to convert. If None, gets
scalar names from :meth:`.getScalarNames`
:return: string representation of scalar values
"""
if scalarNames is None:
scalarNames = self.getScalarNames()
desc = ''
for (name, value) in zip(scalarNames, scalarValues):
if len(desc) > 0:
desc += ", %s:%.2f" % (name, value)
else:
desc += "%s:%.2f" % (name, value)
return desc
def getDescription(self):
"""
**Must be overridden by subclasses.**
This returns a list of tuples, each containing (``name``, ``offset``).
The ``name`` is a string description of each sub-field, and ``offset`` is
the bit offset of the sub-field for that encoder.
For now, only the 'multi' and 'date' encoders have multiple (name, offset)
pairs. All other encoders have a single pair, where the offset is 0.
:return: list of tuples containing (name, offset)
"""
raise Exception("getDescription must be implemented by all subclasses")
def getFieldDescription(self, fieldName):
"""
Return the offset and length of a given field within the encoded output.
:param fieldName: Name of the field
:return: tuple(``offset``, ``width``) of the field within the encoded output
"""
# Find which field it's in
description = self.getDescription() + [("end", self.getWidth())]
for i in xrange(len(description)):
(name, offset) = description[i]
if (name == fieldName):
break
if i >= len(description)-1:
raise RuntimeError("Field name %s not found in this encoder" % fieldName)
# Return the offset and width
return (offset, description[i+1][1] - offset)
  def encodedBitDescription(self, bitOffset, formatted=False):
    """
    Return a description of the given bit in the encoded output.
    This will include the field name and the offset within the field.

    :param bitOffset: Offset of the bit to get the description of
    :param formatted: If True, the bitOffset is w.r.t. formatted output,
                 which includes separators

    :return: tuple(``fieldName``, ``offsetWithinField``)
    """
    # Find which field it's in
    (prevFieldName, prevFieldOffset) = (None, None)
    description = self.getDescription()
    for i in xrange(len(description)):
      (name, offset) = description[i]
      if formatted:
        # Formatted output inserts one separator character before each field
        # (after the first), so shift this field's offset by its index.
        offset = offset + i
        if bitOffset == offset-1:
          # The queried bit is the separator immediately before this field.
          prevFieldName = "separator"
          prevFieldOffset = bitOffset
          break
      if bitOffset < offset:
        # The queried bit lies before this field, i.e. within the previous
        # field tracked in (prevFieldName, prevFieldOffset).
        break
      (prevFieldName, prevFieldOffset) = (name, offset)

    # Return the field name and offset within the field
    # return (fieldName, bitOffset - fieldOffset)
    width = self.getDisplayWidth() if formatted else self.getWidth()
    # NOTE(review): the bound check uses self.getWidth() even when formatted
    # is True, while the error message reports `width` (the display width) --
    # confirm whether the formatted bound should be getDisplayWidth() instead.
    if prevFieldOffset is None or bitOffset > self.getWidth():
      raise IndexError("Bit is outside of allowable range: [0 - %d]" % width)
    return (prevFieldName, bitOffset - prevFieldOffset)
  def pprintHeader(self, prefix=""):
    """
    Pretty-print a header that labels the sub-fields of the encoded
    output. This can be used in conjunction with :meth:`.pprint`.

    :param prefix: printed before the header if specified
    """
    print prefix,
    # Sentinel "end" entry lets each field's width be computed from the
    # offset of the entry that follows it.
    description = self.getDescription() + [("end", self.getWidth())]
    for i in xrange(len(description) - 1):
      name = description[i][0]
      width = description[i+1][1] - description[i][1]
      formatStr = "%%-%ds |" % width
      if len(name) > width:
        # Truncate names that are wider than the field they label.
        pname = name[0:width]
      else:
        pname = name
      print formatStr % pname,
    print
    # Underline sized to the total bit width plus the " | " separators
    # (3 characters each), minus the trailing one.
    print prefix, "-" * (self.getWidth() + (len(description) - 1)*3 - 1)
  def pprint(self, output, prefix=""):
    """
    Pretty-print the encoded output using ascii art.

    :param output: encoded output (array of bits) to print
    :param prefix: printed before the header if specified
    """
    print prefix,
    # Append a sentinel ("end", width) entry so the last field's slice is
    # computed the same way as all the others.
    description = self.getDescription() + [("end", self.getWidth())]
    for i in xrange(len(description) - 1):
      offset = description[i][1]
      nextoffset = description[i+1][1]
      # bitsToString renders one field's bits; fields are separated by " |".
      print "%s |" % bitsToString(output[offset:nextoffset]),
    print
def decode(self, encoded, parentFieldName=''):
"""
Takes an encoded output and does its best to work backwards and generate
the input that would have generated it.
In cases where the encoded output contains more ON bits than an input
would have generated, this routine will return one or more ranges of inputs
which, if their encoded outputs were ORed together, would produce the
target output. This behavior makes this method suitable for doing things
like generating a description of a learned coincidence in the SP, which
in many cases might be a union of one or more inputs.
If instead, you want to figure the *most likely* single input scalar value
that would have generated a specific encoded output, use the
:meth:`.topDownCompute` method.
If you want to pretty print the return value from this method, use the
:meth:`.decodedToStr` method.
:param encoded: The encoded output that you want decode
:param parentFieldName: The name of the encoder which is our parent. This name
is prefixed to each of the field names within this encoder to form the
keys of the dict() in the retval.
:return: tuple(``fieldsDict``, ``fieldOrder``)
``fieldsDict`` is a dict() where the keys represent field names
(only 1 if this is a simple encoder, > 1 if this is a multi
or date encoder) and the values are the result of decoding each
field. If there are no bits in encoded that would have been
generated by a field, it won't be present in the dict. The
key of each entry in the dict is formed by joining the passed in
parentFieldName with the child encoder name using a '.'.
Each 'value' in ``fieldsDict`` consists of (ranges, desc), where
ranges is a list of one or more (minVal, maxVal) ranges of
input that would generate bits in the encoded output and 'desc'
is a pretty print description of the ranges. For encoders like
the category encoder, the 'desc' will contain the category
names that correspond to the scalar values included in the
ranges.
``fieldOrder`` is a list of the keys from ``fieldsDict``, in the
same order as the fields appear in the encoded output.
TODO: when we switch to Python 2.7 or 3.x, use OrderedDict
Example retvals for a scalar encoder:
.. code-block:: python
{'amount': ( [[1,3], [7,10]], '1-3, 7-10' )}
{'amount': ( [[2.5,2.5]], '2.5' )}
Example retval for a category encoder:
.. code-block:: python
{'country': ( [[1,1], [5,6]], 'US, GB, ES' )}
Example retval for a multi encoder:
.. code-block:: python
{'amount': ( [[2.5,2.5]], '2.5' ),
'country': ( [[1,1], [5,6]], 'US, GB, ES' )}
"""
fieldsDict = dict()
fieldsOrder = []
# What is the effective parent name?
if parentFieldName == '':
parentName = self.name
else:
parentName = "%s.%s" % (parentFieldName, self.name)
if self.encoders is not None:
# Merge decodings of all child encoders together
for i in xrange(len(self.encoders)):
# Get the encoder and the encoded output
(name, encoder, offset) = self.encoders[i]
if i < len(self.encoders)-1:
nextOffset = self.encoders[i+1][2]
else:
nextOffset = self.width
fieldOutput = encoded[offset:nextOffset]
(subFieldsDict, subFieldsOrder) = encoder.decode(fieldOutput,
parentFieldName=parentName)
fieldsDict.update(subFieldsDict)
fieldsOrder.extend(subFieldsOrder)
return (fieldsDict, fieldsOrder)
def decodedToStr(self, decodeResults):
"""
Return a pretty print string representing the return value from
:meth:`.decode`.
"""
(fieldsDict, fieldsOrder) = decodeResults
desc = ''
for fieldName in fieldsOrder:
(ranges, rangesStr) = fieldsDict[fieldName]
if len(desc) > 0:
desc += ", %s:" % (fieldName)
else:
desc += "%s:" % (fieldName)
desc += "[%s]" % (rangesStr)
return desc
def getBucketValues(self):
"""
**Must be overridden by subclasses.**
Returns a list of items, one for each bucket defined by this encoder.
Each item is the value assigned to that bucket, this is the same as the
:attr:`.EncoderResult.value` that would be returned by
:meth:`.getBucketInfo` for that bucket and is in the same format as the
input that would be passed to :meth:`.encode`.
This call is faster than calling :meth:`.getBucketInfo` on each bucket
individually if all you need are the bucket values.
:return: list of items, each item representing the bucket value for that
bucket.
"""
raise Exception("getBucketValues must be implemented by all subclasses")
def getBucketInfo(self, buckets):
"""
Returns a list of :class:`.EncoderResult` namedtuples describing the inputs
for each sub-field that correspond to the bucket indices passed in
``buckets``. To get the associated field names for each of the values, call
:meth:`.getScalarNames`.
:param buckets: The list of bucket indices, one for each sub-field encoder.
These bucket indices for example may have been retrieved
from the :meth:`.getBucketIndices` call.
:return: A list of :class:`.EncoderResult`.
"""
# Fall back topdown compute
if self.encoders is None:
raise RuntimeError("Must be implemented in sub-class")
# Concatenate the results from bucketInfo on each child encoder
retVals = []
bucketOffset = 0
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
if encoder.encoders is not None:
nextBucketOffset = bucketOffset + len(encoder.encoders)
else:
nextBucketOffset = bucketOffset + 1
bucketIndices = buckets[bucketOffset:nextBucketOffset]
values = encoder.getBucketInfo(bucketIndices)
retVals.extend(values)
bucketOffset = nextBucketOffset
return retVals
def topDownCompute(self, encoded):
"""
Returns a list of :class:`.EncoderResult` namedtuples describing the
top-down best guess inputs for each sub-field given the encoded output.
These are the values which are most likely to generate the given encoded
output. To get the associated field names for each of the values, call
:meth:`.getScalarNames`.
:param encoded: The encoded output. Typically received from the topDown
outputs from the spatial pooler just above us.
:return: A list of :class:`.EncoderResult`
"""
# Fallback topdown compute
if self.encoders is None:
raise RuntimeError("Must be implemented in sub-class")
# Concatenate the results from topDownCompute on each child encoder
retVals = []
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
if i < len(self.encoders)-1:
nextOffset = self.encoders[i+1][2]
else:
nextOffset = self.width
fieldOutput = encoded[offset:nextOffset]
values = encoder.topDownCompute(fieldOutput)
if _isSequence(values):
retVals.extend(values)
else:
retVals.append(values)
return retVals
def closenessScores(self, expValues, actValues, fractional=True):
"""
Compute closeness scores between the expected scalar value(s) and actual
scalar value(s). The expected scalar values are typically those obtained
from the :meth:`.getScalars` method. The actual scalar values are typically
those returned from :meth:`.topDownCompute`.
This method returns one closeness score for each value in expValues (or
actValues which must be the same length). The closeness score ranges from
0 to 1.0, 1.0 being a perfect match and 0 being the worst possible match.
If this encoder is a simple, single field encoder, then it will expect
just 1 item in each of the ``expValues`` and ``actValues`` arrays.
Multi-encoders will expect 1 item per sub-encoder.
Each encoder type can define it's own metric for closeness. For example,
a category encoder may return either 1 or 0, if the scalar matches exactly
or not. A scalar encoder might return a percentage match, etc.
:param expValues: Array of expected scalar values, typically obtained from
:meth:`.getScalars`
:param actValues: Array of actual values, typically obtained from
:meth:`.topDownCompute`
:return: Array of closeness scores, one per item in expValues (or
actValues).
"""
# Fallback closenss is a percentage match
if self.encoders is None:
err = abs(expValues[0] - actValues[0])
if fractional:
denom = max(expValues[0], actValues[0])
if denom == 0:
denom = 1.0
closeness = 1.0 - float(err)/denom
if closeness < 0:
closeness = 0
else:
closeness = err
return numpy.array([closeness])
# Concatenate the results from closeness scores on each child encoder
scalarIdx = 0
retVals = numpy.array([])
for (name, encoder, offset) in self.encoders:
values = encoder.closenessScores(expValues[scalarIdx:], actValues[scalarIdx:],
fractional=fractional)
scalarIdx += len(values)
retVals = numpy.hstack((retVals, values))
return retVals
def getDisplayWidth(self):
"""
Calculate width of display for bits plus blanks between fields.
:return: (int) width of display for bits plus blanks between fields
"""
width = self.getWidth() + len(self.getDescription()) - 1
return width
| 26,422 | Python | .py | 580 | 38.860345 | 84 | 0.681024 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,988 | coordinate.py | numenta_nupic-legacy/src/nupic/encoders/coordinate.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import hashlib
import itertools
import numpy
from nupic.bindings.math import Random
from nupic.encoders.base import Encoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.coordinate_capnp import CoordinateEncoderProto
class CoordinateEncoder(Encoder):
  """
  Given a coordinate in an N-dimensional space, and a radius around
  that coordinate, the Coordinate Encoder returns an SDR representation
  of that position.

  The Coordinate Encoder uses an N-dimensional integer coordinate space.
  For example, a valid coordinate in this space is (150, -49, 58), whereas
  an invalid coordinate would be (55.4, -5, 85.8475).

  It uses the following algorithm:

  1. Find all the coordinates around the input coordinate, within the
     specified radius.
  2. For each coordinate, use a uniform hash function to
     deterministically map it to a real number between 0 and 1. This is the
     "order" of the coordinate.
  3. Of these coordinates, pick the top W by order, where W is the
     number of active bits desired in the SDR.
  4. For each of these W coordinates, use a uniform hash function to
     deterministically map it to one of the bits in the SDR. Make this bit
     active.
  5. This results in a final SDR with exactly W bits active (barring chance
     hash collisions).
  """

  def __init__(self, w=21, n=1000, name=None, verbosity=0):
    """
    :param w: (int) number of bits to activate in the output SDR; must be
           an odd positive integer
    :param n: (int) total number of bits in the output SDR; must be an int
           strictly greater than 6*w (strictly greater than 11*w is
           recommended)
    :param name: (str) optional name for debug output; defaults to "[n:w]"
    :param verbosity: (int) debugging-output level
    """
    # Validate inputs
    if (w <= 0) or (w % 2 == 0):
      raise ValueError("w must be an odd positive integer")
    if (n <= 6 * w) or (not isinstance(n, int)):
      raise ValueError("n must be an int strictly greater than 6*w. For "
                       "good results we recommend n be strictly greater "
                       "than 11*w")

    self.w = w
    self.n = n
    self.verbosity = verbosity
    # This encoder has no child encoders.
    self.encoders = None

    if name is None:
      name = "[%s:%s]" % (self.n, self.w)
    self.name = name

  def getWidth(self):
    """See `nupic.encoders.base.Encoder` for more information."""
    return self.n

  def getDescription(self):
    """See `nupic.encoders.base.Encoder` for more information."""
    return [('coordinate', 0), ('radius', 1)]

  def getScalars(self, inputData):
    """See `nupic.encoders.base.Encoder` for more information."""
    # Coordinates have no meaningful scalar interpretation; return zeros.
    return numpy.array([0]*len(inputData))

  def encodeIntoArray(self, inputData, output):
    """
    See `nupic.encoders.base.Encoder` for more information.

    @param inputData (tuple) Contains coordinate (numpy.array, N-dimensional
                             integer coordinate) and radius (int)
    @param output (numpy.array) Stores encoded SDR in this numpy array
    """
    (coordinate, radius) = inputData

    assert isinstance(radius, int), ("Expected integer radius, got: {} ({})"
                                     .format(radius, type(radius)))

    # Steps 1-3 of the algorithm: enumerate the neighborhood and keep the
    # top-w coordinates by deterministic "order".
    neighbors = self._neighbors(coordinate, radius)
    winners = self._topWCoordinates(neighbors, self.w)

    # Step 4: map each winning coordinate to one SDR bit and activate it.
    bitFn = lambda coordinate: self._bitForCoordinate(coordinate, self.n)
    indices = numpy.array([bitFn(w) for w in winners])

    output[:] = 0
    output[indices] = 1

  @staticmethod
  def _neighbors(coordinate, radius):
    """
    Returns coordinates around given coordinate, within given radius.
    Includes given coordinate.

    @param coordinate (numpy.array) N-dimensional integer coordinate
    @param radius (int) Radius around `coordinate`
    @return (numpy.array) List of coordinates
    """
    # Cartesian product of the per-dimension ranges [n-radius, n+radius].
    ranges = (xrange(n-radius, n+radius+1) for n in coordinate.tolist())
    return numpy.array(list(itertools.product(*ranges)))

  @classmethod
  def _topWCoordinates(cls, coordinates, w):
    """
    Returns the top W coordinates by order.

    @param coordinates (numpy.array) A 2D numpy array, where each element
                                     is a coordinate
    @param w (int) Number of top coordinates to return
    @return (numpy.array) A subset of `coordinates`, containing only the
                          top ones by order
    """
    orders = numpy.array([cls._orderForCoordinate(c)
                          for c in coordinates.tolist()])
    # Take the w coordinates with the largest order values.
    indices = numpy.argsort(orders)[-w:]
    return coordinates[indices]

  @staticmethod
  def _hashCoordinate(coordinate):
    """Hash a coordinate to a 64 bit integer."""
    coordinateStr = ",".join(str(v) for v in coordinate)
    # Compute the hash and convert to 64 bit int.
    # NOTE(review): md5() is fed a str directly, which only works on
    # Python 2; Python 3 would need coordinateStr.encode().
    hash = int(int(hashlib.md5(coordinateStr).hexdigest(), 16) % (2 ** 64))
    return hash

  @classmethod
  def _orderForCoordinate(cls, coordinate):
    """
    Returns the order for a coordinate.

    @param coordinate (numpy.array) Coordinate
    @return (float) A value in the interval [0, 1), representing the
                    order of the coordinate
    """
    # Seed a deterministic RNG with the coordinate's hash so the same
    # coordinate always produces the same order.
    seed = cls._hashCoordinate(coordinate)
    rng = Random(seed)
    return rng.getReal64()

  @classmethod
  def _bitForCoordinate(cls, coordinate, n):
    """
    Maps the coordinate to a bit in the SDR.

    @param coordinate (numpy.array) Coordinate
    @param n (int) The number of available bits in the SDR
    @return (int) The index to a bit in the SDR
    """
    # Same deterministic-seeding scheme as _orderForCoordinate.
    seed = cls._hashCoordinate(coordinate)
    rng = Random(seed)
    return rng.getUInt32(n)

  def __str__(self):
    string = "CoordinateEncoder:"
    string += "\n w: {w}".format(w=self.w)
    string += "\n n: {n}".format(n=self.n)
    return string

  @classmethod
  def getSchema(cls):
    """Return the capnp schema class used to (de)serialize this encoder."""
    return CoordinateEncoderProto

  @classmethod
  def read(cls, proto):
    """Deserialize a CoordinateEncoder from the given capnp proto reader."""
    # __new__ bypasses __init__ (and its validation) on purpose: the proto
    # fields were validated when the encoder was originally constructed.
    encoder = object.__new__(cls)
    encoder.w = proto.w
    encoder.n = proto.n
    encoder.verbosity = proto.verbosity
    encoder.name = proto.name
    encoder.encoders = None
    return encoder

  def write(self, proto):
    """Serialize this encoder into the given capnp proto builder."""
    proto.w = self.w
    proto.n = self.n
    proto.verbosity = self.verbosity
    proto.name = self.name
| 6,832 | Python | .py | 169 | 35.076923 | 80 | 0.676444 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,989 | random_distributed_scalar.py | numenta_nupic-legacy/src/nupic/encoders/random_distributed_scalar.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numbers
import sys
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.field_meta import FieldMetaType
from nupic.encoders.base import Encoder
from nupic.bindings.math import Random as NupicRandom
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.random_distributed_scalar_capnp import (
RandomDistributedScalarEncoderProto)
INITIAL_BUCKETS = 1000
class RandomDistributedScalarEncoder(Encoder):
  """
  A scalar encoder encodes a numeric (floating point) value into an array
  of bits.

  This class maps a scalar value into a random distributed representation that
  is suitable as scalar input into the spatial pooler. The encoding scheme is
  designed to replace a simple ScalarEncoder. It preserves the important
  properties around overlapping representations. Unlike ScalarEncoder the min
  and max range can be dynamically increased without any negative effects. The
  only required parameter is resolution, which determines the resolution of
  input values.

  Scalar values are mapped to a bucket. The class maintains a random
  distributed encoding for each bucket. The following properties are
  maintained by RandomDistributedEncoder:

  1) Similar scalars should have high overlap. Overlap should decrease
  smoothly as scalars become less similar. Specifically, neighboring bucket
  indices must overlap by a linearly decreasing number of bits.

  2) Dissimilar scalars should have very low overlap so that the SP does not
  confuse representations. Specifically, buckets that are more than w indices
  apart should have at most maxOverlap bits of overlap. We arbitrarily (and
  safely) define "very low" to be 2 bits of overlap or lower.

  Properties 1 and 2 lead to the following overlap rules for buckets i and j:

  .. code-block:: python

    If abs(i-j) < w then:
      overlap(i,j) = w - abs(i-j)
    else:
      overlap(i,j) <= maxOverlap

  3) The representation for a scalar must not change during the lifetime of
  the object. Specifically, as new buckets are created and the min/max range
  is extended, the representation for previously in-range scalars and
  previously created buckets must not change.

  :param resolution: A floating point positive number denoting the resolution
                    of the output representation. Numbers within
                    [offset-resolution/2, offset+resolution/2] will fall into
                    the same bucket and thus have an identical representation.
                    Adjacent buckets will differ in one bit. resolution is a
                    required parameter.

  :param w: Number of bits to set in output. w must be odd to avoid centering
                    problems.  w must be large enough that spatial pooler
                    columns will have a sufficiently large overlap to avoid
                    false matches. A value of w=21 is typical.

  :param n: Number of bits in the representation (must be > w). n must be
                    large enough such that there is enough room to select
                    new representations as the range grows. With w=21 a value
                    of n=400 is typical. The class enforces n > 6*w.

  :param name: An optional string which will become part of the description.

  :param offset: A floating point offset used to map scalar inputs to bucket
                    indices. The middle bucket will correspond to numbers in
                    the range [offset - resolution/2, offset + resolution/2).
                    If set to None, the very first input that is encoded will
                    be used to determine the offset.

  :param seed: The seed used for numpy's random number generator. If set to -1
                    the generator will be initialized without a fixed seed.

  :param verbosity: An integer controlling the level of debugging output. A
                    value of 0 implies no output. verbosity=1 may lead to
                    one-time printouts during construction, serialization or
                    deserialization. verbosity=2 may lead to some output per
                    encode operation. verbosity>2 may lead to significantly
                    more output.
  """

  def __init__(self, resolution, w=21, n=400, name=None, offset=None,
               seed=42, verbosity=0):
    # Validate inputs
    if (w <= 0) or (w%2 == 0):
      raise ValueError("w must be an odd positive integer")
    if resolution <= 0:
      raise ValueError("resolution must be a positive number")
    if (n <= 6*w) or (not isinstance(n, int)):
      raise ValueError("n must be an int strictly greater than 6*w. For "
                       "good results we recommend n be strictly greater "
                       "than 11*w")

    self.encoders = None
    self.verbosity = verbosity
    self.w = w
    self.n = n
    self.resolution = float(resolution)

    # The largest overlap we allow for non-adjacent encodings
    self._maxOverlap = 2

    # initialize the random number generators
    self._seed(seed)

    # Internal parameters for bucket mapping
    self.minIndex = None
    self.maxIndex = None
    self._offset = None
    self._initializeBucketMap(INITIAL_BUCKETS, offset)

    # A name used for debug printouts
    if name is not None:
      self.name = name
    else:
      self.name = "[%s]" % (self.resolution)

    if self.verbosity > 0:
      print(self)

  def __setstate__(self, state):
    # Unpickle support: restore attributes, upgrading any legacy numpy
    # RandomState into the NupicRandom implementation used today.
    self.__dict__.update(state)

    # Initialize self.random as an instance of NupicRandom derived from the
    # previous numpy random state
    randomState = state["random"]
    if isinstance(randomState, numpy.random.mtrand.RandomState):
      self.random = NupicRandom(randomState.randint(sys.maxint))

  def _seed(self, seed=-1):
    """
    Initialize the random seed.  A seed of -1 means no fixed seed.
    """
    if seed != -1:
      self.random = NupicRandom(seed)
    else:
      self.random = NupicRandom()

  def getDecoderOutputFieldTypes(self):
    """ See method description in base.py """
    return (FieldMetaType.float, )

  def getWidth(self):
    """ See method description in base.py """
    return self.n

  def getDescription(self):
    # Single field starting at bit offset 0.
    return [(self.name, 0)]

  def getBucketIndices(self, x):
    """ See method description in base.py """
    # Missing data (NaN or the sentinel value) maps to "no bucket".
    if ((isinstance(x, float) and math.isnan(x)) or
        x == SENTINEL_VALUE_FOR_MISSING_DATA):
      return [None]

    # The very first value encoded anchors the bucket space (see property 3
    # in the class docstring).
    if self._offset is None:
      self._offset = x

    bucketIdx = (
        (self._maxBuckets/2) + int(round((x - self._offset) / self.resolution))
    )

    # Clip into the fixed non-negative bucket range required by the
    # SDR classifier.
    if bucketIdx < 0:
      bucketIdx = 0
    elif bucketIdx >= self._maxBuckets:
      bucketIdx = self._maxBuckets-1

    return [bucketIdx]

  def mapBucketIndexToNonZeroBits(self, index):
    """
    Given a bucket index, return the list of non-zero bits. If the bucket
    index does not exist, it is created. If the index falls outside our range
    we clip it.

    :param index: The bucket index to get non-zero bits for.
    @returns numpy array of indices of non-zero bits for specified index.
    """
    if index < 0:
      index = 0

    if index >= self._maxBuckets:
      index = self._maxBuckets-1

    # Lazily create the representation for a bucket the first time it is
    # requested (also creates any in-between buckets -- see _createBucket).
    if not self.bucketMap.has_key(index):
      if self.verbosity >= 2:
        print "Adding additional buckets to handle index=", index
      self._createBucket(index)
    return self.bucketMap[index]

  def encodeIntoArray(self, x, output):
    """ See method description in base.py """

    if x is not None and not isinstance(x, numbers.Number):
      raise TypeError(
          "Expected a scalar input but got input of type %s" % type(x))

    # Get the bucket index to use
    bucketIdx = self.getBucketIndices(x)[0]

    # None is returned for missing value in which case we return all 0's.
    output[0:self.n] = 0
    if bucketIdx is not None:
      output[self.mapBucketIndexToNonZeroBits(bucketIdx)] = 1

  def _createBucket(self, index):
    """
    Create the given bucket index. Recursively create as many in-between
    bucket indices as necessary.
    """
    if index < self.minIndex:
      if index == self.minIndex - 1:
        # Create a new representation that has exactly w-1 overlapping bits
        # as the min representation
        self.bucketMap[index] = self._newRepresentation(self.minIndex,
                                                        index)
        self.minIndex = index
      else:
        # Recursively create all the indices above and then this index
        self._createBucket(index+1)
        self._createBucket(index)
    else:
      if index == self.maxIndex + 1:
        # Create a new representation that has exactly w-1 overlapping bits
        # as the max representation
        self.bucketMap[index] = self._newRepresentation(self.maxIndex,
                                                        index)
        self.maxIndex = index
      else:
        # Recursively create all the indices below and then this index
        self._createBucket(index-1)
        self._createBucket(index)

  def _newRepresentation(self, index, newIndex):
    """
    Return a new representation for newIndex that overlaps with the
    representation at index by exactly w-1 bits
    """
    newRepresentation = self.bucketMap[index].copy()

    # Choose the bit we will replace in this representation. We need to shift
    # this bit deterministically. If this is always chosen randomly then there
    # is a 1 in w chance of the same bit being replaced in neighboring
    # representations, which is fairly high
    ri = newIndex % self.w

    # Now we choose a bit such that the overlap rules are satisfied.
    # Retry (counting attempts in numTries) until the candidate bit is not
    # already used by the neighbor and the overlap invariants still hold.
    newBit = self.random.getUInt32(self.n)
    newRepresentation[ri] = newBit
    while newBit in self.bucketMap[index] or \
          not self._newRepresentationOK(newRepresentation, newIndex):
      self.numTries += 1
      newBit = self.random.getUInt32(self.n)
      newRepresentation[ri] = newBit

    return newRepresentation

  def _newRepresentationOK(self, newRep, newIndex):
    """
    Return True if this new candidate representation satisfies all our overlap
    rules. Since we know that neighboring representations differ by at most
    one bit, we compute running overlaps.
    """
    if newRep.size != self.w:
      return False
    if (newIndex < self.minIndex-1) or (newIndex > self.maxIndex+1):
      raise ValueError("newIndex must be within one of existing indices")

    # A binary representation of newRep. We will use this to test containment
    newRepBinary = numpy.array([False]*self.n)
    newRepBinary[newRep] = True

    # Midpoint
    # NOTE(review): relies on Python 2 integer division (maxBuckets is even).
    midIdx = self._maxBuckets/2

    # Start by checking the overlap at minIndex
    runningOverlap = self._countOverlap(self.bucketMap[self.minIndex], newRep)
    if not self._overlapOK(self.minIndex, newIndex, overlap=runningOverlap):
      return False

    # Compute running overlaps all the way to the midpoint
    for i in range(self.minIndex+1, midIdx+1):
      # This is the bit that is going to change
      newBit = (i-1)%self.w

      # Update our running overlap
      if newRepBinary[self.bucketMap[i-1][newBit]]:
        runningOverlap -= 1
      if newRepBinary[self.bucketMap[i][newBit]]:
        runningOverlap += 1

      # Verify our rules
      if not self._overlapOK(i, newIndex, overlap=runningOverlap):
        return False

    # At this point, runningOverlap contains the overlap for midIdx
    # Compute running overlaps all the way to maxIndex
    for i in range(midIdx+1, self.maxIndex+1):
      # This is the bit that is going to change
      newBit = i%self.w

      # Update our running overlap
      if newRepBinary[self.bucketMap[i-1][newBit]]:
        runningOverlap -= 1
      if newRepBinary[self.bucketMap[i][newBit]]:
        runningOverlap += 1

      # Verify our rules
      if not self._overlapOK(i, newIndex, overlap=runningOverlap):
        return False

    return True

  def _countOverlapIndices(self, i, j):
    """
    Return the overlap between bucket indices i and j
    """
    if self.bucketMap.has_key(i) and self.bucketMap.has_key(j):
      iRep = self.bucketMap[i]
      jRep = self.bucketMap[j]
      return self._countOverlap(iRep, jRep)
    else:
      raise ValueError("Either i or j don't exist")

  @staticmethod
  def _countOverlap(rep1, rep2):
    """
    Return the overlap between two representations. rep1 and rep2 are lists of
    non-zero indices.
    """
    overlap = 0
    for e in rep1:
      if e in rep2:
        overlap += 1
    return overlap

  def _overlapOK(self, i, j, overlap=None):
    """
    Return True if the given overlap between bucket indices i and j are
    acceptable. If overlap is not specified, calculate it from the bucketMap
    """
    if overlap is None:
      overlap = self._countOverlapIndices(i, j)
    # These are the two overlap rules from the class docstring.
    if abs(i-j) < self.w:
      if overlap == (self.w - abs(i-j)):
        return True
      else:
        return False
    else:
      if overlap <= self._maxOverlap:
        return True
      else:
        return False

  def _initializeBucketMap(self, maxBuckets, offset):
    """
    Initialize the bucket map assuming the given number of maxBuckets.
    """
    # The first bucket index will be _maxBuckets / 2 and bucket indices will be
    # allowed to grow lower or higher as long as they don't become negative.
    # _maxBuckets is required because the current SDR Classifier assumes bucket
    # indices must be non-negative. This normally does not need to be changed
    # but if altered, should be set to an even number.
    self._maxBuckets = maxBuckets
    self.minIndex = self._maxBuckets / 2
    self.maxIndex = self._maxBuckets / 2

    # The scalar offset used to map scalar values to bucket indices. The middle
    # bucket will correspond to numbers in the range
    # [offset-resolution/2, offset+resolution/2).
    # The bucket index for a number x will be:
    #   maxBuckets/2 + int( round( (x-offset)/resolution ) )
    self._offset = offset

    # This dictionary maps a bucket index into its bit representation
    # We initialize the class with a single bucket with index 0
    self.bucketMap = {}

    # The initial (middle) bucket gets the first w entries of a random
    # permutation of all n bit positions.
    def _permutation(n):
      r = numpy.arange(n, dtype=numpy.uint32)
      self.random.shuffle(r)
      return r

    self.bucketMap[self.minIndex] = _permutation(self.n)[0:self.w]

    # How often we need to retry when generating valid encodings
    self.numTries = 0

  def __str__(self):
    string = "RandomDistributedScalarEncoder:"
    string += "\n minIndex: {min}".format(min = self.minIndex)
    string += "\n maxIndex: {max}".format(max = self.maxIndex)
    string += "\n w: {w}".format(w = self.w)
    string += "\n n: {width}".format(width = self.getWidth())
    string += "\n resolution: {res}".format(res = self.resolution)
    string += "\n offset: {offset}".format(offset = str(self._offset))
    string += "\n numTries: {tries}".format(tries = self.numTries)
    string += "\n name: {name}".format(name = self.name)
    if self.verbosity > 2:
      string += "\n All buckets: "
      string += "\n "
      string += str(self.bucketMap)
    return string

  @classmethod
  def getSchema(cls):
    """Return the capnp schema class used to (de)serialize this encoder."""
    return RandomDistributedScalarEncoderProto

  @classmethod
  def read(cls, proto):
    """Deserialize a RandomDistributedScalarEncoder from a capnp proto."""
    # __new__ bypasses __init__ (and its validation) on purpose: the proto
    # fields were validated when the encoder was originally constructed.
    encoder = object.__new__(cls)
    encoder.resolution = proto.resolution
    encoder.w = proto.w
    encoder.n = proto.n
    encoder.name = proto.name
    # offset is a capnp union: either "none" or a float value.
    if proto.offset.which() == "none":
      encoder._offset = None
    else:
      encoder._offset = proto.offset.value
    encoder.random = NupicRandom()
    encoder.random.read(proto.random)
    encoder.resolution = proto.resolution
    encoder.verbosity = proto.verbosity
    encoder.minIndex = proto.minIndex
    encoder.maxIndex = proto.maxIndex
    encoder.encoders = None
    encoder._maxBuckets = INITIAL_BUCKETS
    # `or 0` coerces falsy/unset proto fields to 0 -- presumably to accept
    # older serializations lacking these fields; TODO confirm.
    encoder._maxOverlap = proto.maxOverlap or 0
    encoder.numTries = proto.numTries or 0
    encoder.bucketMap = {x.key: numpy.array(x.value, dtype=numpy.uint32)
                         for x in proto.bucketMap}
    return encoder

  def write(self, proto):
    """Serialize this encoder into the given capnp proto builder."""
    proto.resolution = self.resolution
    proto.w = self.w
    proto.n = self.n
    proto.name = self.name
    if self._offset is None:
      proto.offset.none = None
    else:
      proto.offset.value = self._offset
    self.random.write(proto.random)
    proto.verbosity = self.verbosity
    proto.minIndex = self.minIndex
    proto.maxIndex = self.maxIndex
    proto.bucketMap = [{"key": key, "value": value.tolist()}
                       for key, value in self.bucketMap.items()]
    proto.numTries = self.numTries
    proto.maxOverlap = self._maxOverlap
| 17,662 | Python | .py | 411 | 36.515815 | 80 | 0.680448 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,990 | geospatial_coordinate.py | numenta_nupic-legacy/src/nupic/encoders/geospatial_coordinate.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numpy
from pyproj import Proj, transform
from nupic.encoders import CoordinateEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.geospatial_coordinate_capnp import GeospatialCoordinateEncoderProto
# Projection used to map (longitude, latitude) onto a flat 2-D plane.
# From http://spatialreference.org/ref/epsg/popular-visualisation-crs-mercator/
PROJ = Proj(init="epsg:3785") # Spherical Mercator
# Approximate (x, y) extent of the Spherical Mercator plane, in meters.
PROJ_RANGE=(20037508.3428, 19971868.8804) # in meters
# Earth-centered cartesian projection, used when an altitude is supplied.
# See http://gis.stackexchange.com/a/73829/41082
geocentric = Proj('+proj=geocent +datum=WGS84 +units=m +no_defs')
class GeospatialCoordinateEncoder(CoordinateEncoder):
  """
  Encodes a GPS reading (position plus speed) as an SDR.

  Given a GPS coordinate and a speed reading, returns a sparse distributed
  representation of that position, delegating the actual bit selection to
  the underlying :class:`CoordinateEncoder`.

  :param: scale (int) Scale of the map, as measured by distance between two
          coordinates (in meters per dimensional unit)
  :param: timestep (int) Time between readings (in seconds)
  """

  def __init__(self,
               scale,
               timestep,
               w=21,
               n=1000,
               name=None,
               verbosity=0):
    super(GeospatialCoordinateEncoder, self).__init__(
        w=w, n=n, name=name, verbosity=verbosity)
    self.scale = scale
    self.timestep = timestep

  def getDescription(self):
    """See `nupic.encoders.base.Encoder` for more information."""
    fields = ("speed", "longitude", "latitude", "altitude")
    return [(field, index) for index, field in enumerate(fields)]

  def getScalars(self, inputData):
    """See `nupic.encoders.base.Encoder` for more information."""
    # Positions have no meaningful per-field scalar value; report zeros.
    return numpy.zeros(len(self.getDescription()), dtype=int)

  def encodeIntoArray(self, inputData, output):
    """
    See `nupic.encoders.base.Encoder` for more information.

    :param: inputData (tuple) Contains speed (float), longitude (float),
                      latitude (float) and, optionally, altitude (float)
    :param: output (numpy.array) Stores encoded SDR in this numpy array
    """
    speed, longitude, latitude = inputData[:3]
    altitude = inputData[3] if len(inputData) == 4 else None
    position = self.coordinateForPosition(longitude, latitude, altitude)
    radius = self.radiusForSpeed(speed)
    super(GeospatialCoordinateEncoder, self).encodeIntoArray(
        (position, radius), output)

  def coordinateForPosition(self, longitude, latitude, altitude=None):
    """
    Returns the integer coordinate for a given GPS position.

    :param: longitude (float) Longitude of position
    :param: latitude (float) Latitude of position
    :param: altitude (float) Altitude of position
    :returns: (numpy.array) Coordinate that the given GPS position maps to
    """
    # Project onto the flat Spherical Mercator plane first.
    projected = PROJ(longitude, latitude)
    if altitude is not None:
      # With an altitude, convert to 3-D earth-centered cartesian coordinates.
      projected = transform(PROJ, geocentric,
                            projected[0], projected[1], altitude)
    # Quantize to the map scale.
    return (numpy.array(projected) / self.scale).astype(int)

  def radiusForSpeed(self, speed):
    """
    Returns the coordinate-space radius for a given speed.

    Tries to get the encodings of consecutive readings to be
    adjacent with some overlap.

    :param: speed (float) Speed (in meters per second)
    :returns: (int) Radius for given speed
    """
    overlap = 1.5
    # Coordinate cells traversed between two consecutive readings.
    cellsPerStep = speed * self.timestep / self.scale
    radius = int(round(float(cellsPerStep) / 2 * overlap))
    # Never drop below the smallest radius able to contain w active cells.
    minRadius = int(math.ceil((math.sqrt(self.w) - 1) / 2))
    return max(radius, minRadius)

  def __str__(self):
    lines = ["GeospatialCoordinateEncoder:",
             " w: {w}".format(w=self.w),
             " n: {n}".format(n=self.n)]
    return "\n".join(lines)

  @classmethod
  def getSchema(cls):
    return GeospatialCoordinateEncoderProto

  @classmethod
  def read(cls, proto):
    encoder = super(GeospatialCoordinateEncoder, cls).read(proto)
    encoder.scale = proto.scale
    encoder.timestep = proto.timestep
    return encoder

  def write(self, proto):
    super(GeospatialCoordinateEncoder, self).write(proto)
    proto.scale = self.scale
    proto.timestep = self.timestep
| 5,240 | Python | .py | 125 | 35.832 | 89 | 0.671786 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,991 | logarithm.py | numenta_nupic-legacy/src/nupic/encoders/logarithm.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.field_meta import FieldMetaType
from nupic.encoders.base import Encoder, EncoderResult
from nupic.encoders import ScalarEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.logarithm_capnp import LogEncoderProto
# Number of decimal places kept when rounding deserialized floats, guarding
# against drift introduced by serialization round-trips.
EPSILON_ROUND = 7 # Used to round floats
class LogEncoder(Encoder):
"""
This class wraps the :class:`.ScalarEncoder`.
A Log encoder represents a floating point value on a logarithmic scale.
.. code-block:: python
valueToEncode = log10(input)
:param resolution: The minimum change in scaled value needed to produce a
change in encoding. This should be specified in log space.
For example, the scaled values 10 and 11 will be
distinguishable in the output. In terms of the original
input values, this means 10^1 (1) and 10^1.1 (1.25) will be
distinguishable.
:param radius: inputs separated by more than this distance in log space will
have non-overlapping representations
"""
def __init__(self,
w=5,
minval=1e-07,
maxval=10000,
periodic=False,
n=0,
radius=0,
resolution=0,
name="log",
verbosity=0,
clipInput=True,
forced=False):
# Lower bound for log encoding near machine precision limit
lowLimit = 1e-07
# Limit minval as log10(0) is undefined.
if minval < lowLimit:
minval = lowLimit
# Check that minval is still lower than maxval
if not minval < maxval:
raise ValueError("Max val must be larger than min val or the lower limit "
"for this encoder %.7f" % lowLimit)
self.encoders = None
self.verbosity = verbosity
# Scale values for calculations within the class
self.minScaledValue = math.log10(minval)
self.maxScaledValue = math.log10(maxval)
if not self.maxScaledValue > self.minScaledValue:
raise ValueError("Max val must be larger, in log space, than min val.")
self.clipInput = clipInput
self.minval = minval
self.maxval = maxval
self.encoder = ScalarEncoder(w=w,
minval=self.minScaledValue,
maxval=self.maxScaledValue,
periodic=False,
n=n,
radius=radius,
resolution=resolution,
verbosity=self.verbosity,
clipInput=self.clipInput,
forced=forced)
self.width = self.encoder.getWidth()
self.description = [(name, 0)]
self.name = name
# This list is created by getBucketValues() the first time it is called,
# and re-created whenever our buckets would be re-arranged.
self._bucketValues = None
def getWidth(self):
return self.width
def getDescription(self):
return self.description
def getDecoderOutputFieldTypes(self):
"""
Encoder class virtual method override
"""
return (FieldMetaType.float, )
def _getScaledValue(self, inpt):
"""
Convert the input, which is in normal space, into log space
"""
if inpt == SENTINEL_VALUE_FOR_MISSING_DATA:
return None
else:
val = inpt
if val < self.minval:
val = self.minval
elif val > self.maxval:
val = self.maxval
scaledVal = math.log10(val)
return scaledVal
def getBucketIndices(self, inpt):
"""
See the function description in base.py
"""
# Get the scaled value
scaledVal = self._getScaledValue(inpt)
if scaledVal is None:
return [None]
else:
return self.encoder.getBucketIndices(scaledVal)
def encodeIntoArray(self, inpt, output):
"""
See the function description in base.py
"""
# Get the scaled value
scaledVal = self._getScaledValue(inpt)
if scaledVal is None:
output[0:] = 0
else:
self.encoder.encodeIntoArray(scaledVal, output)
if self.verbosity >= 2:
print "input:", inpt, "scaledVal:", scaledVal, "output:", output
print "decoded:", self.decodedToStr(self.decode(output))
def decode(self, encoded, parentFieldName=''):
"""
See the function description in base.py
"""
# Get the scalar values from the underlying scalar encoder
(fieldsDict, fieldNames) = self.encoder.decode(encoded)
if len(fieldsDict) == 0:
return (fieldsDict, fieldNames)
# Expect only 1 field
assert(len(fieldsDict) == 1)
# Convert each range into normal space
(inRanges, inDesc) = fieldsDict.values()[0]
outRanges = []
for (minV, maxV) in inRanges:
outRanges.append((math.pow(10, minV),
math.pow(10, maxV)))
# Generate a text description of the ranges
desc = ""
numRanges = len(outRanges)
for i in xrange(numRanges):
if outRanges[i][0] != outRanges[i][1]:
desc += "%.2f-%.2f" % (outRanges[i][0], outRanges[i][1])
else:
desc += "%.2f" % (outRanges[i][0])
if i < numRanges-1:
desc += ", "
# Return result
if parentFieldName != '':
fieldName = "%s.%s" % (parentFieldName, self.name)
else:
fieldName = self.name
return ({fieldName: (outRanges, desc)}, [fieldName])
def getBucketValues(self):
"""
See the function description in base.py
"""
# Need to re-create?
if self._bucketValues is None:
scaledValues = self.encoder.getBucketValues()
self._bucketValues = []
for scaledValue in scaledValues:
value = math.pow(10, scaledValue)
self._bucketValues.append(value)
return self._bucketValues
def getBucketInfo(self, buckets):
"""
See the function description in base.py
"""
scaledResult = self.encoder.getBucketInfo(buckets)[0]
scaledValue = scaledResult.value
value = math.pow(10, scaledValue)
return [EncoderResult(value=value, scalar=value,
encoding = scaledResult.encoding)]
def topDownCompute(self, encoded):
"""
See the function description in base.py
"""
scaledResult = self.encoder.topDownCompute(encoded)[0]
scaledValue = scaledResult.value
value = math.pow(10, scaledValue)
return EncoderResult(value=value, scalar=value,
encoding = scaledResult.encoding)
def closenessScores(self, expValues, actValues, fractional=True):
"""
See the function description in base.py
"""
# Compute the percent error in log space
if expValues[0] > 0:
expValue = math.log10(expValues[0])
else:
expValue = self.minScaledValue
if actValues [0] > 0:
actValue = math.log10(actValues[0])
else:
actValue = self.minScaledValue
if fractional:
err = abs(expValue - actValue)
pctErr = err / (self.maxScaledValue - self.minScaledValue)
pctErr = min(1.0, pctErr)
closeness = 1.0 - pctErr
else:
err = abs(expValue - actValue)
closeness = err
#print "log::", "expValue:", expValues[0], "actValue:", actValues[0], \
# "closeness", closeness
#import pdb; pdb.set_trace()
return numpy.array([closeness])
@classmethod
def getSchema(cls):
return LogEncoderProto
@classmethod
def read(cls, proto):
encoder = object.__new__(cls)
encoder.verbosity = proto.verbosity
encoder.minScaledValue = round(proto.minScaledValue, EPSILON_ROUND)
encoder.maxScaledValue = round(proto.maxScaledValue, EPSILON_ROUND)
encoder.clipInput = proto.clipInput
encoder.minval = round(proto.minval, EPSILON_ROUND)
encoder.maxval = round(proto.maxval, EPSILON_ROUND)
encoder.encoder = ScalarEncoder.read(proto.encoder)
encoder.name = proto.name
encoder.width = encoder.encoder.getWidth()
encoder.description = [(encoder.name, 0)]
encoder._bucketValues = None
encoder.encoders = None
return encoder
def write(self, proto):
proto.verbosity = self.verbosity
proto.minScaledValue = self.minScaledValue
proto.maxScaledValue = self.maxScaledValue
proto.clipInput = self.clipInput
proto.minval = self.minval
proto.maxval = self.maxval
self.encoder.write(proto.encoder)
proto.name = self.name
| 9,518 | Python | .py | 256 | 30.125 | 80 | 0.648434 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,992 | sparse_pass_through.py | numenta_nupic-legacy/src/nupic/encoders/sparse_pass_through.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
from nupic.encoders import pass_through
class SparsePassThroughEncoder(pass_through.PassThroughEncoder):
  """
  Convert a bitmap encoded as array indices to an SDR.

  Each encoding is an SDR in which ``w`` out of ``n`` bits are turned on.
  The input should be an array or string of indices to turn on.

  **Note:** the value for ``n`` must equal input length * w, for example:

  .. code-block:: python

     for n=8 w=1 [0,2,5] => 101001000

  or:

  .. code-block:: python

     for n=8 w=1 "0,2,5" => 101001000

  and:

  .. code-block:: python

     for n=24 w=3 [0,2,5] => 111000111000000111000000000

  or:

  .. code-block:: python

     for n=24 w=3 "0,2,5" => 111000111000000111000000000
  """

  def __init__(self, n, w=None, name="sparse_pass_through", forced=False, verbosity=0):
    """
    n is the total bits in input
    w is the number of bits used to encode each input bit
    """
    super(SparsePassThroughEncoder, self).__init__(n, w, name, forced, verbosity)

  def encodeIntoArray(self, value, output):
    """ See method description in base.py """
    # Expand the index list into a dense bit array, then delegate to the
    # pass-through encoder.
    dense = numpy.zeros(output.shape)
    try:
      dense[value] = 1
    except IndexError:
      # Float/object index arrays raise IndexError; report a clearer error.
      if isinstance(value, numpy.ndarray):
        raise ValueError(
            "Numpy array must have integer dtype but got {}".format(
                value.dtype))
      raise
    super(SparsePassThroughEncoder, self).encodeIntoArray(dense, output)
| 2,444 | Python | .py | 59 | 37.508475 | 87 | 0.669771 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,993 | multi.py | numenta_nupic-legacy/src/nupic/encoders/multi.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.encoders.base import Encoder
from nupic.encoders import (ScalarEncoder,
AdaptiveScalarEncoder,
DateEncoder,LogEncoder,
CategoryEncoder,
SDRCategoryEncoder,
DeltaEncoder,
ScalarSpaceEncoder,
PassThroughEncoder,
SparsePassThroughEncoder,
CoordinateEncoder,
GeospatialCoordinateEncoder,
RandomDistributedScalarEncoder)
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.multi_capnp import MultiEncoderProto
# Map class to Cap'n Proto schema union attribute: each concrete encoder
# class maps to the field name of its branch in the MultiEncoderProto union.
# Used by MultiEncoder.write() to pick the branch to serialize into.
_CLASS_ATTR_MAP = {
ScalarEncoder: "scalarEncoder",
AdaptiveScalarEncoder: "adaptiveScalarEncoder",
DateEncoder: "dateEncoder",
LogEncoder: "logEncoder",
CategoryEncoder: "categoryEncoder",
CoordinateEncoder: "coordinateEncoder",
SDRCategoryEncoder: "sdrCategoryEncoder",
DeltaEncoder: "deltaEncoder",
PassThroughEncoder: "passThroughEncoder",
SparsePassThroughEncoder: "sparsePassThroughEncoder",
GeospatialCoordinateEncoder: "geospatialCoordinateEncoder",
ScalarSpaceEncoder: "scalarSpaceEncoder",
RandomDistributedScalarEncoder: "randomDistributedScalarEncoder"
}
# Invert for fast lookup in MultiEncoder.read(): union branch name -> class.
_ATTR_CLASS_MAP = {value: key for key, value in _CLASS_ATTR_MAP.items()}
class MultiEncoder(Encoder):
  """
  A MultiEncoder encodes a dictionary or object with multiple components. A
  MultiEncoder contains a number of sub-encoders, each of which encodes a
  separate component.

  :param encoderDefinitions: a dict of dicts, mapping field names to the field
         params dict. Sent directly to :meth:`.addMultipleEncoders`.
  """

  def __init__(self, encoderDefinitions=None):
    self.name = ''
    self.width = 0
    # List of (fieldName, encoder, bit offset) triples, in insertion order.
    self.encoders = []
    self.description = []
    # Caches rebuilt lazily by the base class.
    self._flattenedEncoderList = None
    self._flattenedFieldTypeList = None
    if encoderDefinitions is not None:
      self.addMultipleEncoders(encoderDefinitions)

  def setFieldStats(self, fieldName, fieldStatistics):
    # Each sub-encoder receives its own registered name, not `fieldName`.
    for name, encoder, _ in self.encoders:
      encoder.setFieldStats(name, fieldStatistics)

  def addEncoder(self, name, encoder):
    """
    Adds one encoder.

    :param name: (string) name of encoder, should be unique
    :param encoder: (:class:`.Encoder`) the encoder to add
    """
    # The new encoder's bits begin where the previous encoders end.
    offset = self.width
    self.encoders.append((name, encoder, offset))
    self.description.extend(
        (childName, childOffset + offset)
        for childName, childOffset in encoder.getDescription())
    self.width += encoder.getWidth()

  def encodeIntoArray(self, obj, output):
    # Each sub-encoder writes into its own slice of the output.
    for name, encoder, offset in self.encoders:
      encoder.encodeIntoArray(self._getInputValue(obj, name), output[offset:])

  def getDescription(self):
    return self.description

  def getWidth(self):
    """Represents the sum of the widths of each fields encoding."""
    return self.width

  def setLearning(self, learningEnabled):
    for encoder in self.getEncoderList():
      encoder.setLearning(learningEnabled)

  def encodeField(self, fieldName, value):
    # Returns None when no encoder is registered under `fieldName`.
    for name, encoder, _ in self.encoders:
      if name == fieldName:
        return encoder.encode(value)

  def encodeEachField(self, inputRecord):
    return [encoder.encode(getattr(inputRecord, name))
            for name, encoder, _ in self.encoders]

  def addMultipleEncoders(self, fieldEncodings):
    """
    :param fieldEncodings: dict of dicts, mapping field names to the field
           params dict.

    Each field params dict has the following keys:

    1. ``fieldname``: data field name
    2. ``type`` an encoder type
    3. All other keys are encoder parameters

    For example,

    .. code-block:: python

       fieldEncodings={
           'dateTime': dict(fieldname='dateTime', type='DateEncoder',
                            timeOfDay=(5,5)),
           'attendeeCount': dict(fieldname='attendeeCount', type='ScalarEncoder',
                                 name='attendeeCount', minval=0, maxval=250,
                                 clipInput=True, w=5, resolution=10),
           'consumption': dict(fieldname='consumption',type='ScalarEncoder',
                               name='consumption', minval=0,maxval=110,
                               clipInput=True, w=5, resolution=5),
       }

    would yield a vector with a part encoded by the :class:`.DateEncoder`, and
    to parts seperately taken care of by the :class:`.ScalarEncoder` with the
    specified parameters. The three seperate encodings are then merged together
    to the final vector, in such a way that they are always at the same location
    within the vector.
    """
    # Sort the encoders so that they end up in a controlled order.
    for key, fieldParams in sorted(fieldEncodings.items()):
      if ':' in key or fieldParams is None:
        continue
      fieldParams = fieldParams.copy()
      fieldName = fieldParams.pop('fieldname')
      encoderName = fieldParams.pop('type')
      try:
        # NOTE(review): eval() resolves the encoder class by name; field
        # definitions must come from trusted configuration only.
        self.addEncoder(fieldName, eval(encoderName)(**fieldParams))
      except TypeError:
        print ("#### Error in constructing %s encoder. Possibly missing "
               "some required constructor parameters. Parameters "
               "that were provided are: %s" % (encoderName, fieldParams))
        raise

  @classmethod
  def getSchema(cls):
    return MultiEncoderProto

  @classmethod
  def read(cls, proto):
    encoder = object.__new__(cls)
    encoder._flattenedEncoderList = None
    encoder._flattenedFieldTypeList = None
    encoder.width = 0
    encoder.encoders = []
    for encoderProto in proto.encoders:
      # The union tells us which concrete encoder class serialized itself;
      # dispatch to that class's read() via _ATTR_CLASS_MAP.
      encoderType = encoderProto.which()
      encoderDetails = getattr(encoderProto, encoderType)
      subEncoder = _ATTR_CLASS_MAP.get(encoderType).read(encoderDetails)
      encoder.encoders.append(
          (encoderProto.name, subEncoder, encoderProto.offset))
      encoder.width += subEncoder.getWidth()
    # Derive description from the reconstructed encoder list.
    encoder.description = [(subEncoder.name, int(offset))
                           for _, subEncoder, offset in encoder.encoders]
    encoder.name = proto.name
    return encoder

  def write(self, proto):
    proto.init("encoders", len(self.encoders))
    for index, (name, subEncoder, offset) in enumerate(self.encoders):
      encoderProto = proto.encoders[index]
      # Select the union branch matching the concrete encoder class.
      encoderType = _CLASS_ATTR_MAP.get(subEncoder.__class__)
      encoderProto.init(encoderType)
      subEncoder.write(getattr(encoderProto, encoderType))
      encoderProto.name = name
      encoderProto.offset = offset
    proto.name = self.name
| 8,089 | Python | .py | 186 | 36.026882 | 80 | 0.673755 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,994 | scalar.py | numenta_nupic-legacy/src/nupic/encoders/scalar.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numbers
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.field_meta import FieldMetaType
from nupic.bindings.math import SM32, GetNTAReal
from nupic.encoders.base import Encoder, EncoderResult
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.scalar_capnp import ScalarEncoderProto
# "0" is the special "not set" value: exactly one of n, radius, resolution
# must be non-zero when constructing a ScalarEncoder.
DEFAULT_RADIUS = 0
DEFAULT_RESOLUTION = 0
class ScalarEncoder(Encoder):
"""
A scalar encoder encodes a numeric (floating point) value into an array
of bits. The output is 0's except for a contiguous block of 1's. The
location of this contiguous block varies continuously with the input value.
The encoding is linear. If you want a nonlinear encoding, just transform
the scalar (e.g. by applying a logarithm function) before encoding.
It is not recommended to bin the data as a pre-processing step, e.g.
"1" = $0 - $.20, "2" = $.21-$0.80, "3" = $.81-$1.20, etc. as this
removes a lot of information and prevents nearby values from overlapping
in the output. Instead, use a continuous transformation that scales
the data (a piecewise transformation is fine).
.. warning:: There are three mutually exclusive parameters that determine the
overall size of of the output. Exactly one of n, radius, resolution must be
set. "0" is a special value that means "not set".
:param w: The number of bits that are set to encode a single value - the
"width" of the output signal restriction: w must be odd to avoid
centering problems.
:param minval: The minimum value of the input signal.
:param maxval: The upper bound of the input signal. (input is strictly less if
``periodic == True``)
:param periodic: If true, then the input value "wraps around" such that
``minval`` = ``maxval``. For a periodic value, the input must be
strictly less than ``maxval``, otherwise ``maxval`` is a true
upper bound.
:param n: The number of bits in the output. Must be greater than or equal to
``w``
:param radius: Two inputs separated by more than the radius have
non-overlapping representations. Two inputs separated by less
than the radius will in general overlap in at least some of
their bits. You can think of this as the radius of the input.
:param resolution: Two inputs separated by greater than, or equal to the
resolution are guaranteed to have different
representations.
:param name: an optional string which will become part of the description
:param clipInput: if true, non-periodic inputs smaller than minval or greater
than maxval will be clipped to minval/maxval
:param forced: if true, skip some safety checks (for compatibility reasons),
default false
.. note:: ``radius`` and ``resolution`` are specified with respect to the
input, not output. ``w`` is specified with respect to the output.
**Example: day of week**
.. code-block:: text
w = 3
Minval = 1 (Monday)
Maxval = 8 (Monday)
periodic = true
n = 14
[equivalently: radius = 1.5 or resolution = 0.5]
The following values would encode midnight -- the start of the day
.. code-block:: text
monday (1) -> 11000000000001
tuesday(2) -> 01110000000000
wednesday(3) -> 00011100000000
...
sunday (7) -> 10000000000011
Since the resolution is 12 hours, we can also encode noon, as
.. code-block:: text
monday noon -> 11100000000000
monday midnt-> 01110000000000
tuesday noon -> 00111000000000
etc.
**`n` vs `resolution`**
It may not be natural to specify "n", especially with non-periodic
data. For example, consider encoding an input with a range of 1-10
(inclusive) using an output width of 5. If you specify resolution =
1, this means that inputs of 1 and 2 have different outputs, though
they overlap, but 1 and 1.5 might not have different outputs.
This leads to a 14-bit representation like this:
.. code-block:: text
1 -> 11111000000000 (14 bits total)
2 -> 01111100000000
...
10-> 00000000011111
[resolution = 1; n=14; radius = 5]
You could specify resolution = 0.5, which gives
.. code-block:: text
1 -> 11111000... (22 bits total)
1.5 -> 011111.....
2.0 -> 0011111....
[resolution = 0.5; n=22; radius=2.5]
You could specify radius = 1, which gives
.. code-block:: text
1 -> 111110000000.... (50 bits total)
2 -> 000001111100....
3 -> 000000000011111...
...
10 -> .....000011111
[radius = 1; resolution = 0.2; n=50]
An N/M encoding can also be used to encode a binary value,
where we want more than one bit to represent each state.
For example, we could have: w = 5, minval = 0, maxval = 1,
radius = 1 (which is equivalent to n=10)
.. code-block:: text
0 -> 1111100000
1 -> 0000011111
**Implementation details**
.. code-block:: text
range = maxval - minval
h = (w-1)/2 (half-width)
resolution = radius / w
n = w * range/radius (periodic)
n = w * range/radius + 2 * h (non-periodic)
"""
def __init__(self,
w,
minval,
maxval,
periodic=False,
n=0,
radius=DEFAULT_RADIUS,
resolution=DEFAULT_RESOLUTION,
name=None,
verbosity=0,
clipInput=False,
forced=False):
assert isinstance(w, numbers.Integral)
self.encoders = None
self.verbosity = verbosity
self.w = w
if (w % 2 == 0):
raise Exception("Width must be an odd number (%f)" % w)
self.minval = minval
self.maxval = maxval
self.periodic = periodic
self.clipInput = clipInput
self.halfwidth = (w - 1) / 2
# For non-periodic inputs, padding is the number of bits "outside" the range,
# on each side. I.e. the representation of minval is centered on some bit, and
# there are "padding" bits to the left of that centered bit; similarly with
# bits to the right of the center bit of maxval
if self.periodic:
self.padding = 0
else:
self.padding = self.halfwidth
if (minval is not None and maxval is not None):
if (minval >= maxval):
raise Exception("The encoder for %s is invalid. minval %s is greater than "
"or equal to maxval %s. minval must be strictly less "
"than maxval." % (name, minval, maxval))
self.rangeInternal = float(self.maxval - self.minval)
# There are three different ways of thinking about the representation. Handle
# each case here.
self._initEncoder(w, minval, maxval, n, radius, resolution)
# nInternal represents the output area excluding the possible padding on each
# side
if (minval is not None and maxval is not None):
self.nInternal = self.n - 2 * self.padding
# Our name
if name is not None:
self.name = name
else:
self.name = "[%s:%s]" % (self.minval, self.maxval)
# This matrix is used for the topDownCompute. We build it the first time
# topDownCompute is called
self._topDownMappingM = None
self._topDownValues = None
# This list is created by getBucketValues() the first time it is called,
# and re-created whenever our buckets would be re-arranged.
self._bucketValues = None
# checks for likely mistakes in encoder settings
if not forced:
self._checkReasonableSettings()
def _initEncoder(self, w, minval, maxval, n, radius, resolution):
""" (helper function) There are three different ways of thinking about the representation.
Handle each case here."""
if n != 0:
if (radius !=0 or resolution != 0):
raise ValueError("Only one of n/radius/resolution can be specified for a ScalarEncoder")
assert n > w
self.n = n
if (minval is not None and maxval is not None):
if not self.periodic:
self.resolution = float(self.rangeInternal) / (self.n - self.w)
else:
self.resolution = float(self.rangeInternal) / (self.n)
self.radius = self.w * self.resolution
if self.periodic:
self.range = self.rangeInternal
else:
self.range = self.rangeInternal + self.resolution
else:
if radius != 0:
if (resolution != 0):
raise ValueError("Only one of radius/resolution can be specified for a ScalarEncoder")
self.radius = radius
self.resolution = float(self.radius) / w
elif resolution != 0:
self.resolution = float(resolution)
self.radius = self.resolution * self.w
else:
raise Exception("One of n, radius, resolution must be specified for a ScalarEncoder")
if (minval is not None and maxval is not None):
if self.periodic:
self.range = self.rangeInternal
else:
self.range = self.rangeInternal + self.resolution
nfloat = self.w * (self.range / self.radius) + 2 * self.padding
self.n = int(math.ceil(nfloat))
def _checkReasonableSettings(self):
"""(helper function) check if the settings are reasonable for SP to work"""
if self.w < 21:
raise ValueError("Number of bits in the SDR (%d) must be >= 21 (use "
"forced=True to override)." % self.w)
def getDecoderOutputFieldTypes(self):
""" [Encoder class virtual method override]
"""
return (FieldMetaType.float, )
def getWidth(self):
return self.n
def _recalcParams(self):
self.rangeInternal = float(self.maxval - self.minval)
if not self.periodic:
self.resolution = float(self.rangeInternal) / (self.n - self.w)
else:
self.resolution = float(self.rangeInternal) / (self.n)
self.radius = self.w * self.resolution
if self.periodic:
self.range = self.rangeInternal
else:
self.range = self.rangeInternal + self.resolution
name = "[%s:%s]" % (self.minval, self.maxval)
def getDescription(self):
return [(self.name, 0)]
  def _getFirstOnBit(self, input):
    """ Return the bit offset of the first bit to be set in the encoder output.
    For periodic encoders, this can be a negative number when the encoded output
    wraps around.

    :param input: scalar value to encode, or SENTINEL_VALUE_FOR_MISSING_DATA
    :returns: one-element list [bitOffset]; [None] when the input is missing
    :raises Exception: if input is outside [minval, maxval] and clipping is
        disabled, or the encoder is periodic (periodic inputs are never
        clipped)
    """
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return [None]
    else:
      if input < self.minval:
        # Don't clip periodic inputs. Out-of-range input is always an error
        if self.clipInput and not self.periodic:
          if self.verbosity > 0:
            print "Clipped input %s=%.2f to minval %.2f" % (self.name, input,
                  self.minval)
          input = self.minval
        else:
          raise Exception('input (%s) less than range (%s - %s)' %
                          (str(input), str(self.minval), str(self.maxval)))
      if self.periodic:
        # Don't clip periodic inputs. Out-of-range input is always an error
        # (maxval itself is out of range: a periodic range is half-open).
        if input >= self.maxval:
          raise Exception('input (%s) greater than periodic range (%s - %s)' %
                          (str(input), str(self.minval), str(self.maxval)))
      else:
        if input > self.maxval:
          if self.clipInput:
            if self.verbosity > 0:
              print "Clipped input %s=%.2f to maxval %.2f" % (self.name, input,
                    self.maxval)
            input = self.maxval
          else:
            raise Exception('input (%s) greater than range (%s - %s)' %
                            (str(input), str(self.minval), str(self.maxval)))
      # Locate the center bit of the bucket this input falls into: periodic
      # encoders scale linearly into the internal bit range; non-periodic
      # encoders round to the nearest resolution step.  `padding` shifts past
      # the edge bits in both cases.
      if self.periodic:
        centerbin = int((input - self.minval) * self.nInternal / self.range) \
                    + self.padding
      else:
        centerbin = int(((input - self.minval) + self.resolution/2) \
                        / self.resolution ) + self.padding
      # We use the first bit to be set in the encoded output as the bucket index
      minbin = centerbin - self.halfwidth
      return [minbin]
def getBucketIndices(self, input):
""" See method description in base.py """
if type(input) is float and math.isnan(input):
input = SENTINEL_VALUE_FOR_MISSING_DATA
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
return [None]
minbin = self._getFirstOnBit(input)[0]
# For periodic encoders, the bucket index is the index of the center bit
if self.periodic:
bucketIdx = minbin + self.halfwidth
if bucketIdx < 0:
bucketIdx += self.n
# for non-periodic encoders, the bucket index is the index of the left bit
else:
bucketIdx = minbin
return [bucketIdx]
  def encodeIntoArray(self, input, output, learn=True):
    """ See method description in base.py

    Writes the encoding of `input` into the first self.n entries of `output`
    in place (all zeros for a missing value).

    :raises TypeError: if input is neither None nor a number
    """
    if input is not None and not isinstance(input, numbers.Number):
      raise TypeError(
          "Expected a scalar input but got input of type %s" % type(input))
    # NaN floats are treated the same as an explicitly-missing value.
    if type(input) is float and math.isnan(input):
      input = SENTINEL_VALUE_FOR_MISSING_DATA
    # Get the bucket index to use
    bucketIdx = self._getFirstOnBit(input)[0]
    if bucketIdx is None:
      # None is returned for missing value
      output[0:self.n] = 0 #TODO: should all 1s, or random SDR be returned instead?
    else:
      # The bucket index is the index of the first bit to set in the output
      output[:self.n] = 0
      minbin = bucketIdx
      maxbin = minbin + 2*self.halfwidth
      if self.periodic:
        # Handle the edges by computing wrap-around: bits that fall past
        # either end of the array are set at the opposite end instead.
        if maxbin >= self.n:
          bottombins = maxbin - self.n + 1
          output[:bottombins] = 1
          maxbin = self.n - 1
        if minbin < 0:
          topbins = -minbin
          output[self.n - topbins:self.n] = 1
          minbin = 0
      assert minbin >= 0
      assert maxbin < self.n
      # set the output (except for periodic wraparound)
      output[minbin:maxbin + 1] = 1
    # Debug the decode() method
    if self.verbosity >= 2:
      print
      print "input:", input
      print "range:", self.minval, "-", self.maxval
      print "n:", self.n, "w:", self.w, "resolution:", self.resolution, \
            "radius", self.radius, "periodic:", self.periodic
      print "output:",
      self.pprint(output)
      print "input desc:", self.decodedToStr(self.decode(output))
  def decode(self, encoded, parentFieldName=''):
    """ See the function description in base.py

    Decodes an output bit array back into input-space ranges.

    :param encoded: bit array (only the first self.n entries are examined)
    :param parentFieldName: optional prefix for the returned field name
    :returns: ({fieldName: (ranges, rangeDescription)}, [fieldName]), or
        (dict(), []) when no bits are on
    """
    # For now, we simply assume any top-down output greater than 0
    # is ON. Eventually, we will probably want to incorporate the strength
    # of each top-down output.
    tmpOutput = numpy.array(encoded[:self.n] > 0).astype(encoded.dtype)
    if not tmpOutput.any():
      return (dict(), [])
    # ------------------------------------------------------------------------
    # First, assume the input pool is not sampled 100%, and fill in the
    # "holes" in the encoded representation (which are likely to be present
    # if this is a coincidence that was learned by the SP).
    # Search for portions of the output that have "holes": a pattern of
    # 1, then up to halfwidth 0's, then 1 is filled in with 1's.
    maxZerosInARow = self.halfwidth
    for i in xrange(maxZerosInARow):
      # searchStr is [1, 0 * (i+1), 1]
      searchStr = numpy.ones(i + 3, dtype=encoded.dtype)
      searchStr[1:-1] = 0
      subLen = len(searchStr)
      # Does this search string appear in the output?
      if self.periodic:
        # Periodic encoders must also match patterns that wrap around the
        # end of the array, hence the modular index arithmetic.
        for j in xrange(self.n):
          outputIndices = numpy.arange(j, j + subLen)
          outputIndices %= self.n
          if numpy.array_equal(searchStr, tmpOutput[outputIndices]):
            tmpOutput[outputIndices] = 1
      else:
        for j in xrange(self.n - subLen + 1):
          if numpy.array_equal(searchStr, tmpOutput[j:j + subLen]):
            tmpOutput[j:j + subLen] = 1
    if self.verbosity >= 2:
      print "raw output:", encoded[:self.n]
      print "filtered output:", tmpOutput
    # ------------------------------------------------------------------------
    # Find each run of 1's.
    nz = tmpOutput.nonzero()[0]
    runs = [] # will be tuples of (startIdx, runLength)
    run = [nz[0], 1]
    i = 1
    while (i < len(nz)):
      if nz[i] == run[0] + run[1]:
        # Still contiguous: extend the current run.
        run[1] += 1
      else:
        runs.append(run)
        run = [nz[i], 1]
      i += 1
    runs.append(run)
    # If we have a periodic encoder, merge the first and last run if they
    # both go all the way to the edges
    if self.periodic and len(runs) > 1:
      if runs[0][0] == 0 and runs[-1][0] + runs[-1][1] == self.n:
        runs[-1][1] += runs[0][1]
        runs = runs[1:]
    # ------------------------------------------------------------------------
    # Now, for each group of 1's, determine the "left" and "right" edges, where
    # the "left" edge is inset by halfwidth and the "right" edge is inset by
    # halfwidth.
    # For a group of width w or less, the "left" and "right" edge are both at
    # the center position of the group.
    ranges = []
    for run in runs:
      (start, runLen) = run
      if runLen <= self.w:
        left = right = start + runLen / 2
      else:
        left = start + self.halfwidth
        right = start + runLen - 1 - self.halfwidth
      # Convert to input space.
      if not self.periodic:
        inMin = (left - self.padding) * self.resolution + self.minval
        inMax = (right - self.padding) * self.resolution + self.minval
      else:
        inMin = (left - self.padding) * self.range / self.nInternal + self.minval
        inMax = (right - self.padding) * self.range / self.nInternal + self.minval
      # Handle wrap-around if periodic
      if self.periodic:
        if inMin >= self.maxval:
          inMin -= self.range
          inMax -= self.range
      # Clip low end
      if inMin < self.minval:
        inMin = self.minval
      if inMax < self.minval:
        inMax = self.minval
      # If we have a periodic encoder, and the max is past the edge, break into
      # 2 separate ranges
      if self.periodic and inMax >= self.maxval:
        ranges.append([inMin, self.maxval])
        ranges.append([self.minval, inMax - self.range])
      else:
        # Clip high end for the non-wrapping case.
        if inMax > self.maxval:
          inMax = self.maxval
        if inMin > self.maxval:
          inMin = self.maxval
        ranges.append([inMin, inMax])
    desc = self._generateRangeDescription(ranges)
    # Return result
    if parentFieldName != '':
      fieldName = "%s.%s" % (parentFieldName, self.name)
    else:
      fieldName = self.name
    return ({fieldName: (ranges, desc)}, [fieldName])
def _generateRangeDescription(self, ranges):
"""generate description from a text description of the ranges"""
desc = ""
numRanges = len(ranges)
for i in xrange(numRanges):
if ranges[i][0] != ranges[i][1]:
desc += "%.2f-%.2f" % (ranges[i][0], ranges[i][1])
else:
desc += "%.2f" % (ranges[i][0])
if i < numRanges - 1:
desc += ", "
return desc
  def _getTopDownMapping(self):
    """ Return the internal _topDownMappingM matrix used for handling the
    bucketInfo() and topDownCompute() methods. This is a matrix, one row per
    category (bucket) where each row contains the encoded output for that
    category.

    Built lazily on first use and cached in self._topDownMappingM; also
    populates self._topDownValues as a side effect.
    """
    # Do we need to build up our reverse mapping table?
    if self._topDownMappingM is None:
      # The input scalar value corresponding to each possible output encoding
      if self.periodic:
        self._topDownValues = numpy.arange(self.minval + self.resolution / 2.0,
                                           self.maxval,
                                           self.resolution)
      else:
        # Number of values is (max-min)/resolution
        self._topDownValues = numpy.arange(self.minval,
                                           self.maxval + self.resolution / 2.0,
                                           self.resolution)
      # Each row represents an encoded output pattern
      numCategories = len(self._topDownValues)
      self._topDownMappingM = SM32(numCategories, self.n)
      outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())
      for i in xrange(numCategories):
        value = self._topDownValues[i]
        # Clamp to the representable range before encoding the sample value.
        value = max(value, self.minval)
        value = min(value, self.maxval)
        self.encodeIntoArray(value, outputSpace, learn=False)
        self._topDownMappingM.setRowFromDense(i, outputSpace)
    return self._topDownMappingM
def getBucketValues(self):
""" See the function description in base.py """
# Need to re-create?
if self._bucketValues is None:
topDownMappingM = self._getTopDownMapping()
numBuckets = topDownMappingM.nRows()
self._bucketValues = []
for bucketIdx in range(numBuckets):
self._bucketValues.append(self.getBucketInfo([bucketIdx])[0].value)
return self._bucketValues
def getBucketInfo(self, buckets):
""" See the function description in base.py """
# Get/generate the topDown mapping table
#NOTE: although variable topDownMappingM is unused, some (bad-style) actions
#are executed during _getTopDownMapping() so this line must stay here
topDownMappingM = self._getTopDownMapping()
# The "category" is simply the bucket index
category = buckets[0]
encoding = self._topDownMappingM.getRow(category)
# Which input value does this correspond to?
if self.periodic:
inputVal = (self.minval + (self.resolution / 2.0) +
(category * self.resolution))
else:
inputVal = self.minval + (category * self.resolution)
return [EncoderResult(value=inputVal, scalar=inputVal, encoding=encoding)]
def topDownCompute(self, encoded):
""" See the function description in base.py
"""
# Get/generate the topDown mapping table
topDownMappingM = self._getTopDownMapping()
# See which "category" we match the closest.
category = topDownMappingM.rightVecProd(encoded).argmax()
# Return that bucket info
return self.getBucketInfo([category])
def closenessScores(self, expValues, actValues, fractional=True):
""" See the function description in base.py
"""
expValue = expValues[0]
actValue = actValues[0]
if self.periodic:
expValue = expValue % self.maxval
actValue = actValue % self.maxval
err = abs(expValue - actValue)
if self.periodic:
err = min(err, self.maxval - err)
if fractional:
pctErr = float(err) / (self.maxval - self.minval)
pctErr = min(1.0, pctErr)
closeness = 1.0 - pctErr
else:
closeness = err
return numpy.array([closeness])
def __str__(self):
string = "ScalarEncoder:"
string += " min: {minval}".format(minval = self.minval)
string += " max: {maxval}".format(maxval = self.maxval)
string += " w: {w}".format(w = self.w)
string += " n: {n}".format(n = self.n)
string += " resolution: {resolution}".format(resolution = self.resolution)
string += " radius: {radius}".format(radius = self.radius)
string += " periodic: {periodic}".format(periodic = self.periodic)
string += " nInternal: {nInternal}".format(nInternal = self.nInternal)
string += " rangeInternal: {rangeInternal}".format(rangeInternal = self.rangeInternal)
string += " padding: {padding}".format(padding = self.padding)
return string
  @classmethod
  def getSchema(cls):
    """Return the capnp proto schema class used to (de)serialize this encoder."""
    return ScalarEncoderProto
  @classmethod
  def read(cls, proto):
    """Deserialize a ScalarEncoder from a capnp proto reader.

    :param proto: ScalarEncoderProto reader
    :returns: a new encoder configured from the proto fields
    """
    # NOTE(review): the `radius` / `resolution` values computed here are never
    # passed to the constructor below, which rebuilds them from `n` instead
    # (write() always serializes n).  Confirm whether this branch is dead
    # code before removing it.
    if proto.n is not None:
      radius = DEFAULT_RADIUS
      resolution = DEFAULT_RESOLUTION
    else:
      radius = proto.radius
      resolution = proto.resolution
    # forced=True skips the w >= 21 sanity check (the serialized encoder was
    # already validated when first constructed).
    return cls(w=proto.w,
               minval=proto.minval,
               maxval=proto.maxval,
               periodic=proto.periodic,
               n=proto.n,
               name=proto.name,
               verbosity=proto.verbosity,
               clipInput=proto.clipInput,
               forced=True)
def write(self, proto):
proto.w = self.w
proto.minval = self.minval
proto.maxval = self.maxval
proto.periodic = self.periodic
# Radius and resolution can be recalculated based on n
proto.n = self.n
proto.name = self.name
proto.verbosity = self.verbosity
proto.clipInput = self.clipInput
| 25,452 | Python | .py | 596 | 35.290268 | 96 | 0.632022 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,995 | delta.py | numenta_nupic-legacy/src/nupic/encoders/delta.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numbers

import numpy

from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.adaptive_scalar import AdaptiveScalarEncoder
from nupic.encoders.base import EncoderResult
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.delta_capnp import DeltaEncoderProto
class DeltaEncoder(AdaptiveScalarEncoder):
  """
  This is an implementation of a delta encoder. The delta encoder encodes
  differences between successive scalar values instead of encoding the actual
  values. It returns an actual value when decoding and not a delta.
  """

  def __init__(self, w, minval=None, maxval=None, periodic=False, n=0, radius=0,
               resolution=0, name=None, verbosity=0, clipInput=True, forced=False):
    """[ScalarEncoder class method override]

    :param w: number of bits to set in the output
    :param minval, maxval: initial range hint for the wrapped adaptive scalar
           encoder (the range adapts as data arrives)
    :param periodic: must be False; deltas are not periodic quantities
    :param n: total number of bits in the output; required, since an adaptive
           encoder can only be initialized using n
    :raises Exception: if periodic is True
    """
    self._learningEnabled = True
    # While the state is locked, encoding does not update the stored previous
    # absolute/delta values.
    self._stateLock = False
    self.width = 0
    self.encoders = None
    self.description = []
    self.name = name
    if periodic:
      # Delta scalar encoders take non-periodic inputs only
      raise Exception('Delta encoder does not encode periodic inputs')
    assert n != 0  # An adaptive encoder can only be initialized using n
    self._adaptiveScalarEnc = AdaptiveScalarEncoder(w=w, n=n, minval=minval,
        maxval=maxval, clipInput=True, name=name, verbosity=verbosity,
        forced=forced)
    self.width += self._adaptiveScalarEnc.getWidth()
    self.n = self._adaptiveScalarEnc.n
    # Previous absolute input value and the previous encoded delta; both are
    # None until the first input has been seen.
    self._prevAbsolute = None
    self._prevDelta = None

  def encodeIntoArray(self, input, output, learn=None):
    """Encode the delta between `input` and the previous absolute value.

    :param input: current absolute scalar value, or the missing-data sentinel
           (which produces an all-zero encoding)
    :param output: numpy array of length self.n, written in place
    :param learn: passed through to the wrapped adaptive encoder; defaults to
           this encoder's learning flag
    :raises TypeError: if input is not a number
    :returns: output
    """
    if not isinstance(input, numbers.Number):
      raise TypeError(
          "Expected a scalar input but got input of type %s" % type(input))
    if learn is None:
      learn = self._learningEnabled
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      output[0:self.n] = 0
    else:
      # Make the first delta zero so that the delta ranges are not messed up.
      # (Fixed: use `is None` instead of `== None`.)
      if self._prevAbsolute is None:
        self._prevAbsolute = input
      delta = input - self._prevAbsolute
      self._adaptiveScalarEnc.encodeIntoArray(delta, output, learn)
      if not self._stateLock:
        self._prevAbsolute = input
        self._prevDelta = delta
    return output

  def setStateLock(self, lock):
    """Lock/unlock the previous-value state used to compute deltas."""
    self._stateLock = lock

  def setFieldStats(self, fieldName, fieldStatistics):
    """No-op: field statistics are not used by the delta encoder."""
    pass

  def getBucketIndices(self, input, learn=None):
    """Delegate to the wrapped adaptive scalar encoder."""
    return self._adaptiveScalarEnc.getBucketIndices(input, learn)

  def getBucketInfo(self, buckets):
    """Delegate to the wrapped adaptive scalar encoder."""
    return self._adaptiveScalarEnc.getBucketInfo(buckets)

  def topDownCompute(self, encoded):
    """[ScalarEncoder class method override]

    Decode `encoded` to a delta and add it back onto the previous absolute
    input, so callers get an absolute value rather than a delta.
    """
    # With no history there is no baseline to add the delta to, so return a
    # zero result.  (Fixed: `numpy` was referenced here without ever being
    # imported by this module, which made this branch raise NameError.)
    if self._prevAbsolute is None or self._prevDelta is None:
      return [EncoderResult(value=0, scalar=0,
                            encoding=numpy.zeros(self.n))]
    ret = self._adaptiveScalarEnc.topDownCompute(encoded)
    # Shift the decoded delta back into absolute coordinates.  The guard
    # above guarantees self._prevAbsolute is not None here, so the old
    # redundant re-check was dropped.
    return [EncoderResult(value=ret[0].value + self._prevAbsolute,
                          scalar=ret[0].scalar + self._prevAbsolute,
                          encoding=ret[0].encoding)]

  @classmethod
  def getSchema(cls):
    """Return the capnp schema class used to (de)serialize DeltaEncoder."""
    return DeltaEncoderProto

  @classmethod
  def read(cls, proto):
    """Deserialize a DeltaEncoder from a DeltaEncoderProto reader.

    Note: 0 is the serialized stand-in for "no previous value", so a stored
    zero previous absolute/delta is restored as None.
    """
    encoder = object.__new__(cls)
    encoder.width = proto.width
    encoder.name = proto.name or None
    encoder.n = proto.n
    encoder._adaptiveScalarEnc = (
      AdaptiveScalarEncoder.read(proto.adaptiveScalarEnc)
    )
    encoder._prevAbsolute = None if proto.prevAbsolute == 0 else proto.prevAbsolute
    encoder._prevDelta = None if proto.prevDelta == 0 else proto.prevDelta
    encoder._stateLock = proto.stateLock
    encoder._learningEnabled = proto.learningEnabled
    encoder.description = []
    encoder.encoders = None
    return encoder

  def write(self, proto):
    """Serialize this encoder into a DeltaEncoderProto builder."""
    proto.width = self.width
    proto.name = self.name or ""
    proto.n = self.n
    self._adaptiveScalarEnc.write(proto.adaptiveScalarEnc)
    if self._prevAbsolute:
      proto.prevAbsolute = self._prevAbsolute
    if self._prevDelta:
      proto.prevDelta = self._prevDelta
    proto.stateLock = self._stateLock
    proto.learningEnabled = self._learningEnabled
25,996 | makeDataset.py | numenta_nupic-legacy/src/nupic/datafiles/extra/regression/makeDataset.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file import File
def scaleData(data, newScale=(0, 100)):
  """Linearly rescale each column of `data` into the interval `newScale`.

  :param data: 2-D numpy array; min/max are computed per column
  :param newScale: (low, high) target interval.  Changed from a mutable list
      default to a tuple (same values) to avoid the shared-mutable-default
      pitfall; callers may still pass a list.
  :returns: the rescaled array (a new array; the input is not modified)
  """
  minVals = data.min(axis=0)
  maxVals = data.max(axis=0)
  data = (data-minVals)*(newScale[1]-newScale[0])/(maxVals-minVals) + newScale[0]
  return data
def generatePolyData(numDataPoints=100,
                     coefficients=[1, 0],
                     noiseLevel = 0.1,
                     dataScale = [0,100],):
  """Sample a noisy polynomial y(x) and return it rescaled.

  Returns an array of shape (numDataPoints, 2): column 0 holds the noisy
  polynomial values, column 1 the uniformly-sampled x values, both rescaled
  into dataScale by scaleData().
  """
  xvals = numpy.random.random(numDataPoints)
  noise = noiseLevel * numpy.random.randn(numDataPoints)
  yvals = numpy.polyval(coefficients, xvals) + noise
  data = numpy.vstack((yvals, xvals)).transpose()
  return scaleData(data, newScale=dataScale)
def generateLinearData(numDataPoints=100,
                       coefficients=[1, 1],
                       noiseLevel = 0.1,
                       dataScale = [0,100],):
  """Sample a noisy linear combination of random inputs and rescale it.

  Returns an array of shape (numDataPoints, 1 + len(coefficients)): column 0
  is the noisy weighted sum of the remaining (uniform random) columns, all
  rescaled into dataScale by scaleData().
  """
  xvals = numpy.random.random((numDataPoints, len(coefficients)))
  noise = noiseLevel * numpy.random.randn(numDataPoints)
  yvals = (xvals * coefficients).sum(axis=1) + noise
  data = numpy.hstack((yvals.reshape(-1, 1), xvals))
  return scaleData(data, newScale=dataScale)
def _generateLinearModel(numTrainingRecords, numTestingRecords,
                         coefficients=[1], noiseLevel=0.1, dataScale=[0,100]):
  """Generate one linear dataset and split it into train and test partitions.

  :returns: (trainData, testData) -- the first numTrainingRecords rows and
      the remaining numTestingRecords rows of a single generated dataset
  """
  totalRecords = numTrainingRecords + numTestingRecords
  data = generateLinearData(numDataPoints=totalRecords,
                            coefficients=coefficients,
                            noiseLevel=noiseLevel,
                            dataScale=dataScale,)
  return data[:numTrainingRecords], data[numTrainingRecords:]
def _generateFile(filename, data):
  """Write a 2-D array out as a .csv via nupic's File wrapper.

  Parameters:
  ----------------------------------------------------------------
  filename:       name of .csv file to generate
  data:           numpy array of shape (numRecords, numFields); every column
                  is written as a float field named field1, field2, ...
  """
  print("Creating %s..." % filename)
  numRecords, numFields = data.shape
  fieldDefs = [('field%d' % (colIdx + 1), 'float', '')
               for colIdx in range(numFields)]
  outFile = File(filename, fieldDefs)
  for rowIdx in range(numRecords):
    outFile.write(data[rowIdx].tolist())
  outFile.close()
def generate(model, filenameTrain, filenameTest,
             numTrainingRecords=10000, numTestingRecords=1000,):
  """Generate a train/test pair of csv files for the named synthetic model.

  :param model: one of 'linear0', 'linear1', 'linear2'
  :raises RuntimeError: for any other model name
  """
  numpy.random.seed(41)
  # ====================================================================
  # Generate the model: each name maps to the _generateLinearModel keyword
  # arguments that define it ('linear2' keeps the default noise level).
  modelParams = {
      'linear0': dict(coefficients=[1], noiseLevel=0.1),
      'linear1': dict(coefficients=[1, 1], noiseLevel=0.1),
      'linear2': dict(coefficients=[1, -3]),
  }
  if model not in modelParams:
    raise RuntimeError("Unsupported model")
  trainData, testData = _generateLinearModel(numTrainingRecords,
                                             numTestingRecords,
                                             **modelParams[model])
  # ====================================================================
  # Generate the training and testing files
  _generateFile(filename=filenameTrain, data=trainData,)
  _generateFile(filename=filenameTest, data=testData,)
| 4,850 | Python | .py | 108 | 34.638889 | 81 | 0.578431 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,997 | GenerateSampleData.py | numenta_nupic-legacy/src/nupic/datafiles/extra/generated/GenerateSampleData.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import csv
import numpy as np
def writeSimpleTest1(filePath, numRecords, testNumber):
    """ Generates requested number of records and saves in a csv file

    The output file is filePath + '.csv' and begins with the standard 3-line
    header (field names, field types, special flags).

    filePath:    output path, without the '.csv' extension
    numRecords:  number of data rows to generate
    testNumber:  which of the 8 dataset layouts to produce (see the
                 per-test descriptions in the __main__ block below)
    """
    # NOTE(review): np.random.random_integers is deprecated (removed in
    # modern NumPy); np.random.randint(0, 101) is the inclusive-range
    # equivalent -- confirm numpy version before migrating.
    with open(filePath+'.csv', 'wb') as f:
        writer = csv.writer(f)
        if testNumber == 1:
            # field2 tracks field1 plus gaussian noise.
            writer.writerow(['field1', 'field2'])
            writer.writerow(['int', 'int'])
            writer.writerow(['', ''])
            # BUG FIX: this loop previously called the undefined name
            # `ranger`, raising NameError at runtime.
            for i in range(0, numRecords):
                field1 = int(np.random.random_integers(0, 100, 1))
                field2 = field1 + int(0.025*np.random.normal(0, 100, 1))
                writer.writerow([field1, field2])
        elif testNumber == 2:
            # Same pair as test 1 plus an independent random third field.
            writer.writerow(['field1', 'field2', 'field3'])
            writer.writerow(['int', 'int', 'int'])
            writer.writerow(['', '', ''])
            for i in range(0, numRecords):
                field1 = int(np.random.random_integers(0, 100, 1))
                field2 = field1 + int(0.025*np.random.normal(0, 100, 1))
                field3 = int(np.random.random_integers(0, 100, 1))
                writer.writerow([field1, field2, field3])
        elif testNumber == 3:
            # field1 is the exact (noise-free) sum of field2 and field3;
            # field4 is independent noise.
            writer.writerow(['field1', 'field2', 'field3', 'field4'])
            writer.writerow(['int', 'int', 'int', 'int'])
            writer.writerow(['', '', '', ''])
            for i in range(0, numRecords):
                field2 = int(np.random.random_integers(0, 100, 1))
                field3 = int(np.random.random_integers(0, 100, 1))
                field1 = field2 + field3
                field4 = int(np.random.random_integers(0, 100, 1))
                writer.writerow([field1, field2, field3, field4])
        elif testNumber == 4 or testNumber == 5:
            # Deterministic walk over all category pairs:
            # (a,a)->(a,b)->...->(a,last)->(b,a)->... wrapping cyclically
            # until numRecords rows have been written.
            writer.writerow(['field1', 'field2'])
            writer.writerow(['string', 'string'])
            writer.writerow(['', ''])
            if testNumber == 5:
                categories = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
                              'k', 'l', 'm', 'n', 'o', 'p']
            else:
                categories = ['a', 'b', 'c', 'd']
            numRecsSaved = 0
            firstFieldInd = 0
            done = False
            # Simplified from a redundant doubly-nested `while not done`
            # loop; the emitted row sequence is identical.
            while not done:
                field1 = categories[firstFieldInd]
                for field2 in categories:
                    writer.writerow([field1, field2])
                    numRecsSaved += 1
                    if numRecsSaved == numRecords:
                        done = True
                        break
                firstFieldInd = (firstFieldInd + 1) % len(categories)
        elif testNumber == 6:
            # field1 uniform over (a,b,c); field2 is drawn from the matching
            # probability row (0.9 on the paired value, 0.05 elsewhere).
            writer.writerow(['field1', 'field2'])
            writer.writerow(['string', 'string'])
            writer.writerow(['', ''])
            choices = [
                ['a', [0.9, 0.05, 0.05]],
                ['b', [0.05, 0.9, 0.05]],
                ['c', [0.05, 0.05, 0.9]]
            ]
            cat2 = ['d', 'e', 'f']
            for i in range(0, numRecords):
                ind1 = int(np.random.random_integers(0, 2, 1))
                field1 = choices[ind1][0]
                # Sample field2 via the row's cumulative distribution.
                ind2 = np.searchsorted(np.cumsum(choices[ind1][1]), np.random.random())
                field2 = cat2[ind2]
                writer.writerow([field1, field2])
        elif testNumber == 7:
            # Same as test 6 plus an independent random third field.
            writer.writerow(['field1', 'field2', 'field3'])
            writer.writerow(['string', 'string', 'string'])
            writer.writerow(['', '', ''])
            choices = [
                ['a', [0.9, 0.05, 0.05]],
                ['b', [0.05, 0.9, 0.05]],
                ['c', [0.05, 0.05, 0.9]]
            ]
            cat2 = ['d', 'e', 'f']
            cat3 = ['g', 'h', 'i']
            for i in range(0, numRecords):
                ind1 = int(np.random.random_integers(0, 2, 1))
                field1 = choices[ind1][0]
                ind2 = np.searchsorted(np.cumsum(choices[ind1][1]), np.random.random())
                field2 = cat2[ind2]
                ind3 = int(np.random.random_integers(0, 2, 1))
                field3 = cat3[ind3]
                writer.writerow([field1, field2, field3])
        elif testNumber == 8:
            # field3 depends probabilistically on the (field1, field2) pair,
            # which is chosen uniformly from all nine combinations.
            writer.writerow(['field1', 'field2', 'field3'])
            writer.writerow(['string', 'string', 'string'])
            writer.writerow(['', '', ''])
            choices = [
                ['a', 'd', [0.9, 0.05, 0.05]],
                ['a', 'e', [0.05, 0.9, 0.05]],
                ['a', 'f', [0.05, 0.05, 0.9]],
                ['b', 'd', [0.9, 0.05, 0.05]],
                ['b', 'e', [0.05, 0.9, 0.05]],
                ['b', 'f', [0.05, 0.05, 0.9]],
                ['c', 'd', [0.9, 0.05, 0.05]],
                ['c', 'e', [0.05, 0.9, 0.05]],
                ['c', 'f', [0.05, 0.05, 0.9]]
            ]
            cat3 = ['g', 'h', 'i']
            for i in range(0, numRecords):
                ind1 = int(np.random.random_integers(0, 8, 1))
                field1 = choices[ind1][0]
                field2 = choices[ind1][1]
                ind2 = np.searchsorted(np.cumsum(choices[ind1][2]), np.random.random())
                field3 = cat3[ind2]
                writer.writerow([field1, field2, field3])
    return
if __name__ == '__main__':
  # Usage: python GenerateSampleData.py <filePath> <numRecords> <testNumber>
  # (the '.csv' extension is appended to <filePath> automatically)
  np.random.seed(83)
  # Test 1
  # 2 fields. field2 = field1 + noise (5%). Values are 0-100 (plus noise)
  # Test 2
  # 3 fields, field 1 and 2 are the same as in #1, but 3rd field is random.
  # Values are 0-100.
  # Test 3
  # 4 fields, field1 = field2 + field3 (no noise), field4 is random.
  # Values are 0-100.
  # Test 4
  # 2 fields, categories. Each category can have 4 values (a, b, c, d).
  # Data in the following structure
  # (a,a)->(a,b)->(a, c)->(a,d)->(b,a)->(b,b) and so on
  # Test 5
  # 2 fields, categories. The data is the same as in #4,
  # but each category can have 16 values (a,b, ...p)
  # Test 6
  # 2 fields, categories. First field is one of (a, b, c).
  # Second field is (a->d, b->e, c->f) with probabilities (0.9 and 0.05, 0.05)
  # Test 7
  # 3 fields. 2 fields are the same as in #6, 3rd field is random (g, h, i)
  # Test 8
  # 3 fields. 1st field is (a, b, c), 2nd is (d, e, f). 3rd field is
  # (a,d -> g), (a, e -> h), (a, f -> i) and so on, with probabilities
  # (0.9, 0.05, 0.05)
  print 'Generating %s with %s records, test #%s' % \
        (sys.argv[1], sys.argv[2], sys.argv[3])
  writeSimpleTest1(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))
25,998 | makeDataset.py | numenta_nupic-legacy/src/nupic/datafiles/extra/gym/raw/makeDataset.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2010-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unify the various Gym CSV files to a single coherent CSV file
The Gym dataset has two file types:
1. Hourly attendance data per gym
2. KW consumption in 15 minutes intervals
The createDataset() function merges the two file types and creates
a single CSV file with hourly data. Each record contains the following fields:
Gym name, Date, Hour, # Atendees, KW consumption
"""
import os
import sys
import fileinput
import glob
import operator
import datetime
from nupic.data.file import File
months = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
class Record(object):
  """One hourly data point for a gym: attendance plus KW consumption."""
  def __init__(self):
    # All fields start empty/zero; they are filled in later by the Club
    # methods that parse the attendance and consumption files.
    self.club = ''
    self.date = None
    self.time = 0
    self.KW = 0
    self.attendeeCount = 0
    self.consumption = 0
class Club(object):
  """Accumulates hourly attendance and consumption records for one gym.

  Records are stored in self.records keyed by ((yyyy, mm, dd), hour).
  """
  def __init__(self, name):
    self.name = name
    # ((yyyy, mm, dd), hour) -> Record
    self.records = {}
  def processAttendance(self, f):
    """Consume this club's attendance rows from the file iterator `f`,
    adding 24 hourly Record objects per day via addRecord().  Returns when
    the club's terminating empty row is reached."""
    # Skip first two
    line = f.next()
    assert line == ',,,,,,,,,,,,,,,,,,,\n'
    line = f.next()
    assert line == 'Date Of Swipe, < 6 am,6-7 am,7-8 am,8-9 am,9-10 am,10-11 am,11-12 am,12-1 pm,1-2 pm,2-3 pm,3-4 pm,4-5 pm,5-6 pm,6-7 pm,7-8 pm,8-9 pm,9-10 pm,> 10 pm,Totals\n'
    for i, line in enumerate(f):
      # Check whether we're done with this club
      if line == ',,,,,,,,,,,,,,,,,,,\n':
        # skip next two lines (the club totals and a trailing empty row)
        line = f.next()
        assert line.startswith('Club Totals:')
        line = f.next()
        assert line == ',,,,,,,,,,,,,,,,,,,\n'
        return
      else:
        self.addRecord(line)
  def addRecord(self, line):
    """Parse one 'dd-Mon-yy' attendance row and create 24 hourly Records."""
    fields = line.split(',')
    assert len(fields) == 20
    date = fields[0].split('-')
    # Convert day to 'dd'
    dd = int(date[0])
    # Convert month name to a 1-based month number
    mm = months.index(date[1]) + 1
    assert mm in (9, 10)
    # Convert year from 'yy' to 'yyyy'
    yyyy = 2000 + int(date[2])
    date = (yyyy, mm, dd)
    # Add 0 for hours without attendants (<12AM-4AM and 11PM)
    attendance = [0] * 5 + fields[1:19] + [0]
    assert len(attendance) == 24
    # Create a record for each hour of the day.
    for i, a in enumerate(attendance):
      r = Record()
      r.club = self.name
      r.timestamp = datetime.datetime(yyyy, mm, dd, i)
      #r.time = i
      r.attendeeCount = a
      self.records[(date, i)] = r
  def updateRecord(self, date, t, consumption):
    """Attach `consumption` to the existing record for (`date`, hour `t`).

    `date` is a 'dd/mm/yyyy' string, optionally followed by a time portion
    which is discarded (hours are tracked by the caller via `t`).
    """
    # Get rid of time and AM/PM if needed
    date = date.split()[0]
    # Split 'dd/mm/yyyy' into its components
    date = date.split('/')
    # Convert each component to an integer
    dd = int(date[0])
    mm = int(date[1])
    yyyy = int(date[2])
    # Locate record
    key = ((yyyy, mm, dd), t)
    if not key in self.records:
      print self.name, 'is missing attendance data for', key
    else:
      r = self.records[key]
      r.consumption = consumption
def processClubAttendance(f, clubs):
  """Process the attendance data of one club.

  Locates the next club section in the file iterator `f`, looks up (or
  creates) the matching Club object in the `clubs` dict, and lets it consume
  its own attendance rows.  Returns True when a club was processed and False
  once the file is exhausted (StopIteration), telling the caller to stop
  looping.
  """
  try:
    # The file format is inconsistent: skip however many empty rows appear
    # before the club header.
    line = f.next()
    while line == ',,,,,,,,,,,,,,,,,,,\n':
      line = f.next()
    # The first non-empty line carries the club name in its first column.
    name = line.split(',')[0]
    # Fetch the club, creating a fresh Club object on first sight.
    club = clubs.setdefault(name, Club(name))
    club.processAttendance(f)
    return True
  except StopIteration:
    return False
def processClubConsumption(f, clubs):
  """Process the consumption data for all clubs in one stream

  f -- an iterator over the lines of the consumption CSV file(s)
  clubs -- dict mapping club name -> Club object (must already exist)

  - Skip the header line
  - Read 4 records at a time and aggregate their consumption into one
    hourly value
  - Parse each line: running count, club, timestamp, consumption
  - Look up the club object only when the club name changes
  - Call club.updateRecord() with each aggregated hourly value

  Returns None when the stream is exhausted.
  """
  try:
    # Skip header line (leading field name varies, so only the tail is
    # checked). next(f) is the py2.6+/py3-compatible form of f.next().
    line = next(f)
    assert line.endswith('" ","SITE_LOCATION_NAME","TIMESTAMP","TOTAL_KWH"\n')
    valid_times = range(24)
    t = 0  # used to track the hour of day across groups of 4 lines
    club = None
    clubName = None
    while True:
      assert t in valid_times
      consumption = 0
      # Aggregate four consecutive readings into one hourly value
      for x in range(4):
        # Read the line and get rid of the newline character
        line = next(f)[:-1]
        fields = line.split(',')
        assert len(fields) == 4
        for i, field in enumerate(fields):
          # Strip the redundant double quotes
          assert field[0] == '"' and field[-1] == '"'
          fields[i] = field[1:-1]
        # Ignoring field 0, which is just a running count
        # Get the club name
        name = fields[1]
        # Hack to fix inconsistent club names like:
        # "Melbourne CBD - Melbourne Central" vs. "Melbourne Central"
        partialNames = ('Melbourne Central', 'North Sydney', 'Park St',
                        'Pitt St')
        for pn in partialNames:
          if pn in name:
            name = pn
        # Look the club object up only when the name changes between lines
        if name != clubName:
          clubName = name
          club = clubs[name]
        # Split the date (time is counted using the t variable)
        tokens = fields[2].split()
        # Verify that t == 0 and consumption == 0 when there is no time
        # component in the file
        if len(tokens) == 1:
          assert consumption == 0 and t == 0
        # The first (and sometimes only) token is the date
        date = tokens[0]
        # Aggregate the consumption
        consumption += float(fields[3])
      # Update the Club object after aggregating the consumption of 4 lines
      club.updateRecord(date, t, consumption)
      # Increment time, wrapping around at midnight
      t += 1
      t %= 24
  except StopIteration:
    # Input exhausted -- all consumption records processed
    return
def processAttendanceFiles():
  """Read every 'Attendance*.csv' file and build the clubs dictionary.

  Returns a dict mapping club name -> Club object populated with
  attendance records.
  """
  attendanceFiles = glob.glob('Attendance*.csv')
  stream = fileinput.input(files=attendanceFiles)
  clubs = {}
  # Keep consuming clubs until the combined input stream is exhausted
  while True:
    if not processClubAttendance(stream, clubs):
      break
  return clubs
def processConsumptionFiles(clubs):
  """Overlay consumption data from 'all_group*detail.csv' onto the clubs.

  clubs -- dict of club name -> Club, as built by processAttendanceFiles();
           updated in place.

  Returns the same clubs dict with consumption values filled in.
  """
  files = glob.glob('all_group*detail.csv')
  f = fileinput.input(files=files)
  # processClubConsumption() consumes the entire input stream internally
  # and always returns None, so a single call processes everything. (The
  # previous 'while' loop only ever evaluated its condition once.)
  processClubConsumption(f, clubs)
  return clubs
def makeDataset():
  """Build gym.csv from the attendance and consumption source files.

  Reads attendance data first, overlays consumption data, then writes one
  row per (club, hour) record, sorted by record key.
  """
  clubs = processConsumptionFiles(processAttendanceFiles())
  fields = [('gym', 'string', 'S'),
            ('timestamp', 'datetime', 'T'),
            ('attendeeCount', 'int', ''),
            ('consumption', 'float', ''),
            ]
  with File('gym.csv', fields) as out:
    for club in clubs.values():
      # Record keys are ((yyyy, mm, dd), hour), so sorting the keys
      # yields chronological order per club.
      for key in sorted(club.records):
        rec = club.records[key]
        out.write([rec.club, rec.timestamp, rec.attendeeCount,
                   rec.consumption])
if __name__=='__main__':
  # Script entry point: build gym.csv from the raw CSV inputs.
  makeDataset()
  # Single-string print() is identical under Python 2 and 3.
  print('Done.')
| 8,637 | Python | .py | 231 | 31.194805 | 178 | 0.631095 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
25,999 | makeDataset.py | numenta_nupic-legacy/src/nupic/datafiles/extra/firstOrder/raw/makeDataset.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file import File
def createFirstOrderModel(numCategories=5, alpha=0.5):
  """Build a random first-order Markov model over named categories.

  numCategories -- number of distinct categories.
  alpha -- Dirichlet concentration parameter; smaller values make each
           row of the transition table more peaked.

  Returns (categoryList, initProbability, transitionTable): the category
  names ('cat00', 'cat01', ...), a uniform initial distribution, and a
  transition matrix whose rows each sum to 1.
  """
  # Category names: cat00, cat01, ...
  names = ['cat%02d' % idx for idx in range(numCategories)]
  # Uniform distribution over the initial category
  uniformInit = numpy.ones(numCategories) / numCategories
  # Each row is an independent Dirichlet draw, so every row sums to 1
  alphas = [alpha] * numCategories
  transitions = numpy.random.dirichlet(alpha=alphas, size=numCategories)
  return names, uniformInit, transitions
def generateFirstOrderData(model, numIterations=10000, seqLength=5,
                           resets=True, suffix='train'):
  """Generate a category-sequence CSV file from a first-order Markov model.

  model -- (categoryList, initProbability, transitionTable) as returned
           by createFirstOrderModel().
  numIterations -- total number of records to emit.
  seqLength -- a new sequence starts every seqLength records.
  resets -- when True, emit reset=1 and re-sample from the initial
            distribution at every sequence start.
  suffix -- tag appended to the output file name.

  Writes 'fo_<numIterations>_<seqLength>_<suffix>.csv' with fields
  (reset, name). Single-string print() calls are identical under
  Python 2 and 3.
  """
  print("Creating %d iteration file with seqLength %d" % (numIterations,
                                                          seqLength))
  categoryList, initProbability, transitionTable = model
  # Cumulative distributions allow sampling with searchsorted on a
  # uniform random draw.
  initProbability = initProbability.cumsum()
  transitionTable = transitionTable.cumsum(axis=1)
  outputFile = 'fo_%d_%d_%s.csv' % (numIterations, seqLength, suffix)
  # NOTE: a stray 'print "Filename",' used to precede this line, producing
  # duplicated "Filename Filename ..." output; it has been removed.
  print("Filename %s" % outputFile)
  fields = [('reset', 'int', 'R'), ('name', 'string', '')]
  o = File(outputFile, fields)
  seqIdx = 0
  # Sample the very first category from the initial distribution
  rand = numpy.random.rand()
  catIdx = numpy.searchsorted(initProbability, rand)
  for i in xrange(numIterations):
    rand = numpy.random.rand()
    if seqIdx == 0 and resets:
      # Sequence boundary: restart from the initial distribution
      catIdx = numpy.searchsorted(initProbability, rand)
      reset = 1
    else:
      # Follow the transition distribution of the current category
      catIdx = numpy.searchsorted(transitionTable[catIdx], rand)
      reset = 0
    o.write([reset, categoryList[catIdx]])
    seqIdx = (seqIdx + 1) % seqLength
  o.close()
if __name__=='__main__':
  # Fixed seed so the generated datasets are reproducible.
  numpy.random.seed(1956)
  model = createFirstOrderModel()
  # Persist the category names so consumers can map indices to labels.
  # 'with' guarantees the file is closed even if a write fails.
  categoryList = model[0]
  with open("categories.txt", 'w') as categoryFile:
    for category in categoryList:
      categoryFile.write(category + '\n')
  for resets in [True, False]:
    for seqLength in [2, 10]:
      # Training sets of increasing size for each configuration
      for numIterations in [1000, 10000, 100000]:
        generateFirstOrderData(model,
                               numIterations=numIterations,
                               seqLength=seqLength,
                               resets=resets,
                               suffix='train_%s' % ('resets' if resets
                                                    else 'noresets',))
      # One fixed-size test set per (resets, seqLength) combination
      generateFirstOrderData(model, numIterations=10000, seqLength=seqLength,
                             resets=resets,
                             suffix='test_%s' % ('resets' if resets
                                                 else 'noresets',))
| 3,462 | Python | .py | 78 | 37.512821 | 89 | 0.656919 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |