id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26,100 | backtracking_tm_cpp2_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/backtracking_tm_cpp2_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for the C++ implementation of the temporal memory."""
import cPickle as pickle
import numpy
import unittest2 as unittest
from nupic.bindings.math import Random
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
VERBOSITY = 0 # how chatty the unit tests should be
INFERENCE_VERBOSITY = 0 # Chattiness during inference test
# Fixed seed so every run builds the same pseudo-random input patterns.
SEED = 12
# Shared NuPIC random generator used to fill input vectors below.
_RGEN = Random(SEED)
def checkCell0(tm):
  """Check that cell 0 has no incoming segments.

  Raises an AssertionError if, in any column of ``tm``, cell 0 has one or
  more segments attached to it.

  Parameters:
  tm: a temporal-memory instance exposing ``numberOfCols`` and
      ``getNumSegmentsInCell(column, cellIdx)``.
  """
  for c in range(tm.numberOfCols):
    assert tm.getNumSegmentsInCell(c, 0) == 0
def setVerbosity(verbosity, tm, tmPy):
  """Set verbosity levels of the TM's.

  Applies the same verbosity to the C++ TM (both its Python wrapper and its
  underlying ``cells4`` instance) and to the pure-Python TM, so that the two
  implementations produce comparable diagnostic output.
  """
  tm.cells4.setVerbosity(verbosity)
  tm.verbosity = verbosity
  tmPy.verbosity = verbosity
class BacktrackingTMCPP2Test(unittest.TestCase):
def basicTest(self):
"""Basic test (creation, pickling, basic run of learning and inference)"""
# Create TM object
tm = BacktrackingTMCPP(numberOfCols=10, cellsPerColumn=3,
initialPerm=.2, connectedPerm= 0.8,
minThreshold=2, newSynapseCount=5,
permanenceInc=.1, permanenceDec= .05,
permanenceMax=1, globalDecay=.05,
activationThreshold=4, doPooling=False,
segUpdateValidDuration=5, seed=SEED,
verbosity=VERBOSITY)
tm.retrieveLearningStates = True
# Save and reload
tm.makeCells4Ephemeral = False
pickle.dump(tm, open("test_tm_cpp.pkl", "wb"))
tm2 = pickle.load(open("test_tm_cpp.pkl"))
self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY, checkStates=False))
# Learn
for i in xrange(5):
x = numpy.zeros(tm.numberOfCols, dtype='uint32')
_RGEN.initializeUInt32Array(x, 2)
tm.learn(x)
# Save and reload after learning
tm.reset()
tm.makeCells4Ephemeral = False
pickle.dump(tm, open("test_tm_cpp.pkl", "wb"))
tm2 = pickle.load(open("test_tm_cpp.pkl"))
self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY))
## Infer
patterns = numpy.zeros((4, tm.numberOfCols), dtype='uint32')
for i in xrange(4):
_RGEN.initializeUInt32Array(patterns[i], 2)
for i in xrange(10):
x = numpy.zeros(tm.numberOfCols, dtype='uint32')
_RGEN.initializeUInt32Array(x, 2)
tm.infer(x)
if i > 0:
tm._checkPrediction(patterns)
def basicTest2(self, tm, numPatterns=100, numRepetitions=3, activity=15,
testTrimming=False, testRebuild=False):
"""Basic test (basic run of learning and inference)"""
# Create PY TM object that mirrors the one sent in.
tmPy = BacktrackingTM(numberOfCols=tm.numberOfCols,
cellsPerColumn=tm.cellsPerColumn,
initialPerm=tm.initialPerm,
connectedPerm=tm.connectedPerm,
minThreshold=tm.minThreshold,
newSynapseCount=tm.newSynapseCount,
permanenceInc=tm.permanenceInc,
permanenceDec=tm.permanenceDec,
permanenceMax=tm.permanenceMax,
globalDecay=tm.globalDecay,
activationThreshold=tm.activationThreshold,
doPooling=tm.doPooling,
segUpdateValidDuration=tm.segUpdateValidDuration,
pamLength=tm.pamLength, maxAge=tm.maxAge,
maxSeqLength=tm.maxSeqLength,
maxSegmentsPerCell=tm.maxSegmentsPerCell,
maxSynapsesPerSegment=tm.maxSynapsesPerSegment,
seed=tm.seed, verbosity=tm.verbosity)
# Ensure we are copying over learning states for TMDiff
tm.retrieveLearningStates = True
verbosity = VERBOSITY
# Learn
# Build up sequences
sequence = fdrutils.generateCoincMatrix(nCoinc=numPatterns,
length=tm.numberOfCols,
activity=activity)
for r in xrange(numRepetitions):
for i in xrange(sequence.nRows()):
#if i > 11:
# setVerbosity(6, tm, tmPy)
if i % 10 == 0:
tm.reset()
tmPy.reset()
if verbosity >= 2:
print "\n\n ===================================\nPattern:",
print i, "Round:", r, "input:", sequence.getRow(i)
y1 = tm.learn(sequence.getRow(i))
y2 = tmPy.learn(sequence.getRow(i))
# Ensure everything continues to work well even if we continuously
# rebuild outSynapses structure
if testRebuild:
tm.cells4.rebuildOutSynapses()
if testTrimming:
tm.trimSegments()
tmPy.trimSegments()
if verbosity > 2:
print "\n ------ CPP states ------ ",
tm.printStates()
print "\n ------ PY states ------ ",
tmPy.printStates()
if verbosity > 6:
print "C++ cells: "
tm.printCells()
print "PY cells: "
tmPy.printCells()
if verbosity >= 3:
print "Num segments in PY and C++", tmPy.getNumSegments(), \
tm.getNumSegments()
# Check if the two TM's are identical or not. This check is slow so
# we do it every other iteration. Make it every iteration for debugging
# as needed.
self.assertTrue(fdrutils.tmDiff2(tm, tmPy, verbosity, False))
# Check that outputs are identical
self.assertLess(abs((y1 - y2).sum()), 3)
print "Learning completed"
self.assertTrue(fdrutils.tmDiff2(tm, tmPy, verbosity))
# TODO: Need to check - currently failing this
#checkCell0(tmPy)
# Remove unconnected synapses and check TM's again
# Test rebuild out synapses
print "Rebuilding outSynapses"
tm.cells4.rebuildOutSynapses()
self.assertTrue(fdrutils.tmDiff2(tm, tmPy, VERBOSITY))
print "Trimming segments"
tm.trimSegments()
tmPy.trimSegments()
self.assertTrue(fdrutils.tmDiff2(tm, tmPy, VERBOSITY))
# Save and reload after learning
print "Pickling and unpickling"
tm.makeCells4Ephemeral = False
pickle.dump(tm, open("test_tm_cpp.pkl", "wb"))
tm2 = pickle.load(open("test_tm_cpp.pkl"))
self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY, checkStates=False))
# Infer
print "Testing inference"
# Setup for inference
tm.reset()
tmPy.reset()
setVerbosity(INFERENCE_VERBOSITY, tm, tmPy)
patterns = numpy.zeros((40, tm.numberOfCols), dtype='uint32')
for i in xrange(4):
_RGEN.initializeUInt32Array(patterns[i], 2)
for i, x in enumerate(patterns):
x = numpy.zeros(tm.numberOfCols, dtype='uint32')
_RGEN.initializeUInt32Array(x, 2)
y = tm.infer(x)
yPy = tmPy.infer(x)
self.assertTrue(fdrutils.tmDiff2(tm, tmPy, VERBOSITY, checkLearn=False))
if abs((y - yPy).sum()) > 0:
print "C++ output", y
print "Py output", yPy
assert False
if i > 0:
tm._checkPrediction(patterns)
tmPy._checkPrediction(patterns)
print "Inference completed"
print "===================================="
return tm, tmPy
def testTMs(self, short=True):
"""Call basicTest2 with multiple parameter settings and ensure the C++ and
PY versions are identical throughout."""
if short == True:
print "Testing short version"
else:
print "Testing long version"
if short:
print "\nTesting with fixed resource CLA - test max segment and synapses"
tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5,
initialPerm=.5, connectedPerm= 0.5,
permanenceMax=1,
minThreshold=8, newSynapseCount=10,
permanenceInc=0.1, permanenceDec=0.01,
globalDecay=.0, activationThreshold=8,
doPooling=False, segUpdateValidDuration=5,
seed=SEED, verbosity=VERBOSITY,
maxAge=0,
maxSegmentsPerCell=2, maxSynapsesPerSegment=10,
checkSynapseConsistency=True)
tm.cells4.setCellSegmentOrder(True)
self.basicTest2(tm, numPatterns=15, numRepetitions=1)
if not short:
print "\nTesting with fixed resource CLA - test max segment and synapses"
tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5,
initialPerm = .5, connectedPerm= 0.5,
permanenceMax = 1,
minThreshold = 8, newSynapseCount = 10,
permanenceInc = .1, permanenceDec= .01,
globalDecay = .0, activationThreshold = 8,
doPooling = False, segUpdateValidDuration = 5,
seed=SEED, verbosity = VERBOSITY,
maxAge = 0,
maxSegmentsPerCell = 2, maxSynapsesPerSegment = 10,
checkSynapseConsistency = True)
tm.cells4.setCellSegmentOrder(1)
self.basicTest2(tm, numPatterns=30, numRepetitions=2)
print "\nTesting with permanenceInc = 0 and Dec = 0"
tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5,
initialPerm = .5, connectedPerm= 0.5,
minThreshold = 3, newSynapseCount = 3,
permanenceInc = 0.0, permanenceDec= 0.00,
permanenceMax = 1,
globalDecay = .0, activationThreshold = 3,
doPooling = False, segUpdateValidDuration = 5,
seed=SEED, verbosity = VERBOSITY,
checkSynapseConsistency = False)
tm.printParameters()
self.basicTest2(tm, numPatterns = 30, numRepetitions = 3)
print "Testing with permanenceInc = 0 and Dec = 0 and 1 cell per column"
tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=1,
initialPerm = .5, connectedPerm= 0.5,
minThreshold = 3, newSynapseCount = 3,
permanenceInc = 0.0, permanenceDec= 0.0,
permanenceMax = 1,
globalDecay = .0, activationThreshold = 3,
doPooling = False, segUpdateValidDuration = 5,
seed=SEED, verbosity = VERBOSITY,
checkSynapseConsistency = False)
self.basicTest2(tm)
print "Testing with permanenceInc = 0.1 and Dec = .0"
tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5,
initialPerm = .5, connectedPerm= 0.5,
minThreshold = 3, newSynapseCount = 3,
permanenceInc = .1, permanenceDec= .0,
permanenceMax = 1,
globalDecay = .0, activationThreshold = 3,
doPooling = False, segUpdateValidDuration = 5,
seed=SEED, verbosity = VERBOSITY,
checkSynapseConsistency = False)
self.basicTest2(tm)
print ("Testing with permanenceInc = 0.1, Dec = .01 and higher synapse "
"count")
tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=2,
initialPerm = .5, connectedPerm= 0.5,
minThreshold = 3, newSynapseCount = 5,
permanenceInc = .1, permanenceDec= .01,
permanenceMax = 1,
globalDecay = .0, activationThreshold = 3,
doPooling = False, segUpdateValidDuration = 5,
seed=SEED, verbosity = VERBOSITY,
checkSynapseConsistency = True)
self.basicTest2(tm, numPatterns=10, numRepetitions=2)
print "Testing age based global decay"
tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5,
initialPerm = .4, connectedPerm= 0.5,
minThreshold = 3, newSynapseCount = 3,
permanenceInc = 0.1, permanenceDec= 0.1,
permanenceMax = 1,
globalDecay = .25, activationThreshold = 3,
doPooling = False, segUpdateValidDuration = 5,
pamLength = 2, maxAge = 20,
seed=SEED, verbosity = VERBOSITY,
checkSynapseConsistency = True)
tm.cells4.setCellSegmentOrder(1)
self.basicTest2(tm)
print "\nTesting with fixed size CLA, max segments per cell"
tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5,
initialPerm = .5, connectedPerm= 0.5, permanenceMax = 1,
minThreshold = 8, newSynapseCount = 10,
permanenceInc = .1, permanenceDec= .01,
globalDecay = .0, activationThreshold = 8,
doPooling = False, segUpdateValidDuration = 5,
seed=SEED, verbosity = VERBOSITY,
maxAge = 0,
maxSegmentsPerCell = 2, maxSynapsesPerSegment = 100,
checkSynapseConsistency = True)
tm.cells4.setCellSegmentOrder(1)
self.basicTest2(tm, numPatterns=30, numRepetitions=2)
# Run the test suite when executed as a script.
if __name__ == '__main__':
  unittest.main()
| 14,966 | Python | .py | 303 | 35.805281 | 85 | 0.576185 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,101 | spatial_pooler_py_api_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/spatial_pooler_py_api_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from mock import Mock, patch, ANY, call
import numpy
import cPickle as pickle
import unittest2 as unittest
from nupic.bindings.math import GetNTAReal
from nupic.bindings.algorithms import SpatialPooler
# Numeric dtype string for real-valued arrays (permanences, boost factors),
# as reported by the NuPIC bindings.
realType = GetNTAReal()
# Dtype string used for binary input/output and count vectors.
uintType = "uint32"
class SpatialPoolerAPITest(unittest.TestCase):
  """Tests for SpatialPooler public API: each getter/setter pair is
  exercised by writing a value and reading it back."""


  def setUp(self):
    # Small 5x5 pooler shared by the simple getter/setter tests.
    self.sp = SpatialPooler(columnDimensions=[5], inputDimensions=[5])


  def testCompute(self):
    # Check that there are no errors in call to compute
    inputVector = numpy.ones(5, dtype=uintType)
    activeArray = numpy.zeros(5, dtype=uintType)
    self.sp.compute(inputVector, True, activeArray)


  def testGetUpdatePeriod(self):
    inParam = 1234
    self.sp.setUpdatePeriod(inParam)
    outParam = self.sp.getUpdatePeriod()
    self.assertEqual(inParam, outParam)


  def testGetPotentialRadius(self):
    inParam = 56
    self.sp.setPotentialRadius(inParam)
    outParam = self.sp.getPotentialRadius()
    self.assertEqual(inParam, outParam)


  def testGetPotentialPct(self):
    inParam = 0.4
    self.sp.setPotentialPct(inParam)
    outParam = self.sp.getPotentialPct()
    self.assertAlmostEqual(inParam, outParam)


  def testGetGlobalInhibition(self):
    inParam = True
    self.sp.setGlobalInhibition(inParam)
    outParam = self.sp.getGlobalInhibition()
    self.assertEqual(inParam, outParam)

    inParam = False
    self.sp.setGlobalInhibition(inParam)
    outParam = self.sp.getGlobalInhibition()
    self.assertEqual(inParam, outParam)


  def testGetNumActiveColumnsPerInhArea(self):
    inParam = 7
    self.sp.setNumActiveColumnsPerInhArea(inParam)
    outParam = self.sp.getNumActiveColumnsPerInhArea()
    self.assertEqual(inParam, outParam)


  def testGetLocalAreaDensity(self):
    inParam = 0.4
    self.sp.setLocalAreaDensity(inParam)
    outParam = self.sp.getLocalAreaDensity()
    self.assertAlmostEqual(inParam, outParam)


  def testGetStimulusThreshold(self):
    inParam = 89
    self.sp.setStimulusThreshold(inParam)
    outParam = self.sp.getStimulusThreshold()
    self.assertEqual(inParam, outParam)


  def testGetInhibitionRadius(self):
    inParam = 4
    self.sp.setInhibitionRadius(inParam)
    outParam = self.sp.getInhibitionRadius()
    self.assertEqual(inParam, outParam)


  def testGetDutyCyclePeriod(self):
    inParam = 2020
    self.sp.setDutyCyclePeriod(inParam)
    outParam = self.sp.getDutyCyclePeriod()
    self.assertEqual(inParam, outParam)


  def testGetBoostStrength(self):
    inParam = 78
    self.sp.setBoostStrength(inParam)
    outParam = self.sp.getBoostStrength()
    self.assertEqual(inParam, outParam)


  def testGetIterationNum(self):
    inParam = 999
    self.sp.setIterationNum(inParam)
    outParam = self.sp.getIterationNum()
    self.assertEqual(inParam, outParam)


  def testGetIterationLearnNum(self):
    inParam = 666
    self.sp.setIterationLearnNum(inParam)
    outParam = self.sp.getIterationLearnNum()
    self.assertEqual(inParam, outParam)


  def testGetSpVerbosity(self):
    inParam = 2
    self.sp.setSpVerbosity(inParam)
    outParam = self.sp.getSpVerbosity()
    self.assertEqual(inParam, outParam)


  def testGetSynPermTrimThreshold(self):
    inParam = 0.7
    self.sp.setSynPermTrimThreshold(inParam)
    outParam = self.sp.getSynPermTrimThreshold()
    self.assertAlmostEqual(inParam, outParam)


  def testGetSynPermActiveInc(self):
    inParam = 0.567
    self.sp.setSynPermActiveInc(inParam)
    outParam = self.sp.getSynPermActiveInc()
    self.assertAlmostEqual(inParam, outParam)


  def testGetSynPermInactiveDec(self):
    inParam = 0.123
    self.sp.setSynPermInactiveDec(inParam)
    outParam = self.sp.getSynPermInactiveDec()
    self.assertAlmostEqual(inParam, outParam)


  def testGetSynPermBelowStimulusInc(self):
    inParam = 0.0898
    self.sp.setSynPermBelowStimulusInc(inParam)
    outParam = self.sp.getSynPermBelowStimulusInc()
    self.assertAlmostEqual(inParam, outParam)


  def testGetSynPermConnected(self):
    inParam = 0.514
    self.sp.setSynPermConnected(inParam)
    outParam = self.sp.getSynPermConnected()
    self.assertAlmostEqual(inParam, outParam)


  def testGetMinPctOverlapDutyCycles(self):
    inParam = 0.11122
    self.sp.setMinPctOverlapDutyCycles(inParam)
    outParam = self.sp.getMinPctOverlapDutyCycles()
    self.assertAlmostEqual(inParam, outParam)


  def testGetPermanence(self):
    # NOTE(review): columnDimensions gets numInputs and inputDimensions gets
    # numColumns here (and in the tests below); the two values are equal, so
    # behavior is unchanged — kept as in the original.
    numInputs = 5
    numColumns = 5
    self.sp.initialize(columnDimensions=[numInputs],
                       inputDimensions=[numColumns],
                       potentialRadius=1,
                       potentialPct=1)
    inParam = numpy.array(
        [0.06, 0.07, 0.08, 0.12, 0.13]).astype(realType)
    self.sp.setPermanence(0,inParam)
    outParam = numpy.zeros(numInputs).astype(realType)
    self.sp.getPermanence(0, outParam)
    self.assertListEqual(list(inParam),list(outParam))


  def testGetBoostFactors(self):
    numInputs = 3
    numColumns = 3
    self.sp.initialize(columnDimensions=[numInputs],
                       inputDimensions=[numColumns])
    inParam = numpy.array([1, 1.2, 1.3, ]).astype(realType)
    self.sp.setBoostFactors(inParam)
    outParam = numpy.zeros(numInputs).astype(realType)
    self.sp.getBoostFactors(outParam)
    self.assertListEqual(list(inParam),list(outParam))


  def testGetOverlapDutyCycles(self):
    numInputs = 3
    numColumns = 3
    self.sp.initialize(columnDimensions=[numInputs],
                       inputDimensions=[numColumns])
    inParam = numpy.array([0.9, 0.3, 0.1]).astype(realType)
    self.sp.setOverlapDutyCycles(inParam)
    outParam = numpy.zeros(numInputs).astype(realType)
    self.sp.getOverlapDutyCycles(outParam)
    self.assertListEqual(list(inParam),list(outParam))


  def testGetActiveDutyCycles(self):
    numInputs = 3
    numColumns = 3
    self.sp.initialize(columnDimensions=[numInputs],
                       inputDimensions=[numColumns])
    inParam = numpy.array([0.9, 0.99, 0.999, ]).astype(realType)
    self.sp.setActiveDutyCycles(inParam)
    outParam = numpy.zeros(numInputs).astype(realType)
    self.sp.getActiveDutyCycles(outParam)
    self.assertListEqual(list(inParam),list(outParam))


  def testGetMinOverlapDutyCycles(self):
    numInputs = 3
    numColumns = 3
    self.sp.initialize(columnDimensions=[numInputs],
                       inputDimensions=[numColumns])
    inParam = numpy.array([0.01, 0.02, 0.035, ]).astype(realType)
    self.sp.setMinOverlapDutyCycles(inParam)
    outParam = numpy.zeros(numInputs).astype(realType)
    self.sp.getMinOverlapDutyCycles(outParam)
    self.assertListEqual(list(inParam),list(outParam))


  def testGetPotential(self):
    self.sp.initialize(columnDimensions=[3], inputDimensions=[3])
    numInputs = 3
    numColumns = 3
    self.sp.initialize(columnDimensions=[numInputs],
                       inputDimensions=[numColumns])
    inParam1 = numpy.array([1, 0, 1]).astype(uintType)
    self.sp.setPotential(0, inParam1)
    inParam2 = numpy.array([1, 1, 0]).astype(uintType)
    self.sp.setPotential(1, inParam2)

    outParam1 = numpy.zeros(numInputs).astype(uintType)
    outParam2 = numpy.zeros(numInputs).astype(uintType)
    self.sp.getPotential(0, outParam1)
    self.sp.getPotential(1, outParam2)

    self.assertListEqual(list(inParam1),list(outParam1))
    self.assertListEqual(list(inParam2),list(outParam2))


  def testGetConnectedSynapses(self):
    numInputs = 5
    numColumns = 5
    self.sp.initialize(columnDimensions=[numInputs],
                       inputDimensions=[numColumns],
                       potentialRadius=1,
                       potentialPct=1)
    inParam = numpy.array(
        [0.06, 0.07, 0.08, 0.12, 0.13]).astype(realType)
    # Permanences >= 0.1 count as connected after setSynPermConnected below.
    trueConnected = numpy.array([0, 0, 0, 1, 1])
    self.sp.setSynPermConnected(0.1)
    self.sp.setPermanence(0,inParam)
    outParam = numpy.zeros(numInputs).astype(uintType)
    self.sp.getConnectedSynapses(0, outParam)
    self.assertListEqual(list(trueConnected),list(outParam))


  def testGetConnectedCounts(self):
    numInputs = 5
    numColumns = 5
    self.sp.initialize(columnDimensions=[numInputs],
                       inputDimensions=[numColumns],
                       potentialRadius=1,
                       potentialPct=1)
    inParam = numpy.array(
        [0.06, 0.07, 0.08, 0.12, 0.11]).astype(realType)
    trueConnectedCount = 2
    self.sp.setSynPermConnected(0.1)
    self.sp.setPermanence(0, inParam)
    outParam = numpy.zeros(numInputs).astype(uintType)
    self.sp.getConnectedCounts(outParam)
    self.assertEqual(trueConnectedCount, outParam[0])


  def assertListAlmostEqual(self, alist, blist):
    # Element-wise comparison with a small absolute tolerance.
    self.assertEqual(len(alist), len(blist))
    for (a, b) in zip(alist, blist):
      diff = abs(a - b)
      self.assertLess(diff, 1e-5)
# Run the test suite when executed as a script.
if __name__ == "__main__":
  unittest.main()
| 9,840 | Python | .py | 242 | 34.92562 | 72 | 0.720029 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,102 | backtracking_tm_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/backtracking_tm_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for the Python implementation of the temporal memory."""
import cPickle as pickle
import csv
import itertools
import numpy
import os
import random
import shutil
import tempfile
import unittest2 as unittest
try:
import capnp
except ImportError:
capnp = None
from pkg_resources import resource_filename
from nupic.algorithms import fdrutilities
from nupic.algorithms.backtracking_tm import BacktrackingTM
# Set of all column indices 0..499. NOTE(review): not referenced in the
# visible portion of this file — presumably used by helpers elsewhere.
COL_SET = set(range(500))
# How chatty the unit tests should be.
VERBOSITY = 0
class BacktrackingTMTest(unittest.TestCase):
"""Unit tests for the TM class."""
def setUp(self):
self._tmpDir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._tmpDir)
def testInitDefaultTM(self):
self.assertTrue(isinstance(BacktrackingTM(), BacktrackingTM))
@unittest.skipUnless(capnp, "pycapnp not installed")
def testSerializationLearned(self):
# Create a model and give it some inputs to learn.
tm1 = BacktrackingTM(numberOfCols=100, cellsPerColumn=12,
verbosity=VERBOSITY)
sequences = [self.generateSequence() for _ in xrange(5)]
train = list(itertools.chain.from_iterable(sequences[:3]))
for bottomUpInput in train:
if bottomUpInput is None:
tm1.reset()
else:
tm1.compute(bottomUpInput, True, True)
# Serialize and deserialized the TM.
tmProto = BacktrackingTM.getSchema().new_message()
tm1.write(tmProto)
checkpointPath = os.path.join(self._tmpDir, 'a')
with open(checkpointPath, "wb") as f:
tmProto.write(f)
with open(checkpointPath, "rb") as f:
tmProto = BacktrackingTM.getSchema().read(f)
tm2 = BacktrackingTM.read(tmProto)
# Check that the TMs are the same.
self.assertTMsEqual(tm1, tm2)
# Feed some data into the models.
test = list(itertools.chain.from_iterable(sequences[3:]))
for bottomUpInput in test:
if bottomUpInput is None:
tm1.reset()
tm2.reset()
else:
result1 = tm1.compute(bottomUpInput, True, True)
result2 = tm2.compute(bottomUpInput, True, True)
self.assertTMsEqual(tm1, tm2)
self.assertTrue(numpy.array_equal(result1, result2))
@unittest.skipUnless(capnp, "pycapnp not installed")
def testSerializationMiddleOfSequence(self):
# Create a model and give it some inputs to learn.
tm1 = BacktrackingTM(numberOfCols=100, cellsPerColumn=12,
verbosity=VERBOSITY)
sequences = [self.generateSequence() for _ in xrange(5)]
train = list(itertools.chain.from_iterable(sequences[:3] +
[sequences[3][:5]]))
for bottomUpInput in train:
if bottomUpInput is None:
tm1.reset()
else:
tm1.compute(bottomUpInput, True, True)
# Serialize and deserialized the TM.
tmProto = BacktrackingTM.getSchema().new_message()
tm1.write(tmProto)
checkpointPath = os.path.join(self._tmpDir, 'a')
with open(checkpointPath, "wb") as f:
tmProto.write(f)
with open(checkpointPath, "rb") as f:
tmProto = BacktrackingTM.getSchema().read(f)
tm2 = BacktrackingTM.read(tmProto)
# Check that the TMs are the same.
self.assertTMsEqual(tm1, tm2)
# Feed some data into the models.
test = list(itertools.chain.from_iterable([sequences[3][5:]] +
sequences[3:]))
for bottomUpInput in test:
if bottomUpInput is None:
tm1.reset()
tm2.reset()
else:
result1 = tm1.compute(bottomUpInput, True, True)
result2 = tm2.compute(bottomUpInput, True, True)
self.assertTMsEqual(tm1, tm2)
self.assertTrue(numpy.array_equal(result1, result2))
@unittest.skipUnless(capnp, "pycapnp not installed")
def testSerializationMiddleOfSequence2(self):
"""More complex test of checkpointing in the middle of a sequence."""
tm1 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14,
False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32,
128, 32, 'normal')
tm2 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14,
False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32,
128, 32, 'normal')
with open(resource_filename(__name__, 'data/tm_input.csv'), 'r') as fin:
reader = csv.reader(fin)
records = []
for bottomUpInStr in fin:
bottomUpIn = numpy.array(eval('[' + bottomUpInStr.strip() + ']'),
dtype='int32')
records.append(bottomUpIn)
i = 1
for r in records[:250]:
print i
i += 1
output1 = tm1.compute(r, True, True)
output2 = tm2.compute(r, True, True)
self.assertTrue(numpy.array_equal(output1, output2))
print 'Serializing and deserializing models.'
savePath1 = os.path.join(self._tmpDir, 'tm1.bin')
tmProto1 = BacktrackingTM.getSchema().new_message()
tm1.write(tmProto1)
with open(savePath1, "wb") as f:
tmProto1.write(f)
with open(savePath1, "rb") as f:
tmProto3 = BacktrackingTM.getSchema().read(f)
tm3 = BacktrackingTM.read(tmProto3)
savePath2 = os.path.join(self._tmpDir, 'tm2.bin')
tmProto2 = BacktrackingTM.getSchema().new_message()
tm2.write(tmProto2)
with open(savePath2, "wb") as f:
tmProto2.write(f)
with open(savePath2, "rb") as f:
tmProto4 = BacktrackingTM.getSchema().read(f)
tm4 = BacktrackingTM.read(tmProto4)
self.assertTMsEqual(tm1, tm3)
self.assertTMsEqual(tm2, tm4)
for r in records[250:]:
print i
i += 1
out1 = tm1.compute(r, True, True)
out2 = tm2.compute(r, True, True)
out3 = tm3.compute(r, True, True)
out4 = tm4.compute(r, True, True)
self.assertTrue(numpy.array_equal(out1, out2))
self.assertTrue(numpy.array_equal(out1, out3))
self.assertTrue(numpy.array_equal(out1, out4))
self.assertTMsEqual(tm1, tm2)
self.assertTMsEqual(tm1, tm3)
self.assertTMsEqual(tm2, tm4)
def testCheckpointLearned(self):
# Create a model and give it some inputs to learn.
tm1 = BacktrackingTM(numberOfCols=100, cellsPerColumn=12,
verbosity=VERBOSITY)
sequences = [self.generateSequence() for _ in xrange(5)]
train = list(itertools.chain.from_iterable(sequences[:3]))
for bottomUpInput in train:
if bottomUpInput is None:
tm1.reset()
else:
tm1.compute(bottomUpInput, True, True)
# Serialize and deserialized the TM.
checkpointPath = os.path.join(self._tmpDir, 'a')
tm1.saveToFile(checkpointPath)
tm2 = pickle.loads(pickle.dumps(tm1))
tm2.loadFromFile(checkpointPath)
# Check that the TMs are the same.
self.assertTMsEqual(tm1, tm2)
# Feed some data into the models.
test = list(itertools.chain.from_iterable(sequences[3:]))
for bottomUpInput in test:
if bottomUpInput is None:
tm1.reset()
tm2.reset()
else:
result1 = tm1.compute(bottomUpInput, True, True)
result2 = tm2.compute(bottomUpInput, True, True)
self.assertTMsEqual(tm1, tm2)
self.assertTrue(numpy.array_equal(result1, result2))
def testCheckpointMiddleOfSequence(self):
# Create a model and give it some inputs to learn.
tm1 = BacktrackingTM(numberOfCols=100, cellsPerColumn=12,
verbosity=VERBOSITY)
sequences = [self.generateSequence() for _ in xrange(5)]
train = list(itertools.chain.from_iterable(sequences[:3] +
[sequences[3][:5]]))
for bottomUpInput in train:
if bottomUpInput is None:
tm1.reset()
else:
tm1.compute(bottomUpInput, True, True)
# Serialize and deserialized the TM.
checkpointPath = os.path.join(self._tmpDir, 'a')
tm1.saveToFile(checkpointPath)
tm2 = pickle.loads(pickle.dumps(tm1))
tm2.loadFromFile(checkpointPath)
# Check that the TMs are the same.
self.assertTMsEqual(tm1, tm2)
# Feed some data into the models.
test = list(itertools.chain.from_iterable([sequences[3][5:]] +
sequences[3:]))
for bottomUpInput in test:
if bottomUpInput is None:
tm1.reset()
tm2.reset()
else:
result1 = tm1.compute(bottomUpInput, True, True)
result2 = tm2.compute(bottomUpInput, True, True)
self.assertTMsEqual(tm1, tm2)
self.assertTrue(numpy.array_equal(result1, result2))
def testCheckpointMiddleOfSequence2(self):
"""More complex test of checkpointing in the middle of a sequence."""
tm1 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14,
False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32,
128, 32, 'normal')
tm2 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14,
False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32,
128, 32, 'normal')
with open(resource_filename(__name__, 'data/tm_input.csv'), 'r') as fin:
reader = csv.reader(fin)
records = []
for bottomUpInStr in fin:
bottomUpIn = numpy.array(eval('[' + bottomUpInStr.strip() + ']'),
dtype='int32')
records.append(bottomUpIn)
i = 1
for r in records[:250]:
print i
i += 1
output1 = tm1.compute(r, True, True)
output2 = tm2.compute(r, True, True)
self.assertTrue(numpy.array_equal(output1, output2))
print 'Serializing and deserializing models.'
savePath1 = os.path.join(self._tmpDir, 'tm1.bin')
tm1.saveToFile(savePath1)
tm3 = pickle.loads(pickle.dumps(tm1))
tm3.loadFromFile(savePath1)
savePath2 = os.path.join(self._tmpDir, 'tm2.bin')
tm2.saveToFile(savePath2)
tm4 = pickle.loads(pickle.dumps(tm2))
tm4.loadFromFile(savePath2)
self.assertTMsEqual(tm1, tm3)
self.assertTMsEqual(tm2, tm4)
for r in records[250:]:
print i
i += 1
out1 = tm1.compute(r, True, True)
out2 = tm2.compute(r, True, True)
out3 = tm3.compute(r, True, True)
out4 = tm4.compute(r, True, True)
self.assertTrue(numpy.array_equal(out1, out2))
self.assertTrue(numpy.array_equal(out1, out3))
self.assertTrue(numpy.array_equal(out1, out4))
self.assertTMsEqual(tm1, tm2)
self.assertTMsEqual(tm1, tm3)
self.assertTMsEqual(tm2, tm4)
def assertTMsEqual(self, tm1, tm2):
"""Asserts that two TM instances are the same.
This is temporarily disabled since it does not work with the C++
implementation of the TM.
"""
self.assertEqual(tm1, tm2, tm1.diff(tm2))
self.assertTrue(fdrutilities.tmDiff2(tm1, tm2, 1, False))
@staticmethod
def generateSequence(n=10, numCols=100, minOnes=21, maxOnes=25):
"""Generates a sequence of n patterns."""
return [None] + [BacktrackingTMTest.generatePattern(numCols, minOnes,
maxOnes)
for _ in xrange(n)]
@staticmethod
def generatePattern(numCols=100, minOnes=21, maxOnes=25):
"""Generate a single test pattern with given parameters.
Parameters:
numCols: Number of columns in each pattern.
minOnes: The minimum number of 1's in each pattern.
maxOnes: The maximum number of 1's in each pattern.
"""
assert minOnes < maxOnes
assert maxOnes < numCols
nOnes = random.randint(minOnes, maxOnes)
ind = random.sample(xrange(numCols), nOnes)
x = numpy.zeros(numCols, dtype='float32')
x[ind] = 1
return x
# Standard test entry point (runs via unittest2 on Python 2).
if __name__ == '__main__':
  unittest.main()
| 12,727 | Python | .py | 308 | 34.081169 | 77 | 0.648469 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,103 | anomaly_likelihood_jeff_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/anomaly_likelihood_jeff_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Test the anomaly likelihood stuff with specific artificial distributions of
anomaly scores. We want to specifically cover the various situations Jeff drew
on the board.
Some of the tests currently don't pass and are marked as such. We understand why
but fixing them needs a deeper algorithm discussion.
"""
import copy
import datetime
import unittest2 as unittest
from nupic.algorithms import anomaly_likelihood as an
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
def _getDateList(numSamples, startDatetime):
"""
Generate a sequence of sample dates starting at startDatetime and incrementing
every minute.
"""
dateList = []
td = datetime.timedelta(minutes=1)
curDate = startDatetime + td
for _ in range(numSamples):
dateList.append(curDate)
curDate = curDate + td
return dateList
class ArtificialAnomalyTest(TestCaseBase):
  """
  Exercises anomaly likelihood estimation against hand-constructed anomaly
  score sequences (the scenarios described in the module docstring).
  """

  def assertWithinEpsilon(self, a, b, epsilon=0.001):
    # Helper: assert |a - b| <= epsilon with a readable failure message.
    self.assertLessEqual(abs(a - b), epsilon,
                         "Values %g and %g are not within %g" % (a, b, epsilon))

  @staticmethod
  def _addSampleData(origData=None, numSamples=1440, spikeValue=1.0,
                     spikePeriod=20):
    """
    Add sample anomaly data to the existing data list and return it.
    Note: this does not modify the original data list
    Note 2: here we just add in increasing integers as the metric value

    Each appended record is [datetime, metricValue, anomalyScore]; every
    spikePeriod-th record gets anomaly score spikeValue, all others get 0.0.
    A spikePeriod of 0 produces no spikes at all.
    """
    if origData is None:
      origData = []
    # Get a list of dates
    if len(origData) > 0:
      lastDate = origData[-1][0]
    else:
      lastDate = datetime.datetime(2013, 2, 3)
    dateList = _getDateList(numSamples, lastDate)
    # Add anomaly spikes as appropriate
    data = copy.copy(origData)
    for idx, date in enumerate(dateList):
      if (spikePeriod > 0) and ( (idx + 1) % spikePeriod == 0):
        data.append([date, idx, spikeValue])
      else:
        data.append([date, idx, 0.0])
    return data

  def testCaseSingleSpike(self):
    """
    No anomalies, and then you see a single spike. The likelihood of that
    spike should be 0
    """
    data = self._addSampleData(spikePeriod=0, numSamples=1000)
    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data[0:1000])
    )
    # A lone spike after a flat history should be maximally surprising.
    data = self._addSampleData(numSamples=1, spikePeriod=1)
    likelihoods1, _, _ = (
      an.updateAnomalyLikelihoods(data, estimatorParams)
    )
    self.assertWithinEpsilon(likelihoods1[0], 0.0)

  def testCaseUnusuallyHighSpikeFrequency(self):
    """
    Test B: one anomaly spike every 20 records. Then we suddenly get a bunch
    in a row. The likelihood of those spikes should be low.
    """
    data = self._addSampleData(spikePeriod=20, numSamples=1019)
    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data[0:1000])
    )
    # If we continue to see the same distribution, we should get reasonable
    # likelihoods
    data = self._addSampleData(numSamples=119, spikePeriod=20)
    likelihoods1, _, estimatorParams1 = (
      an.updateAnomalyLikelihoods(data, estimatorParams)
    )
    # The minimum likelihood should be reasonably high
    self.assertTrue((likelihoods1.min() > 0.1 ))
    # Now spike ten times as often (every 2 records instead of every 20).
    data = self._addSampleData(numSamples=20, spikePeriod=2)
    likelihoods2, _, _ = (
      an.updateAnomalyLikelihoods(data, estimatorParams1)
    )
    # The likelihood once you get past the initial averaging should be very low.
    self.assertTrue((likelihoods2[5:].sum() / 15.0) < 0.001)

  @unittest.skip("Currently fails because the periodicity is greater than the "
                 "window size. Requires some algorithm enhancements. "
                 "Filed as https://github.com/numenta/nupic/issues/948.")
  def testCaseMissingSpike(self):
    """
    Test C: one anomaly every 20 records, but then see none. The likelihood
    at the end should be very low.
    """
    # Initial data
    data = self._addSampleData(spikePeriod=20, numSamples=1019)
    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data[0:1000])
    )
    # Now feed in none
    data = self._addSampleData(numSamples=100, spikePeriod=0)
    likelihoods2, _, _ = (
      an.updateAnomalyLikelihoods(data, estimatorParams)
    )
    # The likelihood once you get past the initial averaging should be very low.
    self.assertTrue((likelihoods2[5:].sum() / 15.0) < 0.0001)

  def testCaseContinuousBunchesOfSpikes(self):
    """
    Test D: bunches of anomalies every 20 records that continue. This should not
    be anomalous.
    """
    # Generate initial data
    data = []
    for _ in range(30):
      data = self._addSampleData(data, spikePeriod=0, numSamples=30)
      data = self._addSampleData(data, spikePeriod=3, numSamples=10)
    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data[0:1000])
    )
    # Now feed in the same distribution
    data = self._addSampleData(spikePeriod=0, numSamples=30)
    data = self._addSampleData(data, spikePeriod=3, numSamples=10)
    likelihoods2, _, _ = (
      an.updateAnomalyLikelihoods(data, estimatorParams)
    )
    # The likelihood should be reasonably high everywhere
    self.assertTrue(likelihoods2.min() > 0.01)

  def testCaseIncreasedSpikeFrequency(self):
    """
    Test E: bunches of anomalies every 20 records that become even more
    frequent. This should be anomalous.
    """
    # Generate initial data
    data = []
    for _ in range(30):
      data = self._addSampleData(data, spikePeriod=0, numSamples=30)
      data = self._addSampleData(data, spikePeriod=3, numSamples=10)
    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data[0:1000])
    )
    # Now feed in a more frequent distribution
    data = self._addSampleData(spikePeriod=0, numSamples=30)
    data = self._addSampleData(data, spikePeriod=1, numSamples=10)
    likelihoods2, _, _ = (
      an.updateAnomalyLikelihoods(data, estimatorParams)
    )
    # The likelihood should become anomalous but only near the end
    self.assertTrue(likelihoods2[0:30].min() > 0.01)
    self.assertTrue(likelihoods2[-5:].min() < 0.002)

  @unittest.skip("Currently fails because the periodicity is greater than the "
                 "window size. Requires some algorithm enhancements. "
                 "Filed as https://github.com/numenta/nupic/issues/948.")
  def testCaseMissingBunchesOfSpikes(self):
    """
    Test F: bunches of anomalies every 20 records that disappear. This should
    be anomalous.
    """
    # Generate initial data
    data = []
    for _ in range(30):
      data = self._addSampleData(data, spikePeriod=0, numSamples=30)
      data = self._addSampleData(data, spikePeriod=3, numSamples=10)
    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data)
    )
    # Now feed in a data stream with no spikes at all
    data = self._addSampleData(spikePeriod=0, numSamples=40)
    likelihoods2, _, _ = (
      an.updateAnomalyLikelihoods(data, estimatorParams)
    )
    # The likelihood should become anomalous but only near the end
    self.assertTrue(likelihoods2[0:30].min() > 0.01)
    self.assertTrue(likelihoods2[-5:].min() < 0.00001)

  def testCaseIncreasedAnomalyScore(self):
    """
    Test G: small anomaly score every 20 records, but then a large one when you
    would expect a small one. This should be anomalous.
    """
    # Generate initial data
    data = []
    data = self._addSampleData(data, spikePeriod=20,
                               spikeValue=0.4, numSamples=1000)
    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data)
    )
    # Now feed in the same spike cadence but with much larger spike values
    data = self._addSampleData(spikePeriod=20, spikeValue=1.0,
                               numSamples=100)
    likelihoods2, _, _ = (
      an.updateAnomalyLikelihoods(data, estimatorParams)
    )
    # We should detect highly unusual behavior
    self.assertTrue(likelihoods2.min() < 0.0003)
    # We should detect it pretty often
    self.assertTrue((likelihoods2 < 0.0003).sum() > 40)
# Standard test entry point (runs via unittest2 on Python 2).
if __name__ == "__main__":
  unittest.main()
| 9,004 | Python | .py | 220 | 35.563636 | 80 | 0.690165 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,104 | knn_classifier_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/knn_classifier_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import tempfile
import unittest
from nupic.algorithms.knn_classifier import KNNClassifier
# pycapnp is an optional dependency; when it is missing, `capnp` is set to
# None and the serialization test below is skipped via skipUnless.
try:
  import capnp
except ImportError:
  capnp = None
if capnp:
  from nupic.algorithms.knn_classifier_capnp import KNNClassifierProto
class KNNClassifierTest(unittest.TestCase):
  """
  Unit tests for KNNClassifier: vector sparsification, the supported distance
  metrics, sparse learning/inference, partition-id bookkeeping, and
  capnp serialization round-tripping.
  """

  def testSparsifyVector(self):
    """Covers every branch of KNNClassifier._sparsifyVector."""
    classifier = KNNClassifier(distanceMethod="norm", distanceNorm=2.0)
    inputPattern = np.array([0, 1, 3, 7, 11], dtype=np.int32)
    # Each of the 4 tests correspond with the each decisional branch in the
    # sparsifyVector method
    #
    # tests: if not self.relativeThreshold:
    outputPattern = classifier._sparsifyVector(inputPattern, doWinners=True)
    self.assertTrue(np.array_equal(np.array([0, 1, 3, 7, 11], dtype=np.int32),
                                   outputPattern))
    # tests: elif self.sparseThreshold > 0:
    classifier = KNNClassifier(distanceMethod="norm", distanceNorm=2.0,
      relativeThreshold=True, sparseThreshold=.2)
    outputPattern = classifier._sparsifyVector(inputPattern, doWinners=True)
    self.assertTrue(np.array_equal(np.array([0, 0, 3, 7, 11], dtype=np.int32),
                                   outputPattern))
    # tests: if doWinners:
    classifier = KNNClassifier(distanceMethod="norm", distanceNorm=2.0,
      relativeThreshold=True, sparseThreshold=.2, numWinners=2)
    outputPattern = classifier._sparsifyVector(inputPattern, doWinners=True)
    self.assertTrue(np.array_equal(np.array([0, 0, 0, 0, 0], dtype=np.int32),
                                   outputPattern))
    # tests: Do binarization
    classifier = KNNClassifier(distanceMethod="norm", distanceNorm=2.0,
      relativeThreshold=True, sparseThreshold=.2, doBinarization=True)
    outputPattern = classifier._sparsifyVector(inputPattern, doWinners=True)
    self.assertTrue(np.array_equal(np.array(
      [0., 0., 1., 1., 1.], dtype=np.float32), outputPattern))

  def testDistanceMetrics(self):
    """Checks each supported distanceMethod/distanceNorm combination."""
    classifier = KNNClassifier(distanceMethod="norm", distanceNorm=2.0)
    dimensionality = 40
    protoA = np.array([0, 1, 3, 7, 11], dtype=np.int32)
    protoB = np.array([20, 28, 30], dtype=np.int32)
    classifier.learn(protoA, 0, isSparse=dimensionality)
    classifier.learn(protoB, 0, isSparse=dimensionality)
    # input is an arbitrary point, close to protoA, orthogonal to protoB
    # NOTE(review): the name "input" shadows the builtin; kept as-is here.
    input = np.zeros(dimensionality)
    input[:4] = 1.0
    # input0 is used to test that the distance from a point to itself is 0
    input0 = np.zeros(dimensionality)
    input0[protoA] = 1.0
    # Test l2 norm metric
    _, _, dist, _ = classifier.infer(input)
    l2Distances = [0.65465367, 1.0]
    for actual, predicted in zip(l2Distances, dist):
      self.assertAlmostEqual(
        actual, predicted, places=5,
        msg="l2 distance norm is not calculated as expected.")
    _, _, dist0, _ = classifier.infer(input0)
    self.assertEqual(
      0.0, dist0[0], msg="l2 norm did not calculate 0 distance as expected.")
    # Test l1 norm metric
    classifier.distanceNorm = 1.0
    _, _, dist, _ = classifier.infer(input)
    l1Distances = [0.42857143, 1.0]
    for actual, predicted in zip(l1Distances, dist):
      self.assertAlmostEqual(
        actual, predicted, places=5,
        msg="l1 distance norm is not calculated as expected.")
    _, _, dist0, _ = classifier.infer(input0)
    self.assertEqual(
      0.0, dist0[0], msg="l1 norm did not calculate 0 distance as expected.")
    # Test raw overlap metric
    classifier.distanceMethod = "rawOverlap"
    _, _, dist, _ = classifier.infer(input)
    rawOverlaps = [1, 4]
    for actual, predicted in zip(rawOverlaps, dist):
      self.assertEqual(
        actual, predicted, msg="Raw overlap is not calculated as expected.")
    _, _, dist0, _ = classifier.infer(input0)
    self.assertEqual(
      0.0, dist0[0],
      msg="Raw overlap did not calculate 0 distance as expected.")
    # Test pctOverlapOfInput metric
    classifier.distanceMethod = "pctOverlapOfInput"
    _, _, dist, _ = classifier.infer(input)
    pctOverlaps = [0.25, 1.0]
    for actual, predicted in zip(pctOverlaps, dist):
      self.assertAlmostEqual(
        actual, predicted, places=5,
        msg="pctOverlapOfInput is not calculated as expected.")
    _, _, dist0, _ = classifier.infer(input0)
    self.assertEqual(
      0.0, dist0[0],
      msg="pctOverlapOfInput did not calculate 0 distance as expected.")
    # Test pctOverlapOfProto metric
    classifier.distanceMethod = "pctOverlapOfProto"
    _, _, dist, _ = classifier.infer(input)
    pctOverlaps = [0.40, 1.0]
    for actual, predicted in zip(pctOverlaps, dist):
      self.assertAlmostEqual(
        actual, predicted, places=5,
        msg="pctOverlapOfProto is not calculated as expected.")
    _, _, dist0, _ = classifier.infer(input0)
    self.assertEqual(
      0.0, dist0[0],
      msg="pctOverlapOfProto did not calculate 0 distance as expected.")
    # Test pctOverlapOfLarger metric
    classifier.distanceMethod = "pctOverlapOfLarger"
    _, _, dist, _ = classifier.infer(input)
    pctOverlaps = [0.40, 1.0]
    for actual, predicted in zip(pctOverlaps, dist):
      self.assertAlmostEqual(
        actual, predicted, places=5,
        msg="pctOverlapOfLarger is not calculated as expected.")
    _, _, dist0, _ = classifier.infer(input0)
    self.assertEqual(
      0.0, dist0[0],
      msg="pctOverlapOfLarger did not calculate 0 distance as expected.")

  def testOverlapDistanceMethodStandard(self):
    """Tests standard learning case for raw overlap"""
    params = {"distanceMethod": "rawOverlap"}
    classifier = KNNClassifier(**params)
    dimensionality = 40
    a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
    b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
    numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
    self.assertEquals(numPatterns, 1)
    numPatterns = classifier.learn(b, 1, isSparse=dimensionality)
    self.assertEquals(numPatterns, 2)
    denseA = np.zeros(dimensionality)
    denseA[a] = 1.0
    cat, _, _, _ = classifier.infer(denseA)
    self.assertEquals(cat, 0)
    denseB = np.zeros(dimensionality)
    denseB[b] = 1.0
    cat, _, _, _ = classifier.infer(denseB)
    self.assertEquals(cat, 1)

  def testMinSparsity(self):
    """Tests overlap distance with min sparsity"""
    # Require sparsity >= 20%
    params = {"distanceMethod": "rawOverlap", "minSparsity": 0.2}
    classifier = KNNClassifier(**params)
    dimensionality = 30
    a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
    b = np.array([2, 4, 8, 12, 14, 18, 20, 21, 28], dtype=np.int32)
    # This has 20% sparsity and should be inserted
    c = np.array([2, 3, 8, 11, 14, 18], dtype=np.int32)
    # This has 17% sparsity and should NOT be inserted
    d = np.array([2, 3, 8, 11, 18], dtype=np.int32)
    numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
    self.assertEquals(numPatterns, 1)
    numPatterns = classifier.learn(b, 1, isSparse=dimensionality)
    self.assertEquals(numPatterns, 2)
    numPatterns = classifier.learn(c, 1, isSparse=dimensionality)
    self.assertEquals(numPatterns, 3)
    numPatterns = classifier.learn(d, 1, isSparse=dimensionality)
    self.assertEquals(numPatterns, 3)
    # Test that inference ignores low sparsity vectors but not others
    e = np.array([2, 4, 5, 6, 8, 12, 14, 18, 20], dtype=np.int32)
    dense= np.zeros(dimensionality)
    dense[e] = 1.0
    cat, inference, _, _ = classifier.infer(dense)
    self.assertIsNotNone(cat)
    self.assertGreater(inference.sum(),0.0)
    # This has 20% sparsity and should be used for inference
    f = np.array([2, 5, 8, 11, 14, 18], dtype=np.int32)
    dense= np.zeros(dimensionality)
    dense[f] = 1.0
    cat, inference, _, _ = classifier.infer(dense)
    self.assertIsNotNone(cat)
    self.assertGreater(inference.sum(),0.0)
    # This has 17% sparsity and should return null inference results
    g = np.array([2, 3, 8, 11, 19], dtype=np.int32)
    dense= np.zeros(dimensionality)
    dense[g] = 1.0
    cat, inference, _, _ = classifier.infer(dense)
    self.assertIsNone(cat)
    self.assertEqual(inference.sum(),0.0)

  def testPartitionIdExcluded(self):
    """
    Tests that paritionId properly excludes training data points during
    inference
    """
    params = {"distanceMethod": "rawOverlap"}
    classifier = KNNClassifier(**params)
    dimensionality = 40
    a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
    b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
    denseA = np.zeros(dimensionality)
    denseA[a] = 1.0
    denseB = np.zeros(dimensionality)
    denseB[b] = 1.0
    classifier.learn(a, 0, isSparse=dimensionality, partitionId=0)
    classifier.learn(b, 1, isSparse=dimensionality, partitionId=1)
    # Inferring with partitionId=k excludes prototypes learned with id k,
    # so excluding the matching prototype flips the reported category.
    cat, _, _, _ = classifier.infer(denseA, partitionId=1)
    self.assertEquals(cat, 0)
    cat, _, _, _ = classifier.infer(denseA, partitionId=0)
    self.assertEquals(cat, 1)
    cat, _, _, _ = classifier.infer(denseB, partitionId=0)
    self.assertEquals(cat, 1)
    cat, _, _, _ = classifier.infer(denseB, partitionId=1)
    self.assertEquals(cat, 0)
    # Ensure it works even if you invoke learning again. To make it a bit more
    # complex this time we insert A again but now with Id=2
    classifier.learn(a, 0, isSparse=dimensionality, partitionId=2)
    # Even though first A should be ignored, the second instance of A should
    # not be ignored.
    cat, _, _, _ = classifier.infer(denseA, partitionId=0)
    self.assertEquals(cat, 0)

  def testGetPartitionId(self):
    """
    Test a sequence of calls to KNN to ensure we can retrieve partition Id:
        - We first learn on some patterns (including one pattern with no
          partitionId in the middle) and test that we can retrieve Ids.
        - We then invoke inference and then check partitionId again.
        - We check incorrect indices to ensure we get an exception.
        - We check the case where the partitionId to be ignored is not in
          the list.
        - We learn on one more pattern and check partitionIds again
        - We remove rows and ensure partitionIds still work
    """
    params = {"distanceMethod": "rawOverlap"}
    classifier = KNNClassifier(**params)
    dimensionality = 40
    a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
    b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
    c = np.array([1, 2, 3, 14, 16, 19, 22, 24, 33], dtype=np.int32)
    d = np.array([2, 4, 8, 12, 14, 19, 22, 24, 33], dtype=np.int32)
    e = np.array([1, 3, 7, 12, 14, 19, 22, 24, 33], dtype=np.int32)
    denseA = np.zeros(dimensionality)
    denseA[a] = 1.0
    classifier.learn(a, 0, isSparse=dimensionality, partitionId=433)
    classifier.learn(b, 1, isSparse=dimensionality, partitionId=213)
    classifier.learn(c, 1, isSparse=dimensionality, partitionId=None)
    classifier.learn(d, 1, isSparse=dimensionality, partitionId=433)
    self.assertEquals(classifier.getPartitionId(0), 433)
    self.assertEquals(classifier.getPartitionId(1), 213)
    self.assertEquals(classifier.getPartitionId(2), None)
    self.assertEquals(classifier.getPartitionId(3), 433)
    cat, _, _, _ = classifier.infer(denseA, partitionId=213)
    self.assertEquals(cat, 0)
    # Test with patternId not in classifier
    cat, _, _, _ = classifier.infer(denseA, partitionId=666)
    self.assertEquals(cat, 0)
    # Partition Ids should be maintained after inference
    self.assertEquals(classifier.getPartitionId(0), 433)
    self.assertEquals(classifier.getPartitionId(1), 213)
    self.assertEquals(classifier.getPartitionId(2), None)
    self.assertEquals(classifier.getPartitionId(3), 433)
    # Should return exceptions if we go out of bounds
    with self.assertRaises(RuntimeError):
      classifier.getPartitionId(4)
    with self.assertRaises(RuntimeError):
      classifier.getPartitionId(-1)
    # Learn again
    classifier.learn(e, 4, isSparse=dimensionality, partitionId=413)
    self.assertEquals(classifier.getPartitionId(4), 413)
    # Test getPatternIndicesWithPartitionId
    self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(433),
                          [0, 3])
    self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(666),
                          [])
    self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(413),
                          [4])
    self.assertEquals(classifier.getNumPartitionIds(), 3)
    # Check that the full set of partition ids is what we expect
    # (a missing partitionId is reported as np.inf in the list).
    self.assertItemsEqual(classifier.getPartitionIdList(),
                          [433, 213, np.inf, 433, 413])
    self.assertItemsEqual(classifier.getPartitionIdKeys(), [433, 413, 213])
    # Remove two rows - all indices shift down
    self.assertEquals(classifier._removeRows([0,2]), 2)
    self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(433),
                          [1])
    self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(413),
                          [2])
    # Remove another row and check number of partitions have decreased
    classifier._removeRows([0])
    self.assertEquals(classifier.getNumPartitionIds(), 2)
    # Check that the full set of partition ids is what we expect
    self.assertItemsEqual(classifier.getPartitionIdList(), [433, 413])
    self.assertItemsEqual(classifier.getPartitionIdKeys(), [433, 413])

  def testGetPartitionIdWithNoIdsAtFirst(self):
    """
    Tests that we can correctly retrieve partition Id even if the first few
    vectors do not have Ids
    """
    params = {"distanceMethod": "rawOverlap"}
    classifier = KNNClassifier(**params)
    dimensionality = 40
    a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
    b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
    c = np.array([1, 2, 3, 14, 16, 19, 22, 24, 33], dtype=np.int32)
    d = np.array([2, 4, 8, 12, 14, 19, 22, 24, 33], dtype=np.int32)
    denseA = np.zeros(dimensionality)
    denseA[a] = 1.0
    denseD = np.zeros(dimensionality)
    denseD[d] = 1.0
    classifier.learn(a, 0, isSparse=dimensionality, partitionId=None)
    classifier.learn(b, 1, isSparse=dimensionality, partitionId=None)
    classifier.learn(c, 2, isSparse=dimensionality, partitionId=211)
    classifier.learn(d, 1, isSparse=dimensionality, partitionId=405)
    cat, _, _, _ = classifier.infer(denseA, partitionId=405)
    self.assertEquals(cat, 0)
    cat, _, _, _ = classifier.infer(denseD, partitionId=405)
    self.assertEquals(cat, 2)
    cat, _, _, _ = classifier.infer(denseD)
    self.assertEquals(cat, 1)

  @unittest.skipUnless(__debug__, "Only applicable when asserts are enabled")
  def testOverlapDistanceMethodBadSparsity(self):
    """Sparsity (input dimensionality) less than input array"""
    params = {"distanceMethod": "rawOverlap"}
    classifier = KNNClassifier(**params)
    a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
    # Learn with incorrect dimensionality, less than some bits (23, 29)
    with self.assertRaises(AssertionError):
      classifier.learn(a, 0, isSparse=20)

  def testOverlapDistanceMethodInconsistentDimensionality(self):
    """Inconsistent sparsity (input dimensionality)"""
    params = {"distanceMethod": "rawOverlap"}
    classifier = KNNClassifier(**params)
    dimensionality = 40
    a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
    # Learn with incorrect dimensionality, greater than largest ON bit, but
    # inconsistent when inferring
    numPatterns = classifier.learn(a, 0, isSparse=31)
    self.assertEquals(numPatterns, 1)
    denseA = np.zeros(dimensionality)
    denseA[a] = 1.0
    cat, _, _, _ = classifier.infer(denseA)
    self.assertEquals(cat, 0)

  @unittest.skipUnless(__debug__, "Only applicable when asserts are enabled")
  def testOverlapDistanceMethodStandardUnsorted(self):
    """If sparse representation indices are unsorted expect error."""
    params = {"distanceMethod": "rawOverlap"}
    classifier = KNNClassifier(**params)
    dimensionality = 40
    a = np.array([29, 3, 7, 11, 13, 17, 19, 23, 1], dtype=np.int32)
    b = np.array([2, 4, 20, 12, 14, 18, 8, 28, 30], dtype=np.int32)
    with self.assertRaises(AssertionError):
      classifier.learn(a, 0, isSparse=dimensionality)
    with self.assertRaises(AssertionError):
      classifier.learn(b, 1, isSparse=dimensionality)

  def testOverlapDistanceMethodEmptyArray(self):
    """Tests case where pattern has no ON bits"""
    params = {"distanceMethod": "rawOverlap"}
    classifier = KNNClassifier(**params)
    dimensionality = 40
    a = np.array([], dtype=np.int32)
    numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
    self.assertEquals(numPatterns, 1)
    denseA = np.zeros(dimensionality)
    denseA[a] = 1.0
    cat, _, _, _ = classifier.infer(denseA)
    self.assertEquals(cat, 0)

  @unittest.skip("Finish when infer has options for sparse and dense "
                 "https://github.com/numenta/nupic/issues/2198")
  def testOverlapDistanceMethod_ClassifySparse(self):
    params = {"distanceMethod": "rawOverlap"}
    classifier = KNNClassifier(**params)
    dimensionality = 40
    a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
    b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
    classifier.learn(a, 0, isSparse=dimensionality)
    classifier.learn(b, 1, isSparse=dimensionality)
    # TODO Test case where infer is passed a sparse representation after
    # infer() has been extended to handle sparse and dense
    cat, _, _, _ = classifier.infer(a)
    self.assertEquals(cat, 0)
    cat, _, _, _ = classifier.infer(b)
    self.assertEquals(cat, 1)

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    """Round-trips a trained classifier through capnp serialization."""
    knn = KNNClassifier(distanceMethod="norm", numSVDDims=2, numSVDSamples=2,
                        useSparseMemory=True, minSparsity=0.1,
                        distThreshold=0.1)
    dimensionality = 40
    a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
    b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
    c = np.array([1, 2, 3, 14, 16, 19, 22, 24, 33], dtype=np.int32)
    d = np.array([2, 4, 8, 12, 14, 19, 22, 24, 33], dtype=np.int32)
    knn.learn(a, 0, isSparse=dimensionality, partitionId=None)
    knn.learn(b, 1, isSparse=dimensionality, partitionId=None)
    knn.learn(c, 2, isSparse=dimensionality, partitionId=211)
    knn.learn(d, 1, isSparse=dimensionality, partitionId=405)
    knn.finishLearning()
    proto = KNNClassifierProto.new_message()
    knn.write(proto)
    with tempfile.TemporaryFile() as f:
      proto.write(f)
      f.seek(0)
      protoDeserialized = KNNClassifierProto.read(f)
      knnDeserialized = KNNClassifier.read(protoDeserialized)
    denseA = np.zeros(dimensionality)
    denseA[a] = 1.0
    # The deserialized classifier must infer identically to the original.
    expected = knn.infer(denseA)
    actual = knnDeserialized.infer(denseA)
    self.assertEqual(expected[0], actual[0])
    self.assertItemsEqual(expected[1], actual[1])
    self.assertItemsEqual(expected[2], actual[2])
    self.assertItemsEqual(expected[3], actual[3])
    self.assertItemsEqual(knn.getPartitionIdList(),
                          knnDeserialized.getPartitionIdList())
# Standard unittest entry point.
if __name__ == "__main__":
  unittest.main()
| 20,334 | Python | .py | 427 | 41.87822 | 78 | 0.687882 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,105 | anomaly_likelihood_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/anomaly_likelihood_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for anomaly likelihood module."""
# disable pylint warning: "Access to a protected member xxxxx of a client class"
# pylint: disable=W0212
import copy
import datetime
import math
import numpy
import pickle
import unittest2 as unittest
import mock
from nupic.algorithms import anomaly_likelihood as an
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
def _sampleDistribution(params, numSamples, verbosity=0):
"""
Given the parameters of a distribution, generate numSamples points from it.
This routine is mostly for testing.
:returns: A numpy array of samples.
"""
if params.has_key("name"):
if params["name"] == "normal":
samples = numpy.random.normal(loc=params["mean"],
scale=math.sqrt(params["variance"]),
size=numSamples)
elif params["name"] == "pareto":
samples = numpy.random.pareto(params["alpha"], size=numSamples)
elif params["name"] == "beta":
samples = numpy.random.beta(a=params["alpha"], b=params["beta"],
size=numSamples)
else:
raise ValueError("Undefined distribution: " + params["name"])
else:
raise ValueError("Bad distribution params: " + str(params))
if verbosity > 0:
print "\nSampling from distribution:", params
print "After estimation, mean=", numpy.mean(samples), \
"var=", numpy.var(samples), "stdev=", math.sqrt(numpy.var(samples))
return samples
def _generateSampleData(mean=0.2, variance=0.2, metricMean=0.2,
                        metricVariance=0.2):
  """
  Build 1440 fake metric records — one per minute of 2013-02-02 — each a
  [timestamp, metricValue, anomalyScore] triple. Anomaly scores and metric
  values are drawn from normal distributions with the given means/variances.
  """
  # Draw the anomaly scores first, then the metric values (order matters for
  # reproducibility of the underlying RNG stream).
  anomalyParams = {"mean": mean,
                   "name": "normal",
                   "stdev": math.sqrt(variance),
                   "variance": variance}
  anomalyScores = _sampleDistribution(anomalyParams, 1440)

  metricParams = {"mean": metricMean,
                  "name": "normal",
                  "stdev": math.sqrt(metricVariance),
                  "variance": metricVariance}
  metricValues = _sampleDistribution(metricParams, 1440)

  records = []
  for idx in range(1440):
    timestamp = datetime.datetime(2013, 2, 2, idx // 60, idx % 60, 0)
    records.append([timestamp, metricValues[idx], anomalyScores[idx]])
  return records
class AnomalyLikelihoodClassTest(TestCaseBase):
  """Tests the high-level AnomalyLikelihood class"""

  def testCalcSkipRecords(self):
    """_calcSkipRecords should report how many records remaining in the
    sliding window were consumed purely for learning and therefore must be
    skipped when estimating the distribution."""
    # numIngested is less than both learningPeriod and windowSize
    numSkip = an.AnomalyLikelihood._calcSkipRecords(
      numIngested=5,
      windowSize=10,
      learningPeriod=10)
    self.assertEqual(numSkip, 5)

    # numIngested is equal to learningPeriod, but less than windowSize
    numSkip = an.AnomalyLikelihood._calcSkipRecords(
      numIngested=10,
      windowSize=15,
      learningPeriod=10)
    self.assertEqual(numSkip, 10)

    # edge case: learningPeriod is 0
    numSkip = an.AnomalyLikelihood._calcSkipRecords(
      numIngested=10,
      windowSize=10,
      learningPeriod=0)
    self.assertEqual(numSkip, 0)

    # boundary case: numIngested is equal to learningPeriod and windowSize
    numSkip = an.AnomalyLikelihood._calcSkipRecords(
      numIngested=10,
      windowSize=10,
      learningPeriod=10)
    self.assertEqual(numSkip, 10)

    # learning samples partially shifted out of the window
    numSkip = an.AnomalyLikelihood._calcSkipRecords(
      numIngested=14,
      windowSize=10,
      learningPeriod=10)
    self.assertEqual(numSkip, 6)

    # learning samples fully shifted out of the window
    numSkip = an.AnomalyLikelihood._calcSkipRecords(
      numIngested=20,
      windowSize=10,
      learningPeriod=10)
    self.assertEqual(numSkip, 0)

    # learning samples plus others shifted out of the window
    numSkip = an.AnomalyLikelihood._calcSkipRecords(
      numIngested=25,
      windowSize=10,
      learningPeriod=10)
    self.assertEqual(numSkip, 0)


  def testHistoricWindowSize(self):
    """The _historicalScores buffer grows until it reaches historicWindowSize
    and then stays capped at that length."""
    l = an.AnomalyLikelihood(claLearningPeriod=2,
                             estimationSamples=2,
                             historicWindowSize=3)

    l.anomalyProbability(5, 0.1, timestamp=1) # burn in
    self.assertEqual(len(l._historicalScores), 1)

    l.anomalyProbability(5, 0.1, timestamp=2)
    self.assertEqual(len(l._historicalScores), 2)

    l.anomalyProbability(5, 0.1, timestamp=3)
    self.assertEqual(len(l._historicalScores), 3)

    # Window is full; further samples must not grow the buffer.
    l.anomalyProbability(5, 0.1, timestamp=4)
    self.assertEqual(len(l._historicalScores), 3)


  def testdWindowSizeImpactOnEstimateAnomalyLikelihoodsArgs(self):
    # Verify that AnomalyLikelihood's historicWindowSize plays nice with args
    # passed to estimateAnomalyLikelihoods
    originalEstimateAnomalyLikelihoods = an.estimateAnomalyLikelihoods

    estimationArgs = []

    def estimateAnomalyLikelihoodsWrap(anomalyScores,
                                       averagingWindow=10,
                                       skipRecords=0,
                                       verbosity=0):
      # Snapshot the args as a tuple (the sliding-window container mutates
      # in-place later), then delegate to the real implementation.
      estimationArgs.append((tuple(anomalyScores), skipRecords))
      return originalEstimateAnomalyLikelihoods(anomalyScores,
                                                averagingWindow=averagingWindow,
                                                skipRecords=skipRecords,
                                                verbosity=verbosity)

    estimateAnomalyLikelihoodsPatch = mock.patch(
      "nupic.algorithms.anomaly_likelihood.estimateAnomalyLikelihoods",
      side_effect=estimateAnomalyLikelihoodsWrap, autospec=True)

    with estimateAnomalyLikelihoodsPatch as estimateAnomalyLikelihoodsMock:
      l = an.AnomalyLikelihood(claLearningPeriod=2,
                               estimationSamples=2,
                               historicWindowSize=3)
      l.anomalyProbability(10, 0.1, timestamp=1)
      self.assertEqual(estimateAnomalyLikelihoodsMock.call_count, 0)

      l.anomalyProbability(20, 0.2, timestamp=2)
      self.assertEqual(estimateAnomalyLikelihoodsMock.call_count, 0)

      l.anomalyProbability(30, 0.3, timestamp=3)
      self.assertEqual(estimateAnomalyLikelihoodsMock.call_count, 0)

      l.anomalyProbability(40, 0.4, timestamp=4)
      self.assertEqual(estimateAnomalyLikelihoodsMock.call_count, 0)

      # Estimation should kick in after claLearningPeriod + estimationSamples
      # samples have been ingested
      l.anomalyProbability(50, 0.5, timestamp=5)
      self.assertEqual(estimateAnomalyLikelihoodsMock.call_count, 1)

      # NOTE: we cannot use mock's assert_called_with, because the sliding
      # window container changes in-place after estimateAnomalyLikelihoods is
      # called
      scores, numSkip = estimationArgs.pop()
      self.assertEqual(scores, ((2, 20, 0.2), (3, 30, 0.3), (4, 40, 0.4)))
      self.assertEqual(numSkip, 1)


  def testReestimationPeriodArg(self):
    """The distribution should be re-estimated only every reestimationPeriod
    samples once the probationary period has passed."""
    estimateAnomalyLikelihoodsWrap = mock.Mock(
      wraps=an.estimateAnomalyLikelihoods,
      autospec=True)

    estimateAnomalyLikelihoodsPatch = mock.patch(
      "nupic.algorithms.anomaly_likelihood.estimateAnomalyLikelihoods",
      side_effect=estimateAnomalyLikelihoodsWrap, autospec=True)

    with estimateAnomalyLikelihoodsPatch:
      l = an.AnomalyLikelihood(claLearningPeriod=2,
                               estimationSamples=2,
                               historicWindowSize=3,
                               reestimationPeriod=2)
      # burn-in
      l.anomalyProbability(10, 0.1, timestamp=1)
      l.anomalyProbability(10, 0.1, timestamp=2)
      l.anomalyProbability(10, 0.1, timestamp=3)
      l.anomalyProbability(10, 0.1, timestamp=4)
      self.assertEqual(estimateAnomalyLikelihoodsWrap.call_count, 0)

      # First estimation right after the probationary period...
      l.anomalyProbability(10, 0.1, timestamp=5)
      self.assertEqual(estimateAnomalyLikelihoodsWrap.call_count, 1)

      # ...then only every reestimationPeriod (=2) samples.
      l.anomalyProbability(10, 0.1, timestamp=6)
      self.assertEqual(estimateAnomalyLikelihoodsWrap.call_count, 1)

      l.anomalyProbability(10, 0.1, timestamp=7)
      self.assertEqual(estimateAnomalyLikelihoodsWrap.call_count, 2)

      l.anomalyProbability(10, 0.1, timestamp=8)
      self.assertEqual(estimateAnomalyLikelihoodsWrap.call_count, 2)


  def testAnomalyProbabilityResultsDuringProbationaryPeriod(self):
    """During burn-in anomalyProbability returns the neutral 0.5; afterwards
    the result reflects the (mocked) likelihood — presumably 1 - likelihood,
    i.e. 0.9 for the stubbed 0.1."""
    originalUpdateAnomalyLikelihoods = an.updateAnomalyLikelihoods

    def updateAnomalyLikelihoodsWrap(anomalyScores, params, verbosity=0):
      likelihoods, avgRecordList, params = originalUpdateAnomalyLikelihoods(
        anomalyScores=anomalyScores,
        params=params,
        verbosity=verbosity)

      self.assertEqual(len(likelihoods), 1)

      # Force a fixed likelihood so the expected result is deterministic.
      return [0.1], avgRecordList, params

    updateAnomalyLikelihoodsPatch = mock.patch(
      "nupic.algorithms.anomaly_likelihood.updateAnomalyLikelihoods",
      side_effect=updateAnomalyLikelihoodsWrap, autospec=True)

    with updateAnomalyLikelihoodsPatch:
      l = an.AnomalyLikelihood(claLearningPeriod=2,
                               estimationSamples=2,
                               historicWindowSize=3)
      # 0.5 result is expected during burn-in
      self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=1), 0.5)
      self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=2), 0.5)
      self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=3), 0.5)
      self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=4), 0.5)

      self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=5), 0.9)
      self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=6), 0.9)


  def testEquals(self):
    """Two instances are equal iff their full internal state (including the
    lazily-created distribution) matches."""
    l = an.AnomalyLikelihood(claLearningPeriod=2, estimationSamples=2)
    l2 = an.AnomalyLikelihood(claLearningPeriod=2, estimationSamples=2)
    self.assertEqual(l, l2)

    # Use 5 iterations to force the distribution to be created (4 probationary
    # samples + 1)
    l2.anomalyProbability(5, 0.1, timestamp=1) # burn in
    l2.anomalyProbability(5, 0.1, timestamp=2)
    l2.anomalyProbability(5, 0.1, timestamp=3)
    l2.anomalyProbability(5, 0.1, timestamp=4)
    self.assertIsNone(l2._distribution)
    l2.anomalyProbability(1, 0.3, timestamp=5)
    self.assertIsNotNone(l2._distribution)
    # l has not ingested anything yet, so the two must now differ.
    self.assertNotEqual(l, l2)

    # Feed l the identical sequence; equality must be restored.
    l.anomalyProbability(5, 0.1, timestamp=1) # burn in
    l.anomalyProbability(5, 0.1, timestamp=2)
    l.anomalyProbability(5, 0.1, timestamp=3)
    l.anomalyProbability(5, 0.1, timestamp=4)
    self.assertIsNone(l._distribution)
    l.anomalyProbability(1, 0.3, timestamp=5)
    self.assertIsNotNone(l._distribution)
    self.assertEqual(l, l2, "equal? \n%s\n vs. \n%s" % (l, l2))


  def testSerialization(self):
    """serialization using pickle"""
    l = an.AnomalyLikelihood(claLearningPeriod=2, estimationSamples=2)

    l.anomalyProbability("hi", 0.1, timestamp=1) # burn in
    l.anomalyProbability("hi", 0.1, timestamp=2)
    l.anomalyProbability("hello", 0.3, timestamp=3)

    # Round-trip through pickle; relies on __eq__ for state comparison.
    stored = pickle.dumps(l)
    restored = pickle.loads(stored)
    self.assertEqual(l, restored)
class AnomalyLikelihoodAlgorithmTest(TestCaseBase):
  """Tests the low-level algorithm functions"""

  def assertWithinEpsilon(self, a, b, epsilon=0.005):
    # Absolute-tolerance float comparison helper used throughout this class.
    self.assertLessEqual(abs(a - b), epsilon,
                         "Values %g and %g are not within %g" % (a, b, epsilon))


  def testNormalProbability(self):
    """
    Test that the tailProbability function returns correct normal values
    """
    # Test a standard normal distribution
    # Values taken from http://en.wikipedia.org/wiki/Standard_normal_table
    p = {"name": "normal", "mean": 0.0, "variance": 1.0, "stdev": 1.0}
    self.assertWithinEpsilon(an.tailProbability(0.0, p), 0.5)
    self.assertWithinEpsilon(an.tailProbability(0.3, p), 0.3820885780)
    self.assertWithinEpsilon(an.tailProbability(1.0, p), 0.1587)
    # Tail probability should be symmetric about the mean.
    self.assertWithinEpsilon(an.tailProbability(1.0, p),
                             an.tailProbability(-1.0, p))
    self.assertWithinEpsilon(an.tailProbability(-0.3, p),
                             an.tailProbability(0.3, p))

    # Non standard normal distribution
    p = {"name": "normal", "mean": 1.0, "variance": 4.0, "stdev": 2.0}
    self.assertWithinEpsilon(an.tailProbability(1.0, p), 0.5)
    self.assertWithinEpsilon(an.tailProbability(2.0, p), 0.3085)
    self.assertWithinEpsilon(an.tailProbability(3.0, p), 0.1587)
    self.assertWithinEpsilon(an.tailProbability(3.0, p),
                             an.tailProbability(-1.0, p))
    self.assertWithinEpsilon(an.tailProbability(0.0, p),
                             an.tailProbability(2.0, p))

    # Non standard normal distribution
    p = {"name": "normal", "mean": -2.0, "variance": 0.5,
         "stdev": math.sqrt(0.5)}
    self.assertWithinEpsilon(an.tailProbability(-2.0, p), 0.5)
    self.assertWithinEpsilon(an.tailProbability(-1.5, p), 0.241963652)
    self.assertWithinEpsilon(an.tailProbability(-2.5, p),
                             an.tailProbability(-1.5, p))


  def testEstimateNormal(self):
    """
    This passes in a known set of data and ensures the estimateNormal
    function returns the expected results.
    """
    # 100 samples drawn from mean=0.4, stdev = 0.5
    samples = numpy.array(
      [0.32259025, -0.44936321, -0.15784842, 0.72142628, 0.8794327,
       0.06323451, -0.15336159, -0.02261703, 0.04806841, 0.47219226,
       0.31102718, 0.57608799, 0.13621071, 0.92446815, 0.1870912,
       0.46366935, -0.11359237, 0.66582357, 1.20613048, -0.17735134,
       0.20709358, 0.74508479, 0.12450686, -0.15468728, 0.3982757,
       0.87924349, 0.86104855, 0.23688469, -0.26018254, 0.10909429,
       0.65627481, 0.39238532, 0.77150761, 0.47040352, 0.9676175,
       0.42148897, 0.0967786, -0.0087355, 0.84427985, 1.46526018,
       1.19214798, 0.16034816, 0.81105554, 0.39150407, 0.93609919,
       0.13992161, 0.6494196, 0.83666217, 0.37845278, 0.0368279,
       -0.10201944, 0.41144746, 0.28341277, 0.36759426, 0.90439446,
       0.05669459, -0.11220214, 0.34616676, 0.49898439, -0.23846184,
       1.06400524, 0.72202135, -0.2169164, 1.136582, -0.69576865,
       0.48603271, 0.72781008, -0.04749299, 0.15469311, 0.52942518,
       0.24816816, 0.3483905, 0.7284215, 0.93774676, 0.07286373,
       1.6831539, 0.3851082, 0.0637406, -0.92332861, -0.02066161,
       0.93709862, 0.82114131, 0.98631562, 0.05601529, 0.72214694,
       0.09667526, 0.3857222, 0.50313998, 0.40775344, -0.69624046,
       -0.4448494, 0.99403206, 0.51639049, 0.13951548, 0.23458214,
       1.00712699, 0.40939048, -0.06436434, -0.02753677, -0.23017904])

    params = an.estimateNormal(samples)
    # Expected values are the sample statistics of the fixed array above.
    self.assertWithinEpsilon(params["mean"], 0.3721)
    self.assertWithinEpsilon(params["variance"], 0.22294)
    self.assertWithinEpsilon(params["stdev"], 0.47216)
    self.assertEqual(params["name"], "normal")


  def testSampleDistribution(self):
    """
    Test that sampleDistribution from a generated distribution returns roughly
    the same parameters.
    """
    # 1000 samples drawn from mean=0.5, variance=0.1
    p = {"mean": 0.5,
         "name": "normal",
         "stdev": math.sqrt(0.1),
         "variance": 0.1}
    samples = _sampleDistribution(p, 1000)

    # Ensure estimate is reasonable
    np = an.estimateNormal(samples)

    self.assertWithinEpsilon(p["mean"], np["mean"], 0.1)
    self.assertWithinEpsilon(p["variance"], np["variance"], 0.1)
    self.assertWithinEpsilon(p["stdev"], np["stdev"], 0.1)
    self.assertTrue(np["name"], "normal")


  def testEstimateAnomalyLikelihoods(self):
    """
    This calls estimateAnomalyLikelihoods to estimate the distribution on fake
    data and validates the results
    """
    # Generate an estimate using fake distribution of anomaly scores.
    data1 = _generateSampleData(mean=0.2)

    likelihoods, avgRecordList, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1[0:1000])
    )
    self.assertEqual(len(likelihoods), 1000)
    self.assertEqual(len(avgRecordList), 1000)
    self.assertTrue(an.isValidEstimatorParams(estimatorParams))

    # Check that the sum is correct
    avgParams = estimatorParams["movingAverage"]
    total = 0
    for v in avgRecordList:
      total = total + v[2]
    self.assertTrue(avgParams["total"], total)

    # Check that the estimated mean is correct
    dParams = estimatorParams["distribution"]
    self.assertWithinEpsilon(dParams["mean"],
                             total / float(len(avgRecordList)))

    # Number of points with lower than 2% probability should be pretty low
    # but not zero. Can't use exact 2% here due to random variations
    self.assertLessEqual(numpy.sum(likelihoods < 0.02), 50)
    self.assertGreaterEqual(numpy.sum(likelihoods < 0.02), 1)


  def testEstimateAnomalyLikelihoodsCategoryValues(self):
    """estimateAnomalyLikelihoods should accept non-numeric (category) metric
    values without failing."""
    start = datetime.datetime(2017, 1, 1, 0, 0, 0)
    delta = datetime.timedelta(minutes=5)
    dts = [start + (i * delta) for i in xrange(10)]
    values = ["a", "b", "c", "d", "e"] * 2
    rawScores = [0.1 * i for i in xrange(10)]
    data = zip(dts, values, rawScores)

    likelihoods, avgRecordList, estimatorParams = (
      an.estimateAnomalyLikelihoods(data)
    )
    self.assertEqual(len(likelihoods), 10)
    self.assertEqual(len(avgRecordList), 10)
    self.assertTrue(an.isValidEstimatorParams(estimatorParams))


  def testEstimateAnomalyLikelihoodsMalformedRecords(self):
    """
    This calls estimateAnomalyLikelihoods with malformed records, which should
    be quietly skipped.
    """
    # Generate a fake distribution of anomaly scores, and add malformed records
    # at the end: a 2-tuple, a 4-tuple, an empty tuple, and a bare int
    # (note: (2) is not a tuple, just the integer 2).
    data1 = _generateSampleData(mean=0.2)
    data1 = data1[0:1000] + [(2, 2)] + [(2, 2, 2, 2)] + [()] + [(2)]

    likelihoods, avgRecordList, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1[0:1004])
    )
    # Only the 1000 well-formed records should have been processed.
    self.assertEqual(len(likelihoods), 1000)
    self.assertEqual(len(avgRecordList), 1000)
    self.assertTrue(an.isValidEstimatorParams(estimatorParams))

    # Check that the sum is correct
    avgParams = estimatorParams["movingAverage"]
    total = 0
    for v in avgRecordList:
      total = total + v[2]
    self.assertTrue(avgParams["total"], total)

    # Check that the estimated mean is correct
    dParams = estimatorParams["distribution"]
    self.assertWithinEpsilon(dParams["mean"],
                             total / float(len(avgRecordList)))


  def testSkipRecords(self):
    """
    This calls estimateAnomalyLikelihoods with various values of skipRecords
    """
    # Check happy path: first 200 records have mean 0.1, next 200 mean 0.9.
    data1 = _generateSampleData(mean=0.1)[0:200]
    data1 = data1 + (_generateSampleData(mean=0.9)[0:200])

    likelihoods, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1, skipRecords=200)
    )

    # Check results are correct, i.e. we are actually skipping the first 200
    dParams = estimatorParams["distribution"]
    self.assertWithinEpsilon(dParams["mean"], 0.9, epsilon=0.1)

    # Check case where skipRecords > num records
    # In this case a null distribution should be returned which makes all
    # the likelihoods reasonably high
    likelihoods, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1, skipRecords=500)
    )
    self.assertEqual(len(likelihoods), len(data1))
    self.assertTrue(likelihoods.sum() >= 0.3 * len(likelihoods))

    # Check the case where skipRecords == num records
    likelihoods, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1, skipRecords=len(data1))
    )
    self.assertEqual(len(likelihoods), len(data1))
    self.assertTrue(likelihoods.sum() >= 0.3 * len(likelihoods))


  def testUpdateAnomalyLikelihoods(self):
    """
    A slight more complex test. This calls estimateAnomalyLikelihoods
    to estimate the distribution on fake data, followed by several calls
    to updateAnomalyLikelihoods.
    """

    #------------------------------------------
    # Step 1. Generate an initial estimate using fake distribution of anomaly
    # scores.
    data1 = _generateSampleData(mean=0.2)[0:1000]
    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1, averagingWindow=5)
    )

    #------------------------------------------
    # Step 2. Generate some new data with a higher average anomaly
    # score. Using the estimator from step 1, to compute likelihoods. Now we
    # should see a lot more anomalies.
    data2 = _generateSampleData(mean=0.6)[0:300]
    likelihoods2, avgRecordList2, estimatorParams2 = (
      an.updateAnomalyLikelihoods(data2, estimatorParams)
    )
    self.assertEqual(len(likelihoods2), len(data2))
    self.assertEqual(len(avgRecordList2), len(data2))
    self.assertTrue(an.isValidEstimatorParams(estimatorParams))

    # The new running total should be different
    self.assertNotEqual(estimatorParams2["movingAverage"]["total"],
                        estimatorParams["movingAverage"]["total"])

    # We should have many more samples where likelihood is < 0.01, but not all
    self.assertGreaterEqual(numpy.sum(likelihoods2 < 0.01), 25)
    self.assertLessEqual(numpy.sum(likelihoods2 < 0.01), 250)

    #------------------------------------------
    # Step 3. Generate some new data with the expected average anomaly score. We
    # should see fewer anomalies than in Step 2.
    data3 = _generateSampleData(mean=0.2)[0:1000]
    likelihoods3, avgRecordList3, estimatorParams3 = (
      an.updateAnomalyLikelihoods(data3, estimatorParams2)
    )
    self.assertEqual(len(likelihoods3), len(data3))
    self.assertEqual(len(avgRecordList3), len(data3))
    self.assertTrue(an.isValidEstimatorParams(estimatorParams3))

    # The new running total should be different
    self.assertNotEqual(estimatorParams3["movingAverage"]["total"],
                        estimatorParams["movingAverage"]["total"])
    self.assertNotEqual(estimatorParams3["movingAverage"]["total"],
                        estimatorParams2["movingAverage"]["total"])

    # We should have a small number of samples where likelihood is < 0.01, but
    # at least one
    self.assertGreaterEqual(numpy.sum(likelihoods3 < 0.01), 1)
    self.assertLessEqual(numpy.sum(likelihoods3 < 0.01), 100)

    #------------------------------------------
    # Step 4. Validate that sending data incrementally is the same as sending
    # in one batch
    allData = data1
    allData.extend(data2)
    allData.extend(data3)

    # Compute moving average of all the data and check it's the same
    _, historicalValuesAll, totalAll = (
      an._anomalyScoreMovingAverage(allData, windowSize=5)
    )
    self.assertEqual(sum(historicalValuesAll),
                     sum(estimatorParams3["movingAverage"]["historicalValues"]))
    self.assertEqual(totalAll,
                     estimatorParams3["movingAverage"]["total"])


  def testFlatAnomalyScores(self):
    """
    This calls estimateAnomalyLikelihoods with flat distributions and
    ensures things don't crash.
    """

    # Generate an estimate using fake distribution of anomaly scores.
    data1 = _generateSampleData(mean=42.0, variance=1e-10)

    likelihoods, avgRecordList, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1[0:1000])
    )
    self.assertEqual(len(likelihoods), 1000)
    self.assertEqual(len(avgRecordList), 1000)
    self.assertTrue(an.isValidEstimatorParams(estimatorParams))

    ## Check that the estimated mean is correct
    dParams = estimatorParams["distribution"]
    self.assertWithinEpsilon(dParams["mean"], data1[0][2])

    # If you deviate from the mean, you should get probability 0
    # Test this by sending in just slightly different values.
    data2 = _generateSampleData(mean=42.5, variance=1e-10)
    likelihoods2, _, _ = (
      an.updateAnomalyLikelihoods(data2[0:10], estimatorParams)
    )

    # The likelihoods should go to zero very quickly
    self.assertLessEqual(likelihoods2.sum(), 0.01)

    # Test edge case where anomaly scores are very close to 0
    # In this case we don't let likelihood to get too low. An average
    # anomaly score of 0.1 should be essentially zero, but an average
    # of 0.05 should be higher
    data3 = _generateSampleData(mean=0.01, variance=1e-6)

    _, _, estimatorParams3 = (
      an.estimateAnomalyLikelihoods(data3[0:1000])
    )

    data4 = _generateSampleData(mean=0.1, variance=1e-6)

    likelihoods4, _, estimatorParams4 = (
      an.updateAnomalyLikelihoods(data4[0:20], estimatorParams3)
    )

    # Average of 0.1 should go to zero
    self.assertLessEqual(likelihoods4[10:].mean(), 0.002)

    data5 = _generateSampleData(mean=0.05, variance=1e-6)

    likelihoods5, _, _ = (
      an.updateAnomalyLikelihoods(data5[0:20], estimatorParams4)
    )

    # The likelihoods should be low but not near zero
    self.assertLessEqual(likelihoods5[10:].mean(), 0.28)
    self.assertGreater(likelihoods5[10:].mean(), 0.015)


  def testFlatMetricScores(self):
    """
    This calls estimateAnomalyLikelihoods with flat metric values. In this case
    we should use the null distribution, which gets reasonably high likelihood
    for everything.
    """
    # Generate samples with very flat metric values
    data1 = _generateSampleData(
      metricMean=42.0, metricVariance=1e-10)[0:1000]

    likelihoods, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1)
    )

    # Check that we do indeed get reasonable likelihood values
    self.assertEqual(len(likelihoods), len(data1))
    self.assertTrue(likelihoods.sum() >= 0.4 * len(likelihoods))

    # Check that we do indeed get null distribution
    self.assertDictEqual(estimatorParams["distribution"], an.nullDistribution())


  def testVeryFewScores(self):
    """
    This calls estimateAnomalyLikelihoods and updateAnomalyLikelihoods
    with one or no scores.
    """

    # Generate an estimate using two data points
    data1 = _generateSampleData(mean=42.0, variance=1e-10)

    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1[0:2])
    )
    self.assertTrue(an.isValidEstimatorParams(estimatorParams))

    # Check that the estimated mean is that value
    dParams = estimatorParams["distribution"]
    self.assertWithinEpsilon(dParams["mean"], data1[0][2])

    # Can't generate an estimate using no data points
    data1 = numpy.zeros(0)
    with self.assertRaises(ValueError):
      an.estimateAnomalyLikelihoods(data1)

    # Can't update with no scores
    with self.assertRaises(ValueError):
      an.updateAnomalyLikelihoods(data1, estimatorParams)


  def testBadParams(self):
    """
    Calls updateAnomalyLikelihoods with bad params.
    """

    # Generate an estimate using one data point
    data1 = _generateSampleData(mean=42.0, variance=1e-10)

    _, _, estimatorParams = (
      an.estimateAnomalyLikelihoods(data1[0:1])
    )
    self.assertTrue(an.isValidEstimatorParams(estimatorParams))

    # Can't pass in a bad params structure
    with self.assertRaises(ValueError):
      an.updateAnomalyLikelihoods(data1, {"haha": "heehee"})

    # Can't pass in something not a dict
    with self.assertRaises(ValueError):
      an.updateAnomalyLikelihoods(data1, 42.0)


  def testFilterLikelihodsInputType(self):
    """
    Calls _filterLikelihoods with both input types -- numpy array of floats and
    list of floats.
    """
    l = [0.0, 0.0, 0.3, 0.3, 0.5]
    l2 = an._filterLikelihoods(l)
    n = numpy.array(l)
    n2 = an._filterLikelihoods(n)
    filtered = [0.0, 0.001, 0.3, 0.3, 0.5]

    for i in range(len(l)):
      self.assertAlmostEqual(
        l2[i], filtered[i],
        msg="Input of type list returns incorrect result")

    for i in range(len(n)):
      self.assertAlmostEqual(
        n2[i], filtered[i],
        msg="Input of type numpy array returns incorrect result")


  def testFilterLikelihoods(self):
    """
    Tests _filterLikelihoods function for several cases:
      i. Likelihood goes straight to redzone, skipping over yellowzone, repeats
      ii. Case (i) with different values, and numpy array instead of float list
      iii. A scenario where changing the redzone from four to five 9s should
           filter differently
    """
    redThreshold = 0.9999
    yellowThreshold = 0.999

    # Case (i): values at indices 1 and 7 should be filtered to yellowzone
    l = [1.0, 1.0, 0.9, 0.8, 0.5, 0.4, 1.0, 1.0, 0.6, 0.0]
    l = [1 - x for x in l]
    l2 = copy.copy(l)
    l2[1] = 1 - yellowThreshold
    l2[7] = 1 - yellowThreshold
    l3 = an._filterLikelihoods(l, redThreshold=redThreshold)

    for i in range(len(l2)):
      self.assertAlmostEqual(l2[i], l3[i], msg="Failure in case (i)")

    # Case (ii): values at indices 1-10 should be filtered to yellowzone
    l = numpy.array([0.999978229, 0.999978229, 0.999999897, 1, 1, 1, 1,
                     0.999999994, 0.999999966, 0.999999966, 0.999994331,
                     0.999516576, 0.99744487])
    l = 1.0 - l
    l2 = copy.copy(l)
    l2[1:11] = 1 - yellowThreshold
    l3 = an._filterLikelihoods(l, redThreshold=redThreshold)

    for i in range(len(l2)):
      self.assertAlmostEqual(l2[i], l3[i], msg="Failure in case (ii)")

    # Case (iii): redThreshold difference should be at index 2
    l = numpy.array([0.999968329, 0.999999897, 1, 1, 1,
                     1, 0.999999994, 0.999999966, 0.999999966,
                     0.999994331, 0.999516576, 0.99744487])
    l = 1.0 - l
    l2a = copy.copy(l)
    l2b = copy.copy(l)
    l2a[1:10] = 1 - yellowThreshold
    l2b[2:10] = 1 - yellowThreshold
    l3a = an._filterLikelihoods(l, redThreshold=redThreshold)
    l3b = an._filterLikelihoods(l, redThreshold=0.99999)

    for i in range(len(l2a)):
      self.assertAlmostEqual(l2a[i], l3a[i],
                             msg="Failure in case (iii), list a")

    for i in range(len(l2b)):
      self.assertAlmostEqual(l2b[i], l3b[i],
                             msg="Failure in case (iii), list b")

    self.assertFalse(numpy.array_equal(l3a, l3b),
                     msg="Failure in case (iii), list 3")
# Allow this test module to be run directly.
if __name__ == "__main__":
  unittest.main()
| 30,911 | Python | .py | 665 | 39.461654 | 80 | 0.684888 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,106 | backtracking_tm_cpp_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/backtracking_tm_cpp_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for the C++ implementation of the temporal memory."""
import unittest2 as unittest
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
from tests.unit.nupic.algorithms import backtracking_tm_test
# Run the Python TM test against the BacktrackingTMCPP.
# This works by monkeypatching the class under test inside the imported
# backtracking_tm_test module, then re-exporting its TestCase so unittest
# discovery picks it up from this module as well.
backtracking_tm_test.BacktrackingTM = BacktrackingTMCPP

BacktrackingTMTest = backtracking_tm_test.BacktrackingTMTest

if __name__ == '__main__':
  unittest.main()
| 1,421 | Python | .py | 29 | 47.689655 | 72 | 0.712635 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,107 | spatial_pooler_unit_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/spatial_pooler_unit_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Disable since test code accesses private members in the class to be tested
# pylint: disable=W0212
import numbers
import numpy
import tempfile
import unittest
from copy import copy
from mock import Mock
from nupic.bindings.math import GetNTAReal, Random
from nupic.algorithms.spatial_pooler import (BinaryCorticalColumns,
CorticalColumns,
SpatialPooler)
from nupic.support.unittesthelpers.algorithm_test_helpers import (
getNumpyRandomGenerator, getSeed)
# capnp (Cap'n Proto bindings) is optional; serialization-related code is
# only exercised when it is installed.
try:
  import capnp
except ImportError:
  capnp = None

if capnp:
  from nupic.proto import SpatialPoolerProto_capnp

# dtype aliases used when building arrays for the SpatialPooler.
uintDType = "uint32"
realDType = GetNTAReal()
class SpatialPoolerTest(unittest.TestCase):
"""Unit Tests for SpatialPooler class."""
def setUp(self):
  """Build a small default SpatialPooler shared by the tests.

  Tests that need a different topology or parameters construct their own
  pooler instance instead of using self._sp.
  """
  self._params = {
    "inputDimensions": [5],
    "columnDimensions": [5],
    "potentialRadius": 5,
    "potentialPct": 0.5,
    "globalInhibition": False,
    "localAreaDensity": -1.0,
    "numActiveColumnsPerInhArea": 3,
    "stimulusThreshold": 0,
    "synPermInactiveDec": 0.01,
    "synPermActiveInc": 0.1,
    "synPermConnected": 0.10,
    "minPctOverlapDutyCycle": 0.1,
    "dutyCyclePeriod": 10,
    "boostStrength": 10.0,
    "seed": getSeed(),
    "spVerbosity": 0
  }
  self._sp = SpatialPooler(**self._params)
def testCompute1(self):
  """Checks that feeding in the same input vector leads to polarized
  permanence values: either zeros or ones, but no fractions"""
  pooler = SpatialPooler(
    inputDimensions=[9],
    columnDimensions=[5],
    potentialRadius=3,
    potentialPct=0.5,
    globalInhibition=False,
    localAreaDensity=-1.0,
    numActiveColumnsPerInhArea=3,
    stimulusThreshold=1,
    synPermInactiveDec=0.1,
    synPermActiveInc=0.1,
    synPermConnected=0.10,
    minPctOverlapDutyCycle=0.1,
    dutyCyclePeriod=10,
    boostStrength=10.0,
    seed=getSeed(),
    spVerbosity=0)

  # Give every column the full input as its potential pool and force every
  # column to win inhibition, so all columns learn on every step.
  fullPool = numpy.ones([pooler._numColumns, pooler._numInputs])
  pooler._potentialPools = BinaryCorticalColumns(fullPool)
  pooler._inhibitColumns = Mock(return_value=numpy.array(range(5)))

  pattern = numpy.array([1, 0, 1, 0, 1, 0, 0, 1, 1])
  activeColumns = numpy.zeros(5)
  for _ in xrange(20):
    pooler.compute(pattern, True, activeColumns)

  # Repeated learning on one fixed pattern drives each permanence row to
  # exactly match the input bits (all 0s and 1s, nothing fractional).
  for columnIndex in xrange(pooler._numColumns):
    permanenceRow = pooler._permanences.getRow(columnIndex)
    self.assertEqual(list(permanenceRow), list(pattern))
def testCompute2(self):
  """Checks that columns only change the permanence values for
  inputs that are within their potential pool"""
  pooler = SpatialPooler(
    inputDimensions=[10],
    columnDimensions=[5],
    potentialRadius=3,
    potentialPct=0.5,
    globalInhibition=False,
    localAreaDensity=-1.0,
    numActiveColumnsPerInhArea=3,
    stimulusThreshold=1,
    synPermInactiveDec=0.01,
    synPermActiveInc=0.1,
    synPermConnected=0.10,
    minPctOverlapDutyCycle=0.1,
    dutyCyclePeriod=10,
    boostStrength=10.0,
    seed=getSeed(),
    spVerbosity=0)

  # Force every column to win inhibition so all of them learn each step.
  pooler._inhibitColumns = Mock(return_value=numpy.array(range(5)))

  allOnes = numpy.ones(pooler._numInputs)
  activeColumns = numpy.zeros(5)
  for _ in xrange(20):
    pooler.compute(allOnes, True, activeColumns)

  # With an all-ones input, permanences can only grow inside each column's
  # potential pool; after saturation the row equals the pool mask exactly.
  for columnIndex in xrange(pooler._numColumns):
    poolMask = pooler._potentialPools[columnIndex]
    permanenceRow = pooler._permanences.getRow(columnIndex)
    self.assertEqual(list(permanenceRow), list(poolMask))
def testZeroOverlap_NoStimulusThreshold_GlobalInhibition(self):
  """When stimulusThreshold is 0, allow columns without any overlap to become
  active. This test focuses on the global inhibition code path."""
  numInputs = 10
  numColumns = 20
  pooler = SpatialPooler(inputDimensions=[numInputs],
                         columnDimensions=[numColumns],
                         potentialRadius=10,
                         globalInhibition=True,
                         numActiveColumnsPerInhArea=3,
                         stimulusThreshold=0,
                         seed=getSeed())

  activeColumns = numpy.zeros(numColumns)
  pooler.compute(numpy.zeros(numInputs), True, activeColumns)

  # Even though every overlap is zero, the k winners are still selected.
  self.assertEqual(len(activeColumns.nonzero()[0]), 3)
def testZeroOverlap_StimulusThreshold_GlobalInhibition(self):
  """When stimulusThreshold is > 0, don't allow columns without any overlap to
  become active. This test focuses on the global inhibition code path."""
  numInputs = 10
  numColumns = 20
  pooler = SpatialPooler(inputDimensions=[numInputs],
                         columnDimensions=[numColumns],
                         potentialRadius=10,
                         globalInhibition=True,
                         numActiveColumnsPerInhArea=3,
                         stimulusThreshold=1,
                         seed=getSeed())

  activeColumns = numpy.zeros(numColumns)
  pooler.compute(numpy.zeros(numInputs), True, activeColumns)

  # All overlaps are below the stimulus threshold, so nothing activates.
  self.assertEqual(len(activeColumns.nonzero()[0]), 0)
def testZeroOverlap_NoStimulusThreshold_LocalInhibition(self):
  """When stimulusThreshold is 0, allow columns without any overlap to become
  active. This test focuses on the local inhibition code path."""
  numInputs = 10
  numColumns = 20
  pooler = SpatialPooler(inputDimensions=[numInputs],
                         columnDimensions=[numColumns],
                         potentialRadius=5,
                         globalInhibition=False,
                         numActiveColumnsPerInhArea=1,
                         stimulusThreshold=0,
                         seed=getSeed())

  # This exact number of active columns is determined by the inhibition
  # radius, which changes based on the random synapses (i.e. weird math).
  # Force it to a known number.
  pooler.setInhibitionRadius(2)

  activeColumns = numpy.zeros(numColumns)
  pooler.compute(numpy.zeros(numInputs), True, activeColumns)

  self.assertEqual(len(activeColumns.nonzero()[0]), 6)
def testZeroOverlap_StimulusThreshold_LocalInhibition(self):
  """When stimulusThreshold is > 0, don't allow columns without any overlap to
  become active. This test focuses on the local inhibition code path."""
  numInputs = 10
  numColumns = 20
  pooler = SpatialPooler(inputDimensions=[numInputs],
                         columnDimensions=[numColumns],
                         potentialRadius=10,
                         globalInhibition=False,
                         numActiveColumnsPerInhArea=3,
                         stimulusThreshold=1,
                         seed=getSeed())

  activeColumns = numpy.zeros(numColumns)
  pooler.compute(numpy.zeros(numInputs), True, activeColumns)

  # No column meets the stimulus threshold, so none become active.
  self.assertEqual(len(activeColumns.nonzero()[0]), 0)
def testOverlapsOutput(self):
  """Checks that overlaps and boostedOverlaps are correctly returned"""
  pooler = SpatialPooler(inputDimensions=[5],
                         columnDimensions=[3],
                         potentialRadius=5,
                         numActiveColumnsPerInhArea=5,
                         globalInhibition=True,
                         seed=1,
                         synPermActiveInc=0.1,
                         synPermInactiveDec=0.1)

  # Double every column's boost factor so boosted overlaps are 2x raw ones.
  pooler.setBoostFactors(2.0 * numpy.ones(3))

  activeColumns = numpy.zeros(3)
  pooler.compute(numpy.ones(5), True, activeColumns)

  # Expected raw overlaps for this fixed seed and all-ones input.
  expected = numpy.array([2, 0, 0], dtype=realDType)
  rawOverlaps = pooler.getOverlaps()
  boostedOverlaps = pooler.getBoostedOverlaps()
  for columnIndex in range(pooler.getNumColumns()):
    self.assertEqual(rawOverlaps[columnIndex], expected[columnIndex])
    self.assertEqual(boostedOverlaps[columnIndex],
                     (2 * expected[columnIndex]))
  def testExactOutput(self):
    """
    Given a specific input and initialization params the SP should return this
    exact output.

    Previously output varied between platforms (OSX/Linux etc)
    """
    # Regression fixture: the exact set of winning column indices for the
    # seeded SP below. Any change here indicates a platform- or
    # implementation-dependent divergence.
    expectedOutput = [57, 80, 135, 215, 281, 350, 431, 534, 556, 565, 574, 595,
                      663, 759, 777, 823, 932, 933, 1031, 1126, 1184, 1262,
                      1468, 1479, 1516, 1531, 1585, 1672, 1793, 1807, 1906,
                      1927, 1936, 1939, 1940, 1944, 1957, 1978, 2040, 2047]
    sp = SpatialPooler(
      inputDimensions = [1,188],
      columnDimensions = [2048, 1],
      potentialRadius = 94,
      potentialPct = 0.5,
      globalInhibition = 1,
      localAreaDensity = -1.0,
      numActiveColumnsPerInhArea = 40.0,
      stimulusThreshold = 0,
      synPermInactiveDec = 0.01,
      synPermActiveInc = 0.1,
      synPermConnected = 0.1,
      minPctOverlapDutyCycle=0.001,
      dutyCyclePeriod = 1000,
      boostStrength = 10.0,
      seed = 1956,
      spVerbosity = 0
    )
    inputVector = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0,
                   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
                   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    inputArray = numpy.array(inputVector).astype(realDType)
    activeArray = numpy.zeros(2048)
    sp.compute(inputArray, 1, activeArray)
    # Get only the active column indices
    spOutput = [i for i, v in enumerate(activeArray) if v != 0]
    self.assertEqual(sorted(spOutput), expectedOutput)
  def testStripNeverLearned(self):
    """stripUnlearnedColumns should zero out active columns whose active duty
    cycle is 0 (i.e. columns that have never won), leaving the rest intact."""
    sp = self._sp
    # Columns 2 and 5 have never been active; only 0, 1, 4 may survive.
    sp._activeDutyCycles = numpy.array([0.5, 0.1, 0, 0.2, 0.4, 0])
    activeArray = numpy.array([1, 1, 1, 0, 1, 0])
    sp.stripUnlearnedColumns(activeArray)
    stripped = numpy.where(activeArray == 1)[0]
    trueStripped = [0, 1, 4]
    self.assertListEqual(trueStripped, list(stripped))
    # All columns active; those with zero duty cycle are removed.
    sp._activeDutyCycles = numpy.array([0.9, 0, 0, 0, 0.4, 0.3])
    activeArray = numpy.ones(6)
    sp.stripUnlearnedColumns(activeArray)
    stripped = numpy.where(activeArray == 1)[0]
    trueStripped = [0, 4, 5]
    self.assertListEqual(trueStripped, list(stripped))
    # No column has ever learned: everything is stripped.
    sp._activeDutyCycles = numpy.array([0, 0, 0, 0, 0, 0])
    activeArray = numpy.ones(6)
    sp.stripUnlearnedColumns(activeArray)
    stripped = numpy.where(activeArray == 1)[0]
    trueStripped = []
    self.assertListEqual(trueStripped, list(stripped))
    # Every column has learned: nothing is stripped.
    sp._activeDutyCycles = numpy.ones(6)
    activeArray = numpy.ones(6)
    sp.stripUnlearnedColumns(activeArray)
    stripped = numpy.where(activeArray == 1)[0]
    trueStripped = range(6)
    self.assertListEqual(trueStripped, list(stripped))
  def testMapColumn(self):
    """_mapColumn should map a column index to the flat input index at the
    center of that column's receptive field."""
    params = self._params.copy()
    # Test 1D
    params.update({
      "columnDimensions": [4],
      "inputDimensions": [12]
    })
    sp = SpatialPooler(**params)
    self.assertEqual(sp._mapColumn(0), 1)
    self.assertEqual(sp._mapColumn(1), 4)
    self.assertEqual(sp._mapColumn(2), 7)
    self.assertEqual(sp._mapColumn(3), 10)
    # Test 1D with same dimensions of columns and inputs
    params.update({
      "columnDimensions": [4],
      "inputDimensions": [4]
    })
    sp = SpatialPooler(**params)
    # Identity mapping when dimensions match.
    self.assertEqual(sp._mapColumn(0), 0)
    self.assertEqual(sp._mapColumn(1), 1)
    self.assertEqual(sp._mapColumn(2), 2)
    self.assertEqual(sp._mapColumn(3), 3)
    # Test 1D with dimensions of length 1
    params.update({
      "columnDimensions": [1],
      "inputDimensions": [1]
    })
    sp = SpatialPooler(**params)
    self.assertEqual(sp._mapColumn(0), 0)
    # Test 2D
    params.update({
      "columnDimensions": [12, 4],
      "inputDimensions": [36, 12]
    })
    sp = SpatialPooler(**params)
    self.assertEqual(sp._mapColumn(0), 13)
    self.assertEqual(sp._mapColumn(4), 49)
    self.assertEqual(sp._mapColumn(5), 52)
    self.assertEqual(sp._mapColumn(7), 58)
    self.assertEqual(sp._mapColumn(47), 418)
    # Test 2D with some input dimensions smaller than column dimensions.
    params.update({
      "columnDimensions": [4, 4],
      "inputDimensions": [3, 5]
    })
    sp = SpatialPooler(**params)
    self.assertEqual(sp._mapColumn(0), 0)
    self.assertEqual(sp._mapColumn(3), 4)
    self.assertEqual(sp._mapColumn(15), 14)
  def testMapPotential1D(self):
    """_mapPotential should build a 1D potential-pool mask centered on the
    column's mapped input, honoring wrapAround and potentialPct."""
    params = self._params.copy()
    params.update({
      "inputDimensions": [12],
      "columnDimensions": [4],
      "potentialRadius": 2,
      "wrapAround": False
    })
    # Test without wrapAround and potentialPct = 1
    params["potentialPct"] = 1
    sp = SpatialPooler(**params)
    # Pool for column 0 is clipped at the left input boundary.
    expectedMask = [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
    mask = sp._mapPotential(0)
    self.assertListEqual(mask.tolist(), expectedMask)
    expectedMask = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]
    mask = sp._mapPotential(2)
    self.assertListEqual(mask.tolist(), expectedMask)
    # Test with wrapAround and potentialPct = 1
    params["potentialPct"] = 1
    params["wrapAround"] = True
    sp = SpatialPooler(**params)
    # With wrapAround, the pool wraps past the input boundary.
    expectedMask = [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
    mask = sp._mapPotential(0)
    self.assertListEqual(mask.tolist(), expectedMask)
    expectedMask = [1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1]
    mask = sp._mapPotential(3)
    self.assertListEqual(mask.tolist(), expectedMask)
    # Test with potentialPct < 1
    params["potentialPct"] = 0.5
    sp = SpatialPooler(**params)
    # With potentialPct = 0.5 only half of the candidate bits are sampled,
    # and all of them must come from the full (superset) pool.
    supersetMask = numpy.array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
    mask = sp._mapPotential(0)
    self.assertEqual(numpy.sum(mask), 3)
    unionMask = supersetMask | mask.astype(int)
    self.assertListEqual(unionMask.tolist(), supersetMask.tolist())
  def testMapPotential2D(self):
    """_mapPotential should build a correct 2D potential-pool mask, with and
    without wrapAround."""
    params = self._params.copy()
    params.update({
      "columnDimensions": [2, 4],
      "inputDimensions": [6, 12],
      "potentialRadius": 1,
      "potentialPct": 1,
      "wrapAround": False,
    })
    # Test without wrapAround
    sp = SpatialPooler(**params)
    # Flat input indices of the 3x3 neighborhood around column 0's center.
    trueIndicies = [0, 12, 24,
                    1, 13, 25,
                    2, 14, 26]
    mask = sp._mapPotential(0)
    self.assertSetEqual(set(numpy.flatnonzero(mask).tolist()), set(trueIndicies))
    trueIndicies = [6, 18, 30,
                    7, 19, 31,
                    8, 20, 32]
    mask = sp._mapPotential(2)
    self.assertSetEqual(set(numpy.flatnonzero(mask).tolist()), set(trueIndicies))
    # Test with wrapAround
    params.update({
      "potentialRadius": 2,
      "wrapAround": True,
    })
    sp = SpatialPooler(**params)
    # 5x5 neighborhood wrapping around both dimensions.
    trueIndicies = [71, 11, 23, 35, 47,
                    60,  0, 12, 24, 36,
                    61,  1, 13, 25, 37,
                    62,  2, 14, 26, 38,
                    63,  3, 15, 27, 39]
    mask = sp._mapPotential(0)
    self.assertSetEqual(set(numpy.flatnonzero(mask).tolist()), set(trueIndicies))
    trueIndicies = [68,  8, 20, 32, 44,
                    69,  9, 21, 33, 45,
                    70, 10, 22, 34, 46,
                    71, 11, 23, 35, 47,
                    60,  0, 12, 24, 36]
    mask = sp._mapPotential(3)
    self.assertSetEqual(set(numpy.flatnonzero(mask).tolist()), set(trueIndicies))
  def testMapPotential1Column1Input(self):
    """Degenerate case: a single column over a single input must map to a
    potential pool containing that one input bit."""
    params = self._params.copy()
    params.update({
      "inputDimensions": [1],
      "columnDimensions": [1],
      "potentialRadius": 2,
      "wrapAround": False,
    })
    # Test without wrapAround and potentialPct = 1
    params["potentialPct"] = 1
    sp = SpatialPooler(**params)
    expectedMask = [1]
    mask = sp._mapPotential(0)
    self.assertListEqual(mask.tolist(), expectedMask)
def testInhibitColumns(self):
sp = self._sp
sp._inhibitColumnsGlobal = Mock(return_value = 1)
sp._inhibitColumnsLocal = Mock(return_value = 2)
randomState = getNumpyRandomGenerator()
sp._numColumns = 5
sp._inhibitionRadius = 10
sp._columnDimensions = [5]
overlaps = randomState.random_sample(sp._numColumns).astype(realDType)
sp._inhibitColumnsGlobal.reset_mock()
sp._inhibitColumnsLocal.reset_mock()
sp._numActiveColumnsPerInhArea = 5
sp._localAreaDensity = 0.1
sp._globalInhibition = True
sp._inhibitionRadius = 5
trueDensity = sp._localAreaDensity
sp._inhibitColumns(overlaps)
self.assertEqual(True, sp._inhibitColumnsGlobal.called)
self.assertEqual(False, sp._inhibitColumnsLocal.called)
density = sp._inhibitColumnsGlobal.call_args[0][1]
self.assertEqual(trueDensity, density)
sp._inhibitColumnsGlobal.reset_mock()
sp._inhibitColumnsLocal.reset_mock()
sp._numColumns = 500
sp._tieBreaker = numpy.zeros(500)
sp._columnDimensions = numpy.array([50, 10])
sp._numActiveColumnsPerInhArea = -1
sp._localAreaDensity = 0.1
sp._globalInhibition = False
sp._inhibitionRadius = 7
# 0.1 * (2*9+1)**2 = 22.5
trueDensity = sp._localAreaDensity
overlaps = randomState.random_sample(sp._numColumns).astype(realDType)
sp._inhibitColumns(overlaps)
self.assertEqual(False, sp._inhibitColumnsGlobal.called)
self.assertEqual(True, sp._inhibitColumnsLocal.called)
self.assertEqual(trueDensity, density)
# Test translation of numColumnsPerInhArea into local area density
sp._numColumns = 1000
sp._tieBreaker = numpy.zeros(1000)
sp._columnDimensions = numpy.array([100, 10])
sp._inhibitColumnsGlobal.reset_mock()
sp._inhibitColumnsLocal.reset_mock()
sp._numActiveColumnsPerInhArea = 3
sp._localAreaDensity = -1
sp._globalInhibition = False
sp._inhibitionRadius = 4
trueDensity = 3.0/81.0
overlaps = randomState.random_sample(sp._numColumns).astype(realDType)
# 3.0 / (((2*4) + 1) ** 2)
sp._inhibitColumns(overlaps)
self.assertEqual(False, sp._inhibitColumnsGlobal.called)
self.assertEqual(True, sp._inhibitColumnsLocal.called)
density = sp._inhibitColumnsLocal.call_args[0][1]
self.assertEqual(trueDensity, density)
# Test clipping of local area density to 0.5
sp._numColumns = 1000
sp._tieBreaker = numpy.zeros(1000)
sp._columnDimensions = numpy.array([100, 10])
sp._inhibitColumnsGlobal.reset_mock()
sp._inhibitColumnsLocal.reset_mock()
sp._numActiveColumnsPerInhArea = 7
sp._localAreaDensity = -1
sp._globalInhibition = False
sp._inhibitionRadius = 1
trueDensity = 0.5
overlaps = randomState.random_sample(sp._numColumns).astype(realDType)
sp._inhibitColumns(overlaps)
self.assertEqual(False, sp._inhibitColumnsGlobal.called)
self.assertEqual(True, sp._inhibitColumnsLocal.called)
density = sp._inhibitColumnsLocal.call_args[0][1]
self.assertEqual(trueDensity, density)
  def testUpdateBoostFactors(self):
    """_updateBoostFactors should yield unit boosts for uniform duty cycles
    and the expected exponential boosts otherwise (local and global modes)."""
    sp = self._sp
    # Uniform duty cycles: all boost factors must be exactly 1.0.
    sp._boostStrength = 10.0
    sp._numColumns = 6
    sp._activeDutyCycles = numpy.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
    sp._boostFactors = numpy.zeros(sp._numColumns)
    sp._updateBoostFactors()
    numpy.testing.assert_almost_equal(
      sp._boostFactors, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    # Local inhibition, strong boosting: expected factors precomputed from
    # the exponential boosting formula.
    sp._boostStrength = 10.0
    sp._numColumns = 6
    sp._columnDimensions = numpy.array([6])
    sp._numActiveColumnsPerInhArea = 1
    sp._inhibitionRadius = 5
    sp._wrapAround = True
    sp._activeDutyCycles = numpy.array([0.1, 0.3, 0.02, 0.04, 0.7, 0.12])
    sp._updateBoostFactors()
    numpy.testing.assert_almost_equal(
      sp._boostFactors,
      [3.1059927, 0.4203504, 6.912514, 5.6594878, 0.007699, 2.5429718])
    # Same duty cycles with a weaker boost strength: milder factors.
    sp._boostStrength = 2.0
    sp._numColumns = 6
    sp._activeDutyCycles = numpy.array([0.1, 0.3, 0.02, 0.04, 0.7, 0.12])
    sp._updateBoostFactors()
    numpy.testing.assert_almost_equal(
      sp._boostFactors,
      [1.2544117, 0.8408573, 1.4720657, 1.4143452, 0.3778215, 1.2052255])
    # Global inhibition mode uses the global target density instead.
    sp._globalInhibition = True
    sp._boostStrength = 10.0
    sp._numColumns = 6
    sp._numActiveColumnsPerInhArea = 1
    sp._inhibitionRadius = 3
    sp._activeDutyCycles = numpy.array([0.1, 0.3, 0.02, 0.04, 0.7, 0.12])
    sp._updateBoostFactors()
    numpy.testing.assert_almost_equal(
      sp._boostFactors,
      [1.947734, 0.2635971, 4.3347618, 3.5490028, 0.0048279, 1.5946698])
  def testUpdateInhibitionRadius(self):
    """_updateInhibitionRadius: global mode uses the largest column dimension;
    local mode derives the radius from connected span and columns-per-input,
    clipped at a minimum of 1 and rounded."""
    sp = self._sp
    # Test global inhibition case
    sp._globalInhibition = True
    sp._columnDimensions = numpy.array([57, 31, 2])
    sp._updateInhibitionRadius()
    self.assertEqual(sp._inhibitionRadius, 57)
    sp._globalInhibition = False
    sp._avgConnectedSpanForColumnND = Mock(return_value = 3)
    sp._avgColumnsPerInput = Mock(return_value = 4)
    trueInhibitionRadius = 6
    # ((3 * 4) - 1) / 2 => round up
    sp._updateInhibitionRadius()
    self.assertEqual(trueInhibitionRadius, sp._inhibitionRadius)
    # Test clipping at 1.0
    sp._globalInhibition = False
    sp._avgConnectedSpanForColumnND = Mock(return_value = 0.5)
    sp._avgColumnsPerInput = Mock(return_value = 1.2)
    trueInhibitionRadius = 1
    sp._updateInhibitionRadius()
    self.assertEqual(trueInhibitionRadius, sp._inhibitionRadius)
    # Test rounding up
    sp._globalInhibition = False
    sp._avgConnectedSpanForColumnND = Mock(return_value = 2.4)
    sp._avgColumnsPerInput = Mock(return_value = 2)
    trueInhibitionRadius = 2
    # ((2 * 2.4) - 1) / 2.0 => round up
    sp._updateInhibitionRadius()
    self.assertEqual(trueInhibitionRadius, sp._inhibitionRadius)
  def testAvgColumnsPerInput(self):
    """_avgColumnsPerInput should return the mean, over all dimensions, of the
    column-count to input-count ratio."""
    sp = self._sp
    sp._columnDimensions = numpy.array([2, 2, 2, 2])
    sp._inputDimensions = numpy.array([4, 4, 4, 4])
    self.assertEqual(sp._avgColumnsPerInput(), 0.5)
    sp._columnDimensions = numpy.array([2, 2, 2, 2])
    sp._inputDimensions = numpy.array( [7, 5, 1, 3])
    #                       2/7 0.4 2 0.666
    trueAvgColumnPerInput = (2.0/7 + 2.0/5 + 2.0/1 + 2/3.0) / 4
    self.assertEqual(sp._avgColumnsPerInput(), trueAvgColumnPerInput)
    sp._columnDimensions = numpy.array([3, 3])
    sp._inputDimensions = numpy.array( [3, 3])
    #                       1 1
    trueAvgColumnPerInput = 1
    self.assertEqual(sp._avgColumnsPerInput(), trueAvgColumnPerInput)
    sp._columnDimensions = numpy.array([25])
    sp._inputDimensions = numpy.array( [5])
    #                       5
    trueAvgColumnPerInput = 5
    self.assertEqual(sp._avgColumnsPerInput(), trueAvgColumnPerInput)
    sp._columnDimensions = numpy.array([3, 3, 3, 5, 5, 6, 6])
    sp._inputDimensions = numpy.array( [3, 3, 3, 5, 5, 6, 6])
    #                       1 1 1 1 1 1 1
    trueAvgColumnPerInput = 1
    self.assertEqual(sp._avgColumnsPerInput(), trueAvgColumnPerInput)
    sp._columnDimensions = numpy.array([3, 6, 9, 12])
    sp._inputDimensions = numpy.array( [3, 3, 3 , 3])
    #                       1 2 3 4
    trueAvgColumnPerInput = 2.5
    self.assertEqual(sp._avgColumnsPerInput(), trueAvgColumnPerInput)
  def testAvgConnectedSpanForColumn1D(self):
    """_avgConnectedSpanForColumn1D should return the distance (inclusive)
    between the first and last connected synapse of each column's row."""
    sp = self._sp
    sp._numColumns = 9
    sp._columnDimensions = numpy.array([9])
    # NOTE(review): inputDimensions is set to [12] while the synapse rows
    # below are 8 wide; the 1D span computation appears to use only the row
    # contents — confirm against the implementation.
    sp._inputDimensions = numpy.array([12])
    sp._connectedSynapses = (
      BinaryCorticalColumns([[0, 1, 0, 1, 0, 1, 0, 1],
                             [0, 0, 0, 1, 0, 0, 0, 1],
                             [0, 0, 0, 0, 0, 0, 1, 0],
                             [0, 0, 1, 0, 0, 0, 1, 0],
                             [0, 0, 0, 0, 0, 0, 0, 0],
                             [0, 1, 1, 0, 0, 0, 0, 0],
                             [0, 0, 1, 1, 1, 0, 0, 0],
                             [0, 0, 1, 0, 1, 0, 0, 0],
                             [1, 1, 1, 1, 1, 1, 1, 1]]))
    # Expected spans: e.g. row 0 connects bits 1..7 -> span 7; an empty row
    # has span 0.
    trueAvgConnectedSpan = [7, 5, 1, 5, 0, 2, 3, 3, 8]
    for i in xrange(sp._numColumns):
      connectedSpan = sp._avgConnectedSpanForColumn1D(i)
      self.assertEqual(trueAvgConnectedSpan[i], connectedSpan)
def testAvgConnectedSpanForColumn2D(self):
sp = self._sp
sp._numColumns = 9
sp._columnDimensions = numpy.array([9])
sp._numInpts = 8
sp._inputDimensions = numpy.array([8])
sp._connectedSynapses = (
BinaryCorticalColumns([[0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1]]))
trueAvgConnectedSpan = [7, 5, 1, 5, 0, 2, 3, 3, 8]
for i in xrange(sp._numColumns):
connectedSpan = sp._avgConnectedSpanForColumn1D(i)
self.assertEqual(trueAvgConnectedSpan[i], connectedSpan)
sp._numColumns = 7
sp._columnDimensions = numpy.array([7])
sp._numInputs = 20
sp._inputDimensions = numpy.array([5, 4])
sp._connectedSynapses = BinaryCorticalColumns(sp._numInputs)
sp._connectedSynapses.resize(sp._numColumns, sp._numInputs)
connected = numpy.array([
[[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]],
# rowspan = 3, colspan = 3, avg = 3
[[1, 1, 1, 1],
[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
# rowspan = 2 colspan = 4, avg = 3
[[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]],
# row span = 5, colspan = 4, avg = 4.5
[[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]],
# rowspan = 5, colspan = 1, avg = 3
[[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
# rowspan = 1, colspan = 4, avg = 2.5
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]],
# rowspan = 2, colspan = 2, avg = 2
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
# rowspan = 0, colspan = 0, avg = 0
])
trueAvgConnectedSpan = [3, 3, 4.5, 3, 2.5, 2, 0]
for columnIndex in xrange(sp._numColumns):
sp._connectedSynapses.replace(
columnIndex, connected[columnIndex].reshape(-1).nonzero()[0]
)
for i in xrange(sp._numColumns):
connectedSpan = sp._avgConnectedSpanForColumn2D(i)
self.assertEqual(trueAvgConnectedSpan[i], connectedSpan)
  def testAvgConnectedSpanForColumnND(self):
    """_avgConnectedSpanForColumnND should average the connected span along
    every dimension of an N-dimensional input."""
    sp = self._sp
    sp._inputDimensions = numpy.array([4, 4, 2, 5])
    sp._numInputs = numpy.prod(sp._inputDimensions)
    sp._numColumns = 5
    sp._columnDimensions = numpy.array([5])
    sp._connectedSynapses = BinaryCorticalColumns(sp._numInputs)
    sp._connectedSynapses.resize(sp._numColumns, sp._numInputs)
    # Column 0: connected bits chosen so per-dimension spans are 3, 3, 1, 4.
    connected = numpy.zeros(sp._numInputs).reshape(sp._inputDimensions)
    connected[1][0][1][0] = 1
    connected[1][0][1][1] = 1
    connected[3][2][1][0] = 1
    connected[3][0][1][0] = 1
    connected[1][0][1][3] = 1
    connected[2][2][1][0] = 1
    # span:   3  3  1  4, avg = 11/4
    sp._connectedSynapses.replace(0, connected.reshape(-1).nonzero()[0])
    connected = numpy.zeros(sp._numInputs).reshape(sp._inputDimensions)
    connected[2][0][1][0] = 1
    connected[2][0][0][0] = 1
    connected[3][0][0][0] = 1
    connected[3][0][1][0] = 1
    # spn:    2  1  2  1, avg = 6/4
    sp._connectedSynapses.replace(1, connected.reshape(-1).nonzero()[0])
    connected = numpy.zeros(sp._numInputs).reshape(sp._inputDimensions)
    connected[0][0][1][4] = 1
    connected[0][0][0][3] = 1
    connected[0][0][0][1] = 1
    connected[1][0][0][2] = 1
    connected[0][0][1][1] = 1
    connected[3][3][1][1] = 1
    # span:   4  4  2  4, avg = 14/4
    sp._connectedSynapses.replace(2, connected.reshape(-1).nonzero()[0])
    connected = numpy.zeros(sp._numInputs).reshape(sp._inputDimensions)
    connected[3][3][1][4] = 1
    connected[0][0][0][0] = 1
    # span:   4  4  2  5, avg = 15/4
    sp._connectedSynapses.replace(3, connected.reshape(-1).nonzero()[0])
    # Column 4 has no connected synapses at all: span must be 0.
    connected = numpy.zeros(sp._numInputs).reshape(sp._inputDimensions)
    # span:   0  0  0  0, avg = 0
    sp._connectedSynapses.replace(4, connected.reshape(-1).nonzero()[0])
    trueAvgConnectedSpan = [11.0/4, 6.0/4, 14.0/4, 15.0/4, 0]
    for i in xrange(sp._numColumns):
      connectedSpan = sp._avgConnectedSpanForColumnND(i)
      self.assertAlmostEqual(trueAvgConnectedSpan[i], connectedSpan)
  def testBumpUpWeakColumns(self):
    """_bumpUpWeakColumns should increment all potential-pool permanences of
    columns whose overlap duty cycle is below the minimum, trimming values
    that fall under the trim threshold; healthy columns stay unchanged."""
    sp = SpatialPooler(inputDimensions=[8],
                       columnDimensions=[5])
    sp._synPermBelowStimulusInc = 0.01
    sp._synPermTrimThreshold = 0.05
    # Columns 0, 1, 3, 4 are below the 0.01 minimum; column 2 is healthy.
    sp._overlapDutyCycles = numpy.array([0, 0.009, 0.1, 0.001, 0.002])
    sp._minOverlapDutyCycles = numpy.array(5*[0.01])
    sp._potentialPools = BinaryCorticalColumns(
      [[1, 1, 1, 1, 0, 0, 0, 0],
       [1, 0, 0, 0, 1, 1, 0, 1],
       [0, 0, 1, 0, 1, 1, 1, 0],
       [1, 1, 1, 0, 0, 0, 1, 0],
       [1, 1, 1, 1, 1, 1, 1, 1]])
    sp._permanences = CorticalColumns(
      [[0.200, 0.120, 0.090, 0.040, 0.000, 0.000, 0.000, 0.000],
       [0.150, 0.000, 0.000, 0.000, 0.180, 0.120, 0.000, 0.450],
       [0.000, 0.000, 0.014, 0.000, 0.032, 0.044, 0.110, 0.000],
       [0.041, 0.000, 0.000, 0.000, 0.000, 0.000, 0.178, 0.000],
       [0.100, 0.738, 0.045, 0.002, 0.050, 0.008, 0.208, 0.034]])
    truePermanences = [
      [0.210, 0.130, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
      #   Inc    Inc    Inc    Trim    -     -     -    -
      [0.160, 0.000, 0.000, 0.000, 0.190, 0.130, 0.000, 0.460],
      #   Inc     -      -      -      Inc    Inc    -     Inc
      [0.000, 0.000, 0.014, 0.000, 0.032, 0.044, 0.110, 0.000], #unchanged
      #   -      -      -      -      -      -      -       -
      [0.051, 0.000, 0.000, 0.000, 0.000, 0.000, 0.188, 0.000],
      #   Inc   Trim    Trim    -      -      -     Inc     -
      [0.110, 0.748, 0.055, 0.000, 0.060, 0.000, 0.218, 0.000]]
    sp._bumpUpWeakColumns()
    for i in xrange(sp._numColumns):
      perm = list(sp._permanences.getRow(i))
      for j in xrange(sp._numInputs):
        self.assertAlmostEqual(truePermanences[i][j], perm[j])
  def testUpdateMinDutyCycleLocal(self):
    """_updateMinDutyCyclesLocal should set each column's minimum overlap duty
    cycle to minPctOverlapDutyCycles times the maximum overlap duty cycle in
    its inhibition neighborhood, with and without wrapAround."""
    # wrapAround=False
    sp = SpatialPooler(inputDimensions=(5,),
                       columnDimensions=(8,),
                       globalInhibition=False,
                       wrapAround=False)
    sp.setInhibitionRadius(1)
    sp.setOverlapDutyCycles([0.7, 0.1, 0.5, 0.01, 0.78, 0.55, 0.1, 0.001])
    sp.setActiveDutyCycles([0.9, 0.3, 0.5, 0.7, 0.1, 0.01, 0.08, 0.12])
    sp.setMinPctOverlapDutyCycles(0.2);
    sp._updateMinDutyCyclesLocal()
    resultMinOverlapDutyCycles = numpy.zeros(sp.getNumColumns())
    sp.getMinOverlapDutyCycles(resultMinOverlapDutyCycles)
    # Each expected value is 0.2 * max(overlap duty cycle over the radius-1
    # neighborhood), clipped at the array edges.
    for actual, expected in zip(resultMinOverlapDutyCycles,
                                [0.14, 0.14, 0.1, 0.156, 0.156, 0.156, 0.11, 0.02]):
      self.assertAlmostEqual(actual, expected)
    # wrapAround=True
    sp = SpatialPooler(inputDimensions=(5,),
                       columnDimensions=(8,),
                       globalInhibition=False,
                       wrapAround=True)
    sp.setInhibitionRadius(1)
    sp.setOverlapDutyCycles([0.7, 0.1, 0.5, 0.01, 0.78, 0.55, 0.1, 0.001])
    sp.setActiveDutyCycles([0.9, 0.3, 0.5, 0.7, 0.1, 0.01, 0.08, 0.12])
    sp.setMinPctOverlapDutyCycles(0.2);
    sp._updateMinDutyCyclesLocal()
    resultMinOverlapDutyCycles = numpy.zeros(sp.getNumColumns())
    sp.getMinOverlapDutyCycles(resultMinOverlapDutyCycles)
    # With wrapAround the last column's neighborhood includes column 0,
    # raising its expected value from 0.02 to 0.14.
    for actual, expected in zip(resultMinOverlapDutyCycles,
                                [0.14, 0.14, 0.1, 0.156, 0.156, 0.156, 0.11, 0.14]):
      self.assertAlmostEqual(actual, expected)
  def testUpdateMinDutyCyclesGlobal(self):
    """_updateMinDutyCyclesGlobal should set every column's minimum overlap
    duty cycle to minPctOverlapDutyCycles times the global maximum."""
    sp = self._sp
    sp._minPctOverlapDutyCycles = 0.01
    sp._numColumns = 5
    sp._overlapDutyCycles = numpy.array([0.06, 1, 3, 6, 0.5])
    sp._activeDutyCycles = numpy.array([0.6, 0.07, 0.5, 0.4, 0.3])
    sp._updateMinDutyCyclesGlobal()
    # Global max overlap duty cycle is 6 -> minimum is 0.01 * 6 everywhere.
    trueMinOverlapDutyCycles = sp._numColumns*[0.01*6]
    for i in xrange(sp._numColumns):
      self.assertAlmostEqual(trueMinOverlapDutyCycles[i],
                             sp._minOverlapDutyCycles[i])
    sp._minPctOverlapDutyCycles = 0.015
    sp._numColumns = 5
    sp._overlapDutyCycles = numpy.array([0.86, 2.4, 0.03, 1.6, 1.5])
    sp._activeDutyCycles = numpy.array([0.16, 0.007, 0.15, 0.54, 0.13])
    sp._updateMinDutyCyclesGlobal()
    trueMinOverlapDutyCycles = sp._numColumns*[0.015*2.4]
    for i in xrange(sp._numColumns):
      self.assertAlmostEqual(trueMinOverlapDutyCycles[i],
                             sp._minOverlapDutyCycles[i])
    # Degenerate case: all duty cycles zero -> minimums are all zero.
    sp._minPctOverlapDutyCycles = 0.015
    sp._numColumns = 5
    sp._overlapDutyCycles = numpy.zeros(5)
    sp._activeDutyCycles = numpy.zeros(5)
    sp._updateMinDutyCyclesGlobal()
    trueMinOverlapDutyCycles = sp._numColumns * [0]
    for i in xrange(sp._numColumns):
      self.assertAlmostEqual(trueMinOverlapDutyCycles[i],
                             sp._minOverlapDutyCycles[i])
def testIsUpdateRound(self):
sp = self._sp
sp._updatePeriod = 50
sp._iterationNum = 1
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 39
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 50
self.assertEqual(sp._isUpdateRound(), True)
sp._iterationNum = 1009
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 1250
self.assertEqual(sp._isUpdateRound(), True)
sp._updatePeriod = 125
sp._iterationNum = 0
self.assertEqual(sp._isUpdateRound(), True)
sp._iterationNum = 200
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 249
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 1330
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 1249
self.assertEqual(sp._isUpdateRound(), False)
sp._iterationNum = 1375
self.assertEqual(sp._isUpdateRound(), True)
  def testAdaptSynapses(self):
    """_adaptSynapses should increment permanences on active inputs and
    decrement them on inactive inputs for active columns only, trimming
    values below the trim threshold; inactive columns are untouched."""
    sp = SpatialPooler(inputDimensions=[8],
                       columnDimensions=[4],
                       synPermInactiveDec=0.01,
                       synPermActiveInc=0.1)
    sp._synPermTrimThreshold = 0.05
    sp._potentialPools = BinaryCorticalColumns(
        [[1, 1, 1, 1, 0, 0, 0, 0],
         [1, 0, 0, 0, 1, 1, 0, 1],
         [0, 0, 1, 0, 0, 0, 1, 0],
         [1, 0, 0, 0, 0, 0, 1, 0]])
    inputVector = numpy.array([1, 0, 0, 1, 1, 0, 1, 0])
    # Column 3 is not active and must keep its permanences unchanged.
    activeColumns = numpy.array([0, 1, 2])
    sp._permanences = CorticalColumns(
        [[0.200, 0.120, 0.090, 0.040, 0.000, 0.000, 0.000, 0.000],
         [0.150, 0.000, 0.000, 0.000, 0.180, 0.120, 0.000, 0.450],
         [0.000, 0.000, 0.014, 0.000, 0.000, 0.000, 0.110, 0.000],
         [0.040, 0.000, 0.000, 0.000, 0.000, 0.000, 0.178, 0.000]])
    truePermanences = [
        [0.300, 0.110, 0.080, 0.140, 0.000, 0.000, 0.000, 0.000],
      #   Inc     Dec    Dec    Inc      -      -      -     -
        [0.250, 0.000, 0.000, 0.000, 0.280, 0.110, 0.000, 0.440],
      #   Inc      -      -     -      Inc    Dec    -     Dec
        [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.210, 0.000],
      #   -      -     Trim     -     -     -       Inc   -
        [0.040, 0.000, 0.000, 0.000, 0.000, 0.000, 0.178, 0.000]]
      #    -      -      -      -      -      -      -       -
    sp._adaptSynapses(inputVector, activeColumns)
    for i in xrange(sp._numColumns):
      perm = list(sp._permanences.getRow(i))
      for j in xrange(sp._numInputs):
        self.assertAlmostEqual(truePermanences[i][j], perm[j])
    # Second scenario with different potential pools.
    sp._potentialPools = BinaryCorticalColumns(
        [[1, 1, 1, 0, 0, 0, 0, 0],
         [0, 1, 1, 1, 0, 0, 0, 0],
         [0, 0, 1, 1, 1, 0, 0, 0],
         [1, 0, 0, 0, 0, 0, 1, 0]])
    inputVector = numpy.array([1, 0, 0, 1, 1, 0, 1, 0])
    activeColumns = numpy.array([0, 1, 2])
    sp._permanences = CorticalColumns(
        [[0.200, 0.120, 0.090, 0.000, 0.000, 0.000, 0.000, 0.000],
         [0.000, 0.017, 0.232, 0.400, 0.000, 0.000, 0.000, 0.000],
         [0.000, 0.000, 0.014, 0.051, 0.730, 0.000, 0.000, 0.000],
         [0.170, 0.000, 0.000, 0.000, 0.000, 0.000, 0.380, 0.000]])
    truePermanences = [
        [0.30, 0.110, 0.080, 0.000, 0.000, 0.000, 0.000, 0.000],
      #   Inc    Dec     Dec     -       -      -      -      -
        [0.000, 0.000, 0.222, 0.500, 0.000, 0.000, 0.000, 0.000],
      #   -     Trim     Dec    Inc      -      -      -      -
        [0.000, 0.000, 0.000, 0.151, 0.830, 0.000, 0.000, 0.000],
      #   -      -      Trim   Inc    Inc     -      -      -
        [0.170, 0.000, 0.000, 0.000, 0.000, 0.000, 0.380, 0.000]]
      #   -      -      -      -      -      -      -       -
    sp._adaptSynapses(inputVector, activeColumns)
    for i in xrange(sp._numColumns):
      perm = list(sp._permanences.getRow(i))
      for j in xrange(sp._numInputs):
        self.assertAlmostEqual(truePermanences[i][j], perm[j])
  def testRaisePermanenceThreshold(self):
    """_raisePermanenceToThreshold should repeatedly bump all permanences in
    the mask by synPermBelowStimulusInc until at least stimulusThreshold
    synapses are connected (>= synPermConnected)."""
    sp = self._sp
    sp._inputDimensions=numpy.array([5])
    sp._columnDimensions=numpy.array([5])
    sp._synPermConnected=0.1
    sp._stimulusThreshold=3
    sp._synPermBelowStimulusInc = 0.01
    sp._permanences = CorticalColumns(
        [[0.0, 0.11, 0.095, 0.092, 0.01],
         [0.12, 0.15, 0.02, 0.12, 0.09],
         [0.51, 0.081, 0.025, 0.089, 0.31],
         [0.18, 0.0601, 0.11, 0.011, 0.03],
         [0.011, 0.011, 0.011, 0.011, 0.011]])
    sp._connectedSynapses = BinaryCorticalColumns([[0, 1, 0, 0, 0],
                                                   [1, 1, 0, 1, 0],
                                                   [1, 0, 0, 0, 1],
                                                   [1, 0, 1, 0, 0],
                                                   [0, 0, 0, 0, 0]])
    sp._connectedCounts = numpy.array([1, 3, 2, 2, 0])
    truePermanences = [
        [0.01, 0.12, 0.105, 0.102, 0.02],  # incremented once
        [0.12, 0.15, 0.02, 0.12, 0.09],  # no change
        [0.53, 0.101, 0.045, 0.109, 0.33],  # increment twice
        [0.22, 0.1001, 0.15, 0.051, 0.07],  # increment four times
        [0.101, 0.101, 0.101, 0.101, 0.101]]  #increment 9 times
    # All five input bits are in the mask for every column.
    maskPP = numpy.array(range(5))
    for i in xrange(sp._numColumns):
      perm = sp._permanences.getRow(i)
      sp._raisePermanenceToThreshold(perm, maskPP)
      for j in xrange(sp._numInputs):
        self.assertAlmostEqual(truePermanences[i][j], perm[j])
  def testUpdatePermanencesForColumn(self):
    """_updatePermanencesForColumn should clip permanences to [0, 1], trim
    values below the trim threshold to 0, and refresh the connected-synapse
    matrix and connected counts accordingly."""
    sp = SpatialPooler(inputDimensions=[5],
                       columnDimensions=[5],
                       synPermConnected=0.1)
    sp._synPermTrimThreshold = 0.05
    permanences = numpy.array([
        [-0.10, 0.500, 0.400, 0.010, 0.020],
        [0.300, 0.010, 0.020, 0.120, 0.090],
        [0.070, 0.050, 1.030, 0.190, 0.060],
        [0.180, 0.090, 0.110, 0.010, 0.030],
        [0.200, 0.101, 0.050, -0.09, 1.100]])
    # These are the 'true permanences' reflected in trueConnectedSynapses
    # truePermanences = SparseMatrix(
    #  [[0.000, 0.500, 0.400, 0.000, 0.000],
    #      Clip     -     -      Trim   Trim
    #   [0.300, 0.000, 0.000, 0.120, 0.090],
    #       -    Trim   Trim     -      -
    #   [0.070, 0.050, 1.000, 0.190, 0.060],
    #       -      -     Clip     -      -
    #   [0.180, 0.090, 0.110, 0.000, 0.000],
    #       -      -      -      Trim   Trim
    #   [0.200, 0.101, 0.050, 0.000, 1.000]])
    #       -      -      -      Clip   Clip
    # A synapse is connected iff its (post-clip/trim) permanence is >= 0.1.
    trueConnectedSynapses = [
        [0, 1, 1, 0, 0],
        [1, 0, 0, 1, 0],
        [0, 0, 1, 1, 0],
        [1, 0, 1, 0, 0],
        [1, 1, 0, 0, 1]]
    trueConnectedCounts = [2, 2, 2, 2, 3]
    for columnIndex in xrange(sp._numColumns):
      sp._updatePermanencesForColumn(permanences[columnIndex], columnIndex)
      self.assertListEqual(
        trueConnectedSynapses[columnIndex],
        list(sp._connectedSynapses[columnIndex])
      )
    self.assertListEqual(trueConnectedCounts, list(sp._connectedCounts))
  def testCalculateOverlap(self):
    """
    Test that column computes overlap and percent overlap correctly.
    """
    sp = SpatialPooler(inputDimensions = [10],
                       columnDimensions = [5])
    # Case 1: all-zero input -> zero overlap and zero percent everywhere.
    sp._connectedSynapses = (
      BinaryCorticalColumns([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                             [0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
                             [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
                             [0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
                             [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]))
    sp._connectedCounts = numpy.array([10.0, 8.0, 6.0, 4.0, 2.0])
    inputVector = numpy.zeros(sp._numInputs, dtype='float32')
    overlaps = sp._calculateOverlap(inputVector)
    overlapsPct = sp._calculateOverlapPct(overlaps)
    trueOverlaps = list(numpy.array([0, 0, 0, 0, 0], dtype=realDType))
    trueOverlapsPct = list(numpy.array([0, 0, 0, 0, 0]))
    self.assertListEqual(list(overlaps), trueOverlaps)
    self.assertListEqual(list(overlapsPct), trueOverlapsPct)
    # Case 2: all-ones input -> overlap equals each connected count, 100%.
    sp._connectedSynapses = (
      BinaryCorticalColumns([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                             [0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
                             [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
                             [0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
                             [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]))
    sp._connectedCounts = numpy.array([10.0, 8.0, 6.0, 4.0, 2.0])
    inputVector = numpy.ones(sp._numInputs, dtype='float32')
    overlaps = sp._calculateOverlap(inputVector)
    overlapsPct = sp._calculateOverlapPct(overlaps)
    trueOverlaps = list(numpy.array([10, 8, 6, 4, 2], dtype=realDType))
    trueOverlapsPct = list(numpy.array([1, 1, 1, 1, 1]))
    self.assertListEqual(list(overlaps), trueOverlaps)
    self.assertListEqual(list(overlapsPct), trueOverlapsPct)
    # Case 3: a single active bit shared by every column -> overlap 1 each,
    # percent overlap scaled by each column's connected count.
    sp._connectedSynapses = (
      BinaryCorticalColumns([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                             [0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
                             [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
                             [0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
                             [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]))
    sp._connectedCounts = numpy.array([10.0, 8.0, 6.0, 4.0, 2.0])
    inputVector = numpy.zeros(sp._numInputs, dtype='float32')
    inputVector[9] = 1
    overlaps = sp._calculateOverlap(inputVector)
    overlapsPct = sp._calculateOverlapPct(overlaps)
    trueOverlaps = list(numpy.array([1, 1, 1, 1, 1], dtype=realDType))
    trueOverlapsPct = list(numpy.array([0.1, 0.125, 1.0/6, 0.25, 0.5]))
    self.assertListEqual(list(overlaps), trueOverlaps)
    self.assertListEqual(list(overlapsPct), trueOverlapsPct)
    # Zig-zag
    sp._connectedSynapses = (
      BinaryCorticalColumns([[1, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                             [0, 1, 0, 0, 0, 0, 1, 0, 0, 0],
                             [0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                             [0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
                             [0, 0, 0, 0, 1, 0, 0, 0, 0, 1]]))
    sp._connectedCounts = numpy.array([2.0, 2.0, 2.0, 2.0, 2.0])
    inputVector = numpy.zeros(sp._numInputs, dtype='float32')
    inputVector[range(0, 10, 2)] = 1
    overlaps = sp._calculateOverlap(inputVector)
    overlapsPct = sp._calculateOverlapPct(overlaps)
    trueOverlaps = list(numpy.array([1, 1, 1, 1, 1], dtype=realDType))
    trueOverlapsPct = list(numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]))
    self.assertListEqual(list(overlaps), trueOverlaps)
    self.assertListEqual(list(overlapsPct), trueOverlapsPct)
  def testInitPermanence1(self):
    """
    test initial permanence generation. ensure that
    a correct amount of synapses are initialized in
    a connected state, with permanence values drawn from
    the correct ranges
    """
    sp = self._sp
    sp._inputDimensions = numpy.array([10])
    sp._numInputs = 10
    # Mock out the raise-to-threshold step so the raw initial values are
    # observable.
    sp._raisePermanenceToThreshold = Mock()
    sp._potentialRadius = 2
    # connectedPct = 1: every potential synapse starts connected.
    connectedPct = 1
    mask = numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 1, 1])
    perm = sp._initPermanence(mask, connectedPct)
    connected = (perm >= sp._synPermConnected).astype(int)
    numcon = (connected.nonzero()[0]).size
    self.assertEqual(numcon, 5)
    # connectedPct = 0: no synapse may start connected.
    connectedPct = 0
    perm = sp._initPermanence(mask, connectedPct)
    connected = (perm >= sp._synPermConnected).astype(int)
    numcon = (connected.nonzero()[0]).size
    self.assertEqual(numcon, 0)
    # connectedPct = 0.5: some but not all synapses connected, and all
    # permanences within [0, synPermMax].
    connectedPct = 0.5
    sp._potentialRadius = 100
    sp._numInputs = 100
    mask = numpy.ones(100)
    perm = sp._initPermanence(mask, connectedPct)
    connected = (perm >= sp._synPermConnected).astype(int)
    numcon = (connected.nonzero()[0]).size
    self.assertGreater(numcon, 0)
    self.assertLess(numcon, sp._numInputs)
    minThresh = 0.0
    maxThresh = sp._synPermMax
    self.assertEqual(numpy.logical_and((perm >= minThresh),
                                       (perm <= maxThresh)).all(), True)
  def testInitPermanence2(self):
    """
    Test initial permanence generation. ensure that permanence values
    are only assigned to bits within a column's potential pool.
    """
    sp = self._sp
    # Mock out the raise-to-threshold step so it cannot add permanence to
    # bits outside the mask.
    sp._raisePermanenceToThreshold = Mock()
    # Mask at the start of the input.
    sp._numInputs = 10
    connectedPct = 1
    mask = numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
    perm = sp._initPermanence(mask, connectedPct)
    connected = list((perm > 0).astype(int))
    trueConnected = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
    self.assertListEqual(connected, trueConnected)
    # Mask in the middle of the input.
    sp._numInputs = 10
    connectedPct = 1
    mask = numpy.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
    perm = sp._initPermanence(mask, connectedPct)
    connected = list((perm > 0).astype(int))
    trueConnected = [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]
    self.assertListEqual(connected, trueConnected)
    # Mask at the end of the input.
    sp._numInputs = 10
    connectedPct = 1
    mask = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
    perm = sp._initPermanence(mask, connectedPct)
    connected = list((perm > 0).astype(int))
    trueConnected = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
    self.assertListEqual(connected, trueConnected)
    # Mask covering all but one bit.
    sp._numInputs = 10
    connectedPct = 1
    mask = numpy.array([1, 1, 1, 1, 1, 1, 1, 0, 1, 1])
    perm = sp._initPermanence(mask, connectedPct)
    connected = list((perm > 0).astype(int))
    trueConnected = [1, 1, 1, 1, 1, 1, 1, 0, 1, 1]
    self.assertListEqual(connected, trueConnected)
def testUpdateDutyCycleHelper(self):
  """
  Tests that duty cycles are updated properly according
  to the formula newDc = (dc * (period - 1) + newValue) / period,
  and checks the effect of different periods.

  (Fix: removed a dead `dc = numpy.zeros(5)` assignment that was
  immediately overwritten.)
  """
  # Case 1: all-zero new values decay each duty cycle by dc / period.
  dc = numpy.array([1000.0, 1000.0, 1000.0, 1000.0, 1000.0])
  period = 1000
  newvals = numpy.zeros(5)
  newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
  self.assertListEqual(list(newDc), [999, 999, 999, 999, 999])
  # Case 2: new values equal to the current duty cycles leave them unchanged.
  dc = numpy.array([1000.0, 1000.0, 1000.0, 1000.0, 1000.0])
  newvals = numpy.zeros(5)
  newvals.fill(1000)
  newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
  self.assertListEqual(list(newDc), list(dc))
  # Case 3: larger new values pull the duty cycle up by (new - dc) / period.
  dc = numpy.array([1000, 1000, 1000, 1000, 1000])
  newvals = numpy.array([2000, 4000, 5000, 6000, 7000])
  newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
  self.assertListEqual(list(newDc), [1001, 1003, 1004, 1005, 1006])
  # Case 4: a short period makes the update converge much faster.
  dc = numpy.array([1000, 800, 600, 400, 2000])
  newvals = numpy.zeros(5)
  period = 2
  newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
  self.assertListEqual(list(newDc), [500, 400, 300, 200, 1000])
def testInhibitColumnsGlobal(self):
  """
  Tests that global inhibition correctly picks the
  correct top number of overlap scores as winning columns.

  (Fix: removed two dead `trueActive = numpy.zeros(...)` assignments that
  were immediately overwritten by the expected-value lists.)
  """
  sp = self._sp
  sp._numColumns = 10
  # density 0.3 of 10 columns -> the top 3 overlap scores (columns 6, 4, 7).
  overlaps = numpy.array([1, 2, 1, 4, 8, 3, 12, 5, 4, 1], dtype=realDType)
  active = list(sp._inhibitColumnsGlobal(overlaps, 0.3))
  self.assertListEqual([4, 6, 7], sorted(active))  # ignore order of columns
  # density 0.5 on a strictly increasing ramp -> columns 5..9 win.
  overlaps = numpy.array(range(10), dtype=realDType)
  active = list(sp._inhibitColumnsGlobal(overlaps, 0.5))
  self.assertListEqual(range(5, 10), sorted(active))
def testInhibitColumnsLocal(self):
  """Local inhibition: winners (W) vs losers (L) for several radii,
  densities and wrap-around settings."""
  sp = self._sp
  sp._numColumns = 10
  sp._columnDimensions = numpy.array([sp._numColumns])

  def runInhibition(scores, density, wrapAround):
    # Apply the wrap setting, then run local inhibition on the scores.
    sp._wrapAround = wrapAround
    overlaps = numpy.array(scores, dtype=realDType)
    return list(sp._inhibitColumnsLocal(overlaps, density))

  # Radius 2, density 0.5
  sp._inhibitionRadius = 2
  scores = [1, 2, 7, 0, 3, 4, 16, 1, 1.5, 1.7]
  # L W W L L W W L W W  (wrapAround=True)
  # L W W L L W W L L W  (wrapAround=False)
  self.assertListEqual([1, 2, 5, 6, 8, 9],
                       sorted(runInhibition(scores, 0.5, True)))
  self.assertListEqual([1, 2, 5, 6, 9],
                       sorted(runInhibition(scores, 0.5, False)))

  # Radius 3, density 0.5 (same scores)
  sp._inhibitionRadius = 3
  # L W W L W W W L L W  (wrapAround=True)
  # L W W L W W W L L L  (wrapAround=False)
  self.assertListEqual([1, 2, 4, 5, 6, 9], runInhibition(scores, 0.5, True))
  self.assertListEqual([1, 2, 4, 5, 6, 9], runInhibition(scores, 0.5, False))

  # Test add to winners: all scores tied, density 0.3333
  sp._inhibitionRadius = 3
  ties = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
  # W W L L W W L L L L  (wrapAround=True)
  # W W L L W W L L W L  (wrapAround=False)
  self.assertListEqual([0, 1, 4, 5],
                       sorted(runInhibition(ties, 0.3333, True)))
  self.assertListEqual([0, 1, 4, 5, 8],
                       sorted(runInhibition(ties, 0.3333, False)))
@unittest.skipUnless(
capnp, "pycapnp is not installed, skipping serialization test.")
def testWriteRead(self):
"""Round-trip a SpatialPooler through its capnp proto serialization and
verify that the restored instance has equivalent state and produces the
same active columns on a subsequent compute()."""
sp1 = SpatialPooler(
inputDimensions=[9],
columnDimensions=[5],
potentialRadius=3,
potentialPct=0.5,
globalInhibition=False,
localAreaDensity=-1.0,
numActiveColumnsPerInhArea=3,
stimulusThreshold=1,
synPermInactiveDec=0.01,
synPermActiveInc=0.1,
synPermConnected=0.10,
minPctOverlapDutyCycle=0.1,
dutyCyclePeriod=10,
boostStrength=10.0,
seed=42,
spVerbosity=0)
# Run a record through before serializing
inputVector = numpy.array([1, 0, 1, 0, 1, 0, 0, 1, 1])
activeArray1 = numpy.zeros(5)
sp1.compute(inputVector, True, activeArray1)
# Serialize sp1 into a fresh capnp message
proto1 = SpatialPoolerProto_capnp.SpatialPoolerProto.new_message()
sp1.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = SpatialPoolerProto_capnp.SpatialPoolerProto.read(f)
# Load the deserialized proto
sp2 = SpatialPooler.read(proto2)
# These attributes are recomputed on every compute() call, so they are
# excluded from the state comparison below.
ephemeral = set(["_boostedOverlaps", "_overlaps"])
# Check that the two spatial poolers have the same attributes
self.assertSetEqual(set(sp1.__dict__.keys()), set(sp2.__dict__.keys()))
for k, v1 in sp1.__dict__.iteritems():
v2 = getattr(sp2, k)
if k in ephemeral:
continue
if isinstance(v1, numpy.ndarray):
# Arrays must match in dtype and, to float tolerance, in value.
self.assertEqual(v1.dtype, v2.dtype,
"Key %s has differing dtypes: %s vs %s" % (
k, v1.dtype, v2.dtype))
self.assertTrue(numpy.isclose(v1, v2).all(), k)
elif isinstance(v1, Random) or isinstance(v1, BinaryCorticalColumns):
# No direct equality check for these types; they are exercised
# indirectly by the matching compute() results below.
pass
elif isinstance(v1, float):
self.assertAlmostEqual(v1, v2)
elif isinstance(v1, numbers.Integral):
self.assertEqual(long(v1), long(v2), k)
else:
self.assertEqual(type(v1), type(v2), k)
self.assertEqual(v1, v2, k)
# Run a record through after deserializing and check results match
activeArray2 = numpy.zeros(5)
sp1.compute(inputVector, True, activeArray1)
sp2.compute(inputVector, True, activeArray2)
indices1 = set(activeArray1.nonzero()[0])
indices2 = set(activeArray2.nonzero()[0])
self.assertSetEqual(indices1, indices2)
def testRandomSPDoesNotLearn(self):
  """compute() with learn=False must advance only the general iteration
  counter and leave permanences untouched."""
  sp = SpatialPooler(inputDimensions=[5],
                     columnDimensions=[10])
  randomInput = (numpy.random.rand(5) > 0.5).astype(uintDType)
  outputColumns = numpy.zeros(sp._numColumns).astype(realDType)
  # Both counters start at zero before any compute() call.
  self.assertEqual(sp._iterationNum, 0)
  self.assertEqual(sp._iterationLearnNum, 0)
  # Snapshot the permanence state, then run one record without learning.
  startingPerms = copy(sp._permanences)
  sp.compute(randomInput, False, outputColumns)
  # The general counter advances; the learning counter must not.
  self.assertEqual(sp._iterationNum, 1)
  self.assertEqual(sp._iterationLearnNum, 0)
  # The permanence matrix must be exactly as it was before compute().
  self.assertEqual(sp._permanences, startingPerms)
@unittest.skip("Ported from the removed FlatSpatialPooler but fails. \
See: https://github.com/numenta/nupic/issues/1897")
def testActiveColumnsEqualNumActive(self):
'''
After feeding in a record the number of active columns should
always be equal to numActivePerInhArea
'''
for i in [1, 10, 50]:
numActive = i
inputShape = 10
sp = SpatialPooler(inputDimensions=[inputShape],
columnDimensions=[100],
numActiveColumnsPerInhArea=numActive)
inputArray = (numpy.random.rand(inputShape) > 0.5).astype(uintDType)
inputArray2 = (numpy.random.rand(inputShape) > 0.8).astype(uintDType)
activeArray = numpy.zeros(sp._numColumns).astype(realDType)
# Default, learning on
sp.compute(inputArray, True, activeArray)
sp.compute(inputArray2, True, activeArray)
self.assertEqual(sum(activeArray), numActive)
# learning OFF
sp.compute(inputArray, False, activeArray)
sp.compute(inputArray2, False, activeArray)
self.assertEqual(sum(activeArray), numActive)
if __name__ == "__main__":
unittest.main()
| 57,130 | Python | .py | 1,323 | 35.248677 | 84 | 0.602243 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,108 | sdr_classifier_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/sdr_classifier_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for SDRClassifier module."""
import cPickle as pickle
import random
import sys
import tempfile
import types
import unittest2 as unittest
import numpy
from nupic.algorithms.sdr_classifier import SDRClassifier
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.proto import SdrClassifier_capnp
class SDRClassifierTest(unittest.TestCase):
"""Unit tests for SDRClassifier class."""
def setUp(self):
# Store the class under test as a factory; each test constructs its own
# classifier instance from it.
self._classifier = SDRClassifier
def testInitialization(self):
  # Constructing via the stored factory must produce exactly that type.
  classifier = self._classifier([1], 0.1, 0.1, 0)
  self.assertEqual(type(classifier), self._classifier)
def testInitInvalidParams(self):
  """Constructor must reject bad `steps` lists and negative learning rates."""
  # steps: non-integer, empty, and negative entries are all invalid.
  self.assertRaises(TypeError, self._classifier,
                    steps=[0.3], alpha=0.1, actValueAlpha=0.1)
  self.assertRaises(TypeError, self._classifier,
                    steps=[], alpha=0.1, actValueAlpha=0.1)
  self.assertRaises(ValueError, self._classifier,
                    steps=[-1], alpha=0.1, actValueAlpha=0.1)
  # alpha must be non-negative.
  self.assertRaises(ValueError, self._classifier,
                    steps=[1], alpha=-1.0, actValueAlpha=0.1)
  # actValueAlpha must be non-negative.
  self.assertRaises(ValueError, self._classifier,
                    steps=[1], alpha=0.1, actValueAlpha=-1.0)
def testSingleValue(self):
  """Send same value 10 times and expect high likelihood for prediction."""
  classifier = self._classifier(steps=[1], alpha=1.0)
  # Repeatedly associate pattern [1, 5] with bucket 0 / value 10; only
  # the final inference result matters.
  result = None
  for recordNum in range(10):
    result = self._compute(classifier, recordNum, [1, 5], 0, 10)
  self.assertEqual(result["actualValues"][0], 10)
  self.assertGreater(result[1][0], 0.9)
def testSingleValue0Steps(self):
  """Send same value 10 times and expect high likelihood for prediction
  using 0-step ahead prediction"""
  classifier = self._classifier(steps=[0], alpha=1.0)
  # Same association repeated; inspect the 0-step prediction at the end.
  result = None
  for recordNum in range(10):
    result = self._compute(classifier, recordNum, [1, 5], 0, 10)
  self.assertEqual(result["actualValues"][0], 10)
  self.assertGreater(result[0][0], 0.9)
def testComputeResultTypes(self):
  """The compute() result dict must have the documented keys and types."""
  classifier = self._classifier([1], 0.1, 0.1, 0)
  result = classifier.compute(recordNum=0,
                              patternNZ=[1, 5, 9],
                              classification={"bucketIdx": 4,
                                              "actValue": 34.7},
                              learn=True,
                              infer=True)
  # One key per prediction step, plus "actualValues".
  self.assertSetEqual(set(result.keys()), set(("actualValues", 1)))
  actualValues = result["actualValues"]
  self.assertEqual(type(actualValues), list)
  self.assertEqual(len(actualValues), 1)
  self.assertEqual(type(actualValues[0]), float)
  # Step-1 likelihoods come back as an 8-byte-element numpy array.
  self.assertEqual(type(result[1]), numpy.ndarray)
  self.assertEqual(result[1].itemsize, 8)
  self.assertAlmostEqual(actualValues[0], 34.7, places=5)
def testBucketIdxNumpyInt64Input(self):
  """A numpy integer bucketIdx must be accepted like a Python int."""
  classifier = self._classifier([1], 0.1, 0.1, 0)
  result = classifier.compute(0, [1, 5, 9],
                              {"bucketIdx": numpy.int64(4),
                               "actValue": 34.7},
                              True, True)
  self.assertSetEqual(set(result.keys()), set(("actualValues", 1)))
  self.assertEqual(len(result["actualValues"]), 1)
  self.assertAlmostEqual(result["actualValues"][0], 34.7, places=5)
def testComputeInferOrLearnOnly(self):
  """Check compute() for each learn/infer flag combination.

  (Fix: removed a dead `recordNum += 1` that was immediately clobbered
  by `recordNum = 0`, and corrected a stale comment — compute() returns
  an empty dict, not None, when infer is False.)
  """
  c = self._classifier([1], 1.0, 0.1, 0)
  # learn only: no inference requested, so an empty result comes back.
  retval = c.compute(recordNum=0, patternNZ=[1, 5, 9],
                     classification={"bucketIdx": 4, "actValue": 34.7},
                     learn=True, infer=False)
  self.assertEquals({}, retval)
  # infer only: inference must not learn, so presenting the same pattern
  # twice with different classifications yields identical likelihoods.
  recordNum = 0
  retval1 = c.compute(recordNum=recordNum, patternNZ=[1, 5, 9],
                      classification={"bucketIdx": 2, "actValue": 14.2},
                      learn=False, infer=True)
  recordNum += 1
  retval2 = c.compute(recordNum=recordNum, patternNZ=[1, 5, 9],
                      classification={"bucketIdx": 3, "actValue": 20.5},
                      learn=False, infer=True)
  recordNum += 1
  self.assertSequenceEqual(list(retval1[1]), list(retval2[1]))
  # neither learn nor infer: an empty dict is returned.
  retval3 = c.compute(recordNum=recordNum, patternNZ=[1, 2],
                      classification={"bucketIdx": 2, "actValue": 14.2},
                      learn=False, infer=False)
  self.assertEquals({}, retval3)
def testCompute1(self):
  """After a single record, the only actual value known is the one seen."""
  classifier = self._classifier([1], 0.1, 0.1, 0)
  result = classifier.compute(recordNum=0, patternNZ=[1, 5, 9],
                              classification={"bucketIdx": 4,
                                              "actValue": 34.7},
                              learn=True, infer=True)
  self.assertSetEqual(set(result.keys()), set(("actualValues", 1)))
  self.assertEqual(len(result["actualValues"]), 1)
  self.assertAlmostEqual(result["actualValues"][0], 34.7, places=5)
def testCompute2(self):
  """After two identical records, bucket 4 tracks the value it has seen."""
  classifier = self._classifier([1], 0.1, 0.1, 0)
  result = None
  for recordNum in (0, 1):
    result = classifier.compute(recordNum=recordNum, patternNZ=[1, 5, 9],
                                classification={"bucketIdx": 4,
                                                "actValue": 34.7},
                                learn=True, infer=True)
  self.assertSetEqual(set(result.keys()), set(("actualValues", 1)))
  self.assertAlmostEqual(result["actualValues"][4], 34.7, places=5)
def testComputeComplex(self):
  """Feed a short mixed sequence and check the learned values and the
  exact step-1 likelihood distribution."""
  c = self._classifier([1], 1.0, 0.1, 0)
  # (patternNZ, bucketIdx, actValue) training records, fed in order.
  records = [
      ([1, 5, 9], 4, 34.7),
      ([0, 6, 9, 11], 5, 41.7),
      ([6, 9], 5, 44.9),
      ([1, 5, 9], 4, 42.9),
  ]
  for recordNum, (patternNZ, bucketIdx, actValue) in enumerate(records):
    c.compute(recordNum=recordNum, patternNZ=patternNZ,
              classification={"bucketIdx": bucketIdx, "actValue": actValue},
              learn=True, infer=True)
  result = c.compute(recordNum=4, patternNZ=[1, 5, 9],
                     classification={"bucketIdx": 4, "actValue": 34.7},
                     learn=True, infer=True)
  self.assertSetEqual(set(result.keys()), set(("actualValues", 1)))
  # Running averages of the values seen for buckets 4 and 5.
  self.assertAlmostEqual(result["actualValues"][4], 35.520000457763672,
                         places=5)
  self.assertAlmostEqual(result["actualValues"][5], 42.020000457763672,
                         places=5)
  # Step-1 likelihoods across the six buckets seen so far.
  self.assertEqual(len(result[1]), 6)
  expectedLikelihoods = [0.034234, 0.034234, 0.034234, 0.034234,
                         0.093058, 0.770004]
  for bucketIdx, likelihood in enumerate(expectedLikelihoods):
    self.assertAlmostEqual(result[1][bucketIdx], likelihood, places=5)
def testComputeWithMissingValue(self):
  """A record with bucketIdx/actValue of None (missing value) must be
  handled without error and reported back as None."""
  classifier = self._classifier([1], 0.1, 0.1, 0)
  result = classifier.compute(recordNum=0, patternNZ=[1, 5, 9],
                              classification={"bucketIdx": None,
                                              "actValue": None},
                              learn=True, infer=True)
  self.assertSetEqual(set(result.keys()), set(("actualValues", 1)))
  self.assertEqual(len(result["actualValues"]), 1)
  self.assertEqual(result["actualValues"][0], None)
def testComputeCategory(self):
  """String (category) actual values are stored and returned unchanged."""
  classifier = self._classifier([1], 0.1, 0.1, 0)
  result = None
  for recordNum in (0, 1):
    result = classifier.compute(recordNum=recordNum, patternNZ=[1, 5, 9],
                                classification={"bucketIdx": 4,
                                                "actValue": "D"},
                                learn=True, infer=True)
  self.assertSetEqual(set(result.keys()), set(("actualValues", 1)))
  self.assertEqual(result["actualValues"][4], "D")
  # A new bucket with a missing value must not introduce non-string
  # actual values alongside the categories.
  predictResult = classifier.compute(recordNum=2, patternNZ=[1, 5, 9],
                                     classification={"bucketIdx": 5,
                                                     "actValue": None},
                                     learn=True, infer=True)
  for value in predictResult["actualValues"]:
    self.assertIsInstance(value, (types.NoneType, types.StringType))
def testComputeCategory2(self):
  """After seeing "D" then "E" for bucket 4, the reported actual value
  for that bucket is still "D"."""
  classifier = self._classifier([1], 0.1, 0.1, 0)
  classifier.compute(recordNum=0, patternNZ=[1, 5, 9],
                     classification={"bucketIdx": 4, "actValue": "D"},
                     learn=True, infer=True)
  result = classifier.compute(recordNum=1, patternNZ=[1, 5, 9],
                              classification={"bucketIdx": 4,
                                              "actValue": "E"},
                              learn=True, infer=True)
  self.assertSetEqual(set(result.keys()), set(("actualValues", 1)))
  self.assertEqual(result["actualValues"][4], "D")
def testSerialization(self):
  """Pickle round-trip: constructor parameters survive, and the restored
  classifier keeps producing the exact same predictions."""
  c = self._classifier([1], 1.0, 0.1, 0)
  # Train on a short sequence before pickling.
  trainingData = [
      ([1, 5, 9], 4, 34.7),
      ([0, 6, 9, 11], 5, 41.7),
      ([6, 9], 5, 44.9),
      ([1, 5, 9], 4, 42.9),
  ]
  for recordNum, (patternNZ, bucketIdx, actValue) in enumerate(trainingData):
    c.compute(recordNum=recordNum, patternNZ=patternNZ,
              classification={"bucketIdx": bucketIdx, "actValue": actValue},
              learn=True, infer=True)
  # Round-trip through pickle.
  c = pickle.loads(pickle.dumps(c))
  self.assertEqual(c.steps, [1])
  self.assertEqual(c.alpha, 1.0)
  self.assertEqual(c.actValueAlpha, 0.1)
  # The restored classifier must continue the sequence identically.
  result = c.compute(recordNum=4, patternNZ=[1, 5, 9],
                     classification={"bucketIdx": 4, "actValue": 34.7},
                     learn=True, infer=True)
  self.assertSetEqual(set(result.keys()), set(("actualValues", 1)))
  self.assertAlmostEqual(result["actualValues"][4], 35.520000457763672,
                         places=5)
  self.assertAlmostEqual(result["actualValues"][5], 42.020000457763672,
                         places=5)
  self.assertEqual(len(result[1]), 6)
  expectedLikelihoods = [0.034234, 0.034234, 0.034234, 0.034234,
                         0.093058, 0.770004]
  for bucketIdx, likelihood in enumerate(expectedLikelihoods):
    self.assertAlmostEqual(result[1][bucketIdx], likelihood, places=5)
def testOverlapPattern(self):
  """Overlapping patterns: the shared bit initially predicts the old
  bucket, but one learning step shifts the prediction to the new one."""
  classifier = self._classifier(alpha=10.0)
  # Train [1, 5] => bucket 9 twice.
  self._compute(classifier, recordNum=0, pattern=[1, 5], bucket=9, value=9)
  self._compute(classifier, recordNum=1, pattern=[1, 5], bucket=9, value=9)
  # First presentation of [3, 5]: the shared bit 5 still votes strongly
  # for the previously learned bucket.
  retval = self._compute(classifier, recordNum=2, pattern=[3, 5], bucket=2,
                         value=2)
  self.assertEqual(retval["actualValues"][9], 9)
  self.assertGreater(retval[1][9], 0.9)
  # Second presentation: the new bucket now outweighs the old one.
  retval = self._compute(classifier, recordNum=3, pattern=[3, 5], bucket=2,
                         value=2)
  self.assertGreater(retval[1][2], retval[1][9])
def testMultistepSingleValue(self):
  """A constant input learned at steps 1 and 2 yields a single actual
  value predicted with certainty at both horizons."""
  classifier = self._classifier(steps=[1, 2])
  retval = None
  for recordNum in range(10):
    retval = self._compute(classifier, recordNum=recordNum, pattern=[1, 5],
                           bucket=0, value=10)
  # Only one actual-value bucket should exist...
  self.assertEqual(retval["actualValues"], [10])
  # ...and both step horizons assign it probability 1.
  self.assertEqual(retval[1], [1.])
  self.assertEqual(retval[2], [1.])
def testMultistepSimple(self):
  """Cycle buckets 0..9 repeatedly; after pattern [9] the 1-step
  prediction is bucket 0 and the 2-step prediction is bucket 1."""
  classifier = self._classifier(steps=[1, 2], alpha=10.0)
  retval = None
  for recordNum in range(100):
    bucket = recordNum % 10
    retval = self._compute(classifier, recordNum=recordNum,
                           pattern=[bucket], bucket=bucket,
                           value=bucket * 10)
  self.assertEqual(retval["actualValues"],
                   [0, 10, 20, 30, 40, 50, 60, 70, 80, 90])
  # 1 step ahead: bucket 0 near-certain, everything else negligible.
  self.assertGreater(retval[1][0], 0.99)
  for bucket in xrange(1, 10):
    self.assertLess(retval[1][bucket], 0.01)
  # 2 steps ahead: bucket 1 near-certain, everything else negligible.
  self.assertGreater(retval[2][1], 0.99)
  for bucket in [0] + range(2, 10):
    self.assertLess(retval[2][bucket], 0.01)
def testMissingRecords(self):
""" Test missing record support.
Here, we intend the classifier to learn the associations:
[1,3,5] => bucketIdx 1
[2,4,6] => bucketIdx 2
[7,8,9] => don"t care
If it doesn"t pay attention to the recordNums in this test, it will learn the
wrong associations.
"""
c = self._classifier([1], 1.0, 0.1, 0)
recordNum = 0
# Training phase: alternate the two patterns so that, for this 1-step
# classifier, each pattern predicts the bucket of the NEXT record.
c.compute(recordNum=recordNum, patternNZ=[1, 3, 5],
classification={"bucketIdx": 0, "actValue": 0},
learn=True, infer=True)
recordNum += 1
c.compute(recordNum=recordNum, patternNZ=[2, 4, 6],
classification={"bucketIdx": 1, "actValue": 1},
learn=True, infer=True)
recordNum += 1
c.compute(recordNum=recordNum, patternNZ=[1, 3, 5],
classification={"bucketIdx": 2, "actValue": 2},
learn=True, infer=True)
recordNum += 1
c.compute(recordNum=recordNum, patternNZ=[2, 4, 6],
classification={"bucketIdx": 1, "actValue": 1},
learn=True, infer=True)
recordNum += 1
# -----------------------------------------------------------------------
# At this point, we should have learned [1,3,5] => bucket 1
# [2,4,6] => bucket 2
result = c.compute(recordNum=recordNum, patternNZ=[1, 3, 5],
classification={"bucketIdx": 2, "actValue": 2},
learn=True, infer=True)
recordNum += 1
self.assertLess(result[1][0], 0.1)
self.assertGreater(result[1][1], 0.9)
self.assertLess(result[1][2], 0.1)
result = c.compute(recordNum=recordNum, patternNZ=[2, 4, 6],
classification={"bucketIdx": 1, "actValue": 1},
learn=True, infer=True)
recordNum += 1
self.assertLess(result[1][0], 0.1)
self.assertLess(result[1][1], 0.1)
self.assertGreater(result[1][2], 0.9)
# -----------------------------------------------------------------------
# Feed in records that skip and make sure they don"t mess up what we
# learned
# (Each extra `recordNum += 1` below, with no compute() call, simulates
# one missing record in the stream.)
# If we skip a record, the CLA should NOT learn that [2,4,6] from
# the previous learn associates with bucket 0
recordNum += 1
result = c.compute(recordNum=recordNum, patternNZ=[1, 3, 5],
classification={"bucketIdx": 0, "actValue": 0},
learn=True, infer=True)
recordNum += 1
self.assertLess(result[1][0], 0.1)
self.assertGreater(result[1][1], 0.9)
self.assertLess(result[1][2], 0.1)
# If we skip a record, the CLA should NOT learn that [1,3,5] from
# the previous learn associates with bucket 0
recordNum += 1
result = c.compute(recordNum=recordNum, patternNZ=[2, 4, 6],
classification={"bucketIdx": 0, "actValue": 0},
learn=True, infer=True)
recordNum += 1
self.assertLess(result[1][0], 0.1)
self.assertLess(result[1][1], 0.1)
self.assertGreater(result[1][2], 0.9)
# If we skip a record, the CLA should NOT learn that [2,4,6] from
# the previous learn associates with bucket 0
recordNum += 1
result = c.compute(recordNum=recordNum, patternNZ=[1, 3, 5],
classification={"bucketIdx": 0, "actValue": 0},
learn=True, infer=True)
recordNum += 1
self.assertLess(result[1][0], 0.1)
self.assertGreater(result[1][1], 0.9)
self.assertLess(result[1][2], 0.1)
def testMissingRecordInitialization(self):
  """
  Test missing record edge TestCase
  Test an edge case in the classifier initialization when there is a missing
  record in the first n records, where n is the # of prediction steps.

  (Fix: dropped the unused binding of the first compute() result, which
  was immediately overwritten.)
  """
  c = self._classifier([2], 0.1, 0.1, 0)
  # First record primes the classifier; its return value is not needed.
  c.compute(recordNum=0, patternNZ=[1, 5, 9],
            classification={"bucketIdx": 0, "actValue": 34.7},
            learn=True, infer=True)
  # recordNum jumps from 0 to 2: record 1 is missing, which must not
  # break inference for this 2-step classifier.
  result = c.compute(recordNum=2, patternNZ=[1, 5, 9],
                     classification={"bucketIdx": 0, "actValue": 34.7},
                     learn=True, infer=True)
  self.assertSetEqual(set(result.keys()), set(("actualValues", 2)))
  self.assertEqual(len(result["actualValues"]), 1)
  self.assertAlmostEqual(result["actualValues"][0], 34.7)
def testPredictionDistribution(self):
""" Test the distribution of predictions.
Here, we intend the classifier to learn the associations:
[1,3,5] => bucketIdx 0 (30%)
=> bucketIdx 1 (30%)
=> bucketIdx 2 (40%)
[2,4,6] => bucketIdx 1 (50%)
=> bucketIdx 3 (50%)
The classifier should get the distribution almost right given enough
repetitions and a small learning rate
"""
c = self._classifier([0], 0.001, 0.1, 0)
SDR1 = [1, 3, 5]
SDR2 = [2, 4, 6]
recordNum = 0
# Fixed seed so the sampled bucket frequencies are deterministic.
random.seed(42)
for _ in xrange(5000):
# Draw SDR1's bucket with probabilities 0.3 / 0.3 / 0.4.
randomNumber = random.random()
if randomNumber < 0.3:
bucketIdx = 0
elif randomNumber < 0.6:
bucketIdx = 1
else:
bucketIdx = 2
c.compute(recordNum=recordNum, patternNZ=SDR1,
classification={"bucketIdx": bucketIdx, "actValue": bucketIdx},
learn=True, infer=False)
recordNum += 1
# Draw SDR2's bucket with probabilities 0.5 / 0.5.
randomNumber = random.random()
if randomNumber < 0.5:
bucketIdx = 1
else:
bucketIdx = 3
c.compute(recordNum=recordNum, patternNZ=SDR2,
classification={"bucketIdx": bucketIdx, "actValue": bucketIdx},
learn=True, infer=False)
recordNum += 1
# 0-step inference: the learned likelihoods should approximate the
# sampling distributions above (places=1 tolerance).
result1 = c.compute(
recordNum=recordNum, patternNZ=SDR1, classification=None,
learn=False, infer=True)
recordNum += 1
self.assertAlmostEqual(result1[0][0], 0.3, places=1)
self.assertAlmostEqual(result1[0][1], 0.3, places=1)
self.assertAlmostEqual(result1[0][2], 0.4, places=1)
result2 = c.compute(
recordNum=recordNum, patternNZ=SDR2, classification=None,
learn=False, infer=True)
recordNum += 1
self.assertAlmostEqual(result2[0][1], 0.5, places=1)
self.assertAlmostEqual(result2[0][3], 0.5, places=1)
def testPredictionDistributionOverlap(self):
""" Test the distribution of predictions with overlapping input SDRs
Here, we intend the classifier to learn the associations:
SDR1 => bucketIdx 0 (30%)
=> bucketIdx 1 (30%)
=> bucketIdx 2 (40%)
SDR2 => bucketIdx 1 (50%)
=> bucketIdx 3 (50%)
SDR1 and SDR2 has 10% overlaps (2 bits out of 20)
The classifier should get the distribution almost right despite the overlap
"""
c = self._classifier([0], 0.0005, 0.1, 0)
recordNum = 0
# generate 2 SDRs with 2 shared bits
SDR1 = numpy.arange(0, 39, step=2)
SDR2 = numpy.arange(1, 40, step=2)
# Replace two of SDR2's (odd) bits with bits from SDR1 so the two
# patterns share exactly two input bits.
SDR2[3] = SDR1[5]
SDR2[5] = SDR1[11]
# Fixed seed so the sampled bucket frequencies are deterministic.
random.seed(42)
for _ in xrange(5000):
# Draw SDR1's bucket with probabilities 0.3 / 0.3 / 0.4.
randomNumber = random.random()
if randomNumber < 0.3:
bucketIdx = 0
elif randomNumber < 0.6:
bucketIdx = 1
else:
bucketIdx = 2
c.compute(recordNum=recordNum, patternNZ=SDR1,
classification={"bucketIdx": bucketIdx, "actValue": bucketIdx},
learn=True, infer=False)
recordNum += 1
# Draw SDR2's bucket with probabilities 0.5 / 0.5.
randomNumber = random.random()
if randomNumber < 0.5:
bucketIdx = 1
else:
bucketIdx = 3
c.compute(recordNum=recordNum, patternNZ=SDR2,
classification={"bucketIdx": bucketIdx, "actValue": bucketIdx},
learn=True, infer=False)
recordNum += 1
# Despite the shared bits, each SDR's 0-step likelihoods should still
# approximate its own sampling distribution.
result1 = c.compute(
recordNum=recordNum, patternNZ=SDR1, classification=None,
learn=False, infer=True)
recordNum += 1
self.assertAlmostEqual(result1[0][0], 0.3, places=1)
self.assertAlmostEqual(result1[0][1], 0.3, places=1)
self.assertAlmostEqual(result1[0][2], 0.4, places=1)
result2 = c.compute(
recordNum=recordNum, patternNZ=SDR2, classification=None,
learn=False, infer=True)
recordNum += 1
self.assertAlmostEqual(result2[0][1], 0.5, places=1)
self.assertAlmostEqual(result2[0][3], 0.5, places=1)
def testPredictionMultipleCategories(self):
  """ Test the distribution of predictions.
  Here, we intend the classifier to learn the associations:
    [1,3,5] => bucketIdx 0 & 1
    [2,4,6] => bucketIdx 2 & 3
  The classifier should get the distribution almost right given enough
  repetitions and a small learning rate

  (Fix: removed a dead `random.seed(42)` call — this test draws no
  random numbers.)
  """
  c = self._classifier([0], 0.001, 0.1, 0)
  SDR1 = [1, 3, 5]
  SDR2 = [2, 4, 6]
  recordNum = 0
  for _ in xrange(5000):
    # Each pattern is always labeled with the same pair of buckets.
    c.compute(recordNum=recordNum, patternNZ=SDR1,
              classification={"bucketIdx": [0, 1], "actValue": [0, 1]},
              learn=True, infer=False)
    recordNum += 1
    c.compute(recordNum=recordNum, patternNZ=SDR2,
              classification={"bucketIdx": [2, 3], "actValue": [2, 3]},
              learn=True, infer=False)
    recordNum += 1
  # Each SDR should predict its two buckets with ~50% likelihood each.
  result1 = c.compute(
      recordNum=recordNum, patternNZ=SDR1, classification=None,
      learn=False, infer=True)
  recordNum += 1
  self.assertAlmostEqual(result1[0][0], 0.5, places=1)
  self.assertAlmostEqual(result1[0][1], 0.5, places=1)
  result2 = c.compute(
      recordNum=recordNum, patternNZ=SDR2, classification=None,
      learn=False, infer=True)
  recordNum += 1
  self.assertAlmostEqual(result2[0][2], 0.5, places=1)
  self.assertAlmostEqual(result2[0][3], 0.5, places=1)
def testPredictionDistributionContinuousLearning(self):
""" Test continuous learning
First, we intend the classifier to learn the associations:
SDR1 => bucketIdx 0 (30%)
=> bucketIdx 1 (30%)
=> bucketIdx 2 (40%)
SDR2 => bucketIdx 1 (50%)
=> bucketIdx 3 (50%)
After 20000 iterations, we change the association to
SDR1 => bucketIdx 0 (30%)
=> bucketIdx 1 (20%)
=> bucketIdx 3 (40%)
No further training for SDR2
The classifier should adapt continuously and learn new associations for
SDR1, but at the same time remember the old association for SDR2
"""
# NOTE(review): the docstring says "After 20000 iterations" but the first
# training phase below runs 10000 iterations (the second runs 20000).
c = self._classifier([0], 0.001, 0.1, 0)
recordNum = 0
SDR1 = [1, 3, 5]
SDR2 = [2, 4, 6]
# Fixed seed so the sampled bucket frequencies are deterministic.
random.seed(42)
for _ in xrange(10000):
# Phase 1 for SDR1: buckets 0/1/2 with probabilities 0.3/0.3/0.4.
randomNumber = random.random()
if randomNumber < 0.3:
bucketIdx = 0
elif randomNumber < 0.6:
bucketIdx = 1
else:
bucketIdx = 2
c.compute(recordNum=recordNum, patternNZ=SDR1,
classification={"bucketIdx": bucketIdx, "actValue": bucketIdx},
learn=True, infer=False)
recordNum += 1
# SDR2: buckets 1/3 with probabilities 0.5/0.5.
randomNumber = random.random()
if randomNumber < 0.5:
bucketIdx = 1
else:
bucketIdx = 3
# NOTE(review): infer=True here is inconsistent with the other training
# loops in this test (infer=False) — presumably harmless since the
# result is discarded; confirm.
c.compute(recordNum=recordNum, patternNZ=SDR2,
classification={"bucketIdx": bucketIdx, "actValue": bucketIdx},
learn=True, infer=True)
recordNum += 1
# After phase 1, both SDRs should reflect their sampling distributions.
result1 = c.compute(
recordNum=recordNum, patternNZ=SDR1,
classification={"bucketIdx": 0, "actValue": 0},
learn=False, infer=True)
recordNum += 1
self.assertAlmostEqual(result1[0][0], 0.3, places=1)
self.assertAlmostEqual(result1[0][1], 0.3, places=1)
self.assertAlmostEqual(result1[0][2], 0.4, places=1)
result2 = c.compute(
recordNum=recordNum, patternNZ=SDR2,
classification={"bucketIdx": 0, "actValue": 0},
learn=False, infer=True)
recordNum += 1
self.assertAlmostEqual(result2[0][1], 0.5, places=1)
self.assertAlmostEqual(result2[0][3], 0.5, places=1)
for _ in xrange(20000):
# Phase 2 for SDR1 only: buckets 0/1/3 with probabilities 0.3/0.3/0.4.
randomNumber = random.random()
if randomNumber < 0.3:
bucketIdx = 0
elif randomNumber < 0.6:
bucketIdx = 1
else:
bucketIdx = 3
c.compute(recordNum=recordNum, patternNZ=SDR1,
classification={"bucketIdx": bucketIdx, "actValue": bucketIdx},
learn=True, infer=False)
recordNum += 1
# SDR1 should now reflect the new distribution...
result1new = c.compute(
recordNum=recordNum, patternNZ=SDR1, classification=None,
learn=False, infer=True)
recordNum += 1
self.assertAlmostEqual(result1new[0][0], 0.3, places=1)
self.assertAlmostEqual(result1new[0][1], 0.3, places=1)
self.assertAlmostEqual(result1new[0][3], 0.4, places=1)
# ...while SDR2's (untrained in phase 2) predictions are unchanged.
result2new = c.compute(
recordNum=recordNum, patternNZ=SDR2, classification=None,
learn=False, infer=True)
recordNum += 1
self.assertSequenceEqual(list(result2[0]), list(result2new[0]))
def testMultiStepPredictions(self):
  """ Test multi-step predictions
  We train the 0-step and the 1-step classifiers simultaneously on
  data stream
  (SDR1, bucketIdx0)
  (SDR2, bucketIdx1)
  (SDR1, bucketIdx0)
  (SDR2, bucketIdx1)
  ...
  We intend the 0-step classifier to learn the associations:
  SDR1    => bucketIdx 0
  SDR2    => bucketIdx 1
  and the 1-step classifier to learn the associations
  SDR1    => bucketIdx 1
  SDR2    => bucketIdx 0
  """
  c = self._classifier([0, 1], 1.0, 0.1, 0)
  SDR1 = [1, 3, 5]
  SDR2 = [2, 4, 6]
  # Alternate the two patterns for 100 cycles.
  recordNum = 0
  for _ in range(100):
    for patternNZ, bucketIdx in ((SDR1, 0), (SDR2, 1)):
      c.compute(recordNum=recordNum, patternNZ=patternNZ,
                classification={"bucketIdx": bucketIdx,
                                "actValue": bucketIdx},
                learn=True, infer=False)
      recordNum += 1
  # Inference only; both queries use the same recordNum deliberately.
  result1 = c.compute(
      recordNum=recordNum, patternNZ=SDR1, classification=None,
      learn=False, infer=True)
  result2 = c.compute(
      recordNum=recordNum, patternNZ=SDR2, classification=None,
      learn=False, infer=True)
  # 0-step: each pattern predicts its own bucket with certainty.
  self.assertAlmostEqual(result1[0][0], 1.0, places=1)
  self.assertAlmostEqual(result1[0][1], 0.0, places=1)
  self.assertAlmostEqual(result2[0][0], 0.0, places=1)
  self.assertAlmostEqual(result2[0][1], 1.0, places=1)
def testSoftMaxOverflow(self):
  """
  Test if the softmax normalization overflows
  """
  classifier = SDRClassifier([1], 1.0, 0.1, 0)
  # A weight whose exponent is just past the float range would overflow a
  # naive softmax implementation.
  weightMatrix = numpy.array([[sys.float_info.max_exp + 1]])
  inference = classifier.inferSingleStep([0], weightMatrix)
  self.assertFalse(numpy.isnan(inference), "SoftMax overflow")
  def _doWriteReadChecks(self, computeBeforeSerializing):
    """Round-trip an SDRClassifier through capnp serialization.

    Verifies the deserialized classifier matches the original
    field-by-field and that both classifiers then produce identical
    results for the same subsequent compute() call.

    :param computeBeforeSerializing: if True, run one learn+infer step
        before serializing so the classifier carries non-trivial state.
    """
    c1 = SDRClassifier([0], 0.1, 0.1, 0)
    # Create a vector of input bit indices
    input1 = [1, 5, 9]
    if computeBeforeSerializing:
      result = c1.compute(recordNum=0,
                          patternNZ=input1,
                          classification={'bucketIdx': 4, 'actValue': 34.7},
                          learn=True, infer=True)
    # Serialize the classifier state into a fresh capnp message.
    proto1 = SdrClassifier_capnp.SdrClassifierProto.new_message()
    c1.write(proto1)
    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = SdrClassifier_capnp.SdrClassifierProto.read(f)
    # Load the deserialized proto
    c2 = SDRClassifier.read(proto2)
    # Scalar attributes must survive the round trip exactly.
    self.assertEqual(c1.steps, c2.steps)
    self.assertEqual(c1._maxSteps, c2._maxSteps)
    self.assertAlmostEqual(c1.alpha, c2.alpha)
    self.assertAlmostEqual(c1.actValueAlpha, c2.actValueAlpha)
    self.assertEqual(c1._patternNZHistory, c2._patternNZHistory)
    # Per-step weight matrices must be numerically identical.
    self.assertEqual(c1._weightMatrix.keys(), c2._weightMatrix.keys())
    for step in c1._weightMatrix.keys():
      c1Weight = c1._weightMatrix[step]
      c2Weight = c2._weightMatrix[step]
      self.assertSequenceEqual(list(c1Weight.flatten()),
                               list(c2Weight.flatten()))
    self.assertEqual(c1._maxBucketIdx, c2._maxBucketIdx)
    self.assertEqual(c1._maxInputIdx, c2._maxInputIdx)
    self.assertEqual(len(c1._actualValues), len(c2._actualValues))
    for i in xrange(len(c1._actualValues)):
      self.assertAlmostEqual(c1._actualValues[i], c2._actualValues[i], 5)
    self.assertEqual(c1._version, c2._version)
    self.assertEqual(c1.verbosity, c2.verbosity)
    # NOTE: the previous step's actual values determine the size of lists in
    # results
    expectedActualValuesLen = len(c1._actualValues)
    # Both classifiers should now behave identically on the same input.
    result1 = c1.compute(recordNum=1,
                         patternNZ=input1,
                         classification={'bucketIdx': 4, 'actValue': 34.7},
                         learn=True, infer=True)
    result2 = c2.compute(recordNum=1,
                         patternNZ=input1,
                         classification={'bucketIdx': 4, 'actValue': 34.7},
                         learn=True, infer=True)
    self.assertEqual(result1.keys(), result2.keys())
    for key in result1.keys():
      self.assertEqual(len(result1[key]), len(result2[key]))
      self.assertEqual(len(result1[key]), expectedActualValuesLen)
      for i in xrange(expectedActualValuesLen):
        self.assertAlmostEqual(result1[key][i], result2[key][i], 5)
  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    """Serialization round trip after an initial compute() call."""
    self._doWriteReadChecks(computeBeforeSerializing=True)
  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteReadNoComputeBeforeSerializing(self):
    """Serialization round trip of a freshly constructed classifier."""
    self._doWriteReadChecks(computeBeforeSerializing=False)
def test_pFormatArray(self):
from nupic.algorithms.sdr_classifier import _pFormatArray
pretty = _pFormatArray(range(10))
self.assertIsInstance(pretty, basestring)
self.assertEqual(pretty[0], "[")
self.assertEqual(pretty[-1], "]")
self.assertEqual(len(pretty.split(" ")), 12)
  def _checkValue(self, retval, index, value):
    # Helper: assert that the "actualValues" entry of a compute() result at
    # ``index`` equals ``value``.
    self.assertEqual(retval["actualValues"][index], value)
@staticmethod
def _compute(classifier, recordNum, pattern, bucket, value):
classification = {"bucketIdx": bucket, "actValue": value}
return classifier.compute(recordNum, pattern, classification, True, True)
# Allow invoking this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 31,949 | Python | .py | 730 | 35.806849 | 81 | 0.626526 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,109 | backtracking_tm_constant_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/backtracking_tm_constant_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file tests that we can learn and predict the particularly vexing case of a
single constant signal!
"""
import numpy as np
import unittest2 as unittest
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
# Fixed RNG seed so test runs are reproducible.
_SEED = 42
# Verbosity level passed to the TMs and used to gate diagnostic printing.
VERBOSITY = 1
# Seed numpy's global RNG as well, for deterministic pattern generation.
np.random.seed(_SEED)
def _printOneTrainingVector(x):
  """Print a single vector succinctly: '1' for on bits, '.' for off."""
  bits = ['1' if bit != 0 else '.' for bit in x]
  print(''.join(bits))
def _getSimplePatterns(numOnes, numPatterns):
"""Very simple patterns. Each pattern has numOnes consecutive
bits on. There are numPatterns*numOnes bits in the vector. These patterns
are used as elements of sequences when building up a training set."""
numCols = numOnes * numPatterns
p = []
for i in xrange(numPatterns):
x = np.zeros(numCols, dtype='float32')
x[i*numOnes:(i + 1)*numOnes] = 1
p.append(x)
return p
def _createTms(numCols):
  """Create two temporal memory instances with identical settings.

  Returns a (BacktrackingTMCPP, BacktrackingTM) pair sharing the same
  parameters so the tests can compare the two implementations.
  """
  # Keep these fixed:
  sharedParams = dict(numberOfCols=numCols,
                      cellsPerColumn=1,
                      initialPerm=0.3,
                      connectedPerm=0.5,
                      minThreshold=4,
                      newSynapseCount=7,
                      permanenceInc=0.1,
                      permanenceDec=0.05,
                      activationThreshold=5,
                      globalDecay=0,
                      burnIn=1,
                      seed=_SEED,
                      verbosity=VERBOSITY,
                      pamLength=1000)

  cppTm = BacktrackingTMCPP(checkSynapseConsistency=True, **sharedParams)
  # Ensure we are copying over learning states for TMDiff
  cppTm.retrieveLearningStates = True

  pyTm = BacktrackingTM(**sharedParams)

  return cppTm, pyTm
class TMConstantTest(unittest.TestCase):
  """Learn and predict a constant signal with both TM implementations."""

  def setUp(self):
    self.cppTm, self.pyTm = _createTms(100)

  def _basicTest(self, tm=None):
    """Test creation, pickling, and basic run of learning and inference."""
    trainingSet = _getSimplePatterns(10, 10)
    constantSeqs = trainingSet[0:5]

    # Learn on several constant sequences, with a reset in between.
    for _ in range(2):
      for seq in constantSeqs:
        for _ in range(10):
          tm.learn(seq)
        tm.reset()
    print("Learning completed")

    # Inference phase: every trained constant sequence should be predicted.
    print("Running inference")
    tm.collectStats = True
    for seq in constantSeqs:
      tm.reset()
      tm.resetStats()
      for _ in range(10):
        tm.infer(seq)
        if VERBOSITY > 1:
          print('')
          _printOneTrainingVector(seq)
          tm.printStates(False, False)
          print('')
          print('')
      if VERBOSITY > 1:
        print(tm.getStats())
      # Ensure our predictions are accurate for each sequence
      self.assertGreater(tm.getStats()['predictionScoreAvg2'], 0.8)
      print ("tm.getStats()['predictionScoreAvg2'] = ",
             tm.getStats()['predictionScoreAvg2'])
    print("TMConstant basicTest ok")

  def testCppTmBasic(self):
    self._basicTest(self.cppTm)

  def testPyTmBasic(self):
    self._basicTest(self.pyTm)

  def testIdenticalTms(self):
    # The two freshly created implementations must be in agreement.
    self.assertTrue(fdrutils.tmDiff2(self.cppTm, self.pyTm))
# Allow invoking this test module directly from the command line.
if __name__=="__main__":
  unittest.main()
| 5,242 | Python | .py | 129 | 32.403101 | 79 | 0.644195 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,110 | sp_overlap_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/sp_overlap_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This is a legacy test from trunk and may replicate spatial pooler tests.
The allocation of cells to new patterns is explored. After all the cells
have been allocated, cells must be reused. This test makes sure that the
allocation of new cells is such that we achieve maximum generality and
predictive power.
Note: Since the sp pooler has 2048 cells with a sparsity of 40 cells active
per iteration, 100% allocation is reached at the 51st unique pattern.
"""
import unittest2 as unittest
import random as rnd
import time
import numpy
from nupic.bindings.math import GetNTAReal
from nupic.encoders import scalar
from nupic.bindings.algorithms import SpatialPooler
# Numeric dtype used by the NuPIC math library on this platform.
realDType = GetNTAReal()
# Fixed seed so the randomized tests are reproducible.
SEED = 42
class TestSPFrequency(unittest.TestCase):
  """Checks how the spatial pooler allocates (and re-uses) columns as it
  is trained on many distinct patterns.

  NOTE(review): testCategory exercises the *scalar* encoder while
  testScalar exercises the *category* encoder — the method names and
  docstrings look swapped; confirm intent before renaming.
  """
  def testCategory(self):
    """Test that the most frequent possible option is chosen for a scalar
    encoded field """
    self.frequency(n=100, w=21, seed=SEED, numColors=90, encoder = 'scalar')
  def testScalar(self):
    """Test that the most frequent possible option is chosen for a category
    encoded field """
    self.frequency(n=30, w=21, seed=SEED, numColors=90, encoder = 'category')
  @unittest.skip("Not working...")
  def testScalarLong(self):
    """Test that the most frequent possible option is chosen for a scalar
    encoded field. Run through many different numbers of patterns and random
    seeds"""
    for n in [52, 70, 80, 90, 100, 110]:
      self.frequency(n=100, w=21, seed=SEED, numColors=n, encoder='scalar')
  @unittest.skip("Not working...")
  def testCategoryLong(self):
    """Test that the most frequent possible option is chosen for a category
    encoded field. Run through many different numbers of patterns and random
    seeds"""
    for n in [52, 70, 80, 90, 100, 110]:
      self.frequency(n=100, w=21, seed=SEED, numColors=n)
  def frequency(self,
                n=15,
                w=7,
                columnDimensions = 2048,
                numActiveColumnsPerInhArea = 40,
                stimulusThreshold = 0,
                spSeed = 1,
                spVerbosity = 0,
                numColors = 2,
                seed=42,
                minVal=0,
                maxVal=10,
                encoder = 'category',
                forced=True):
    """ Helper function that tests whether the SP predicts the most
    frequent record """
    print "\nRunning SP overlap test..."
    print encoder, 'encoder,', 'Random seed:', seed, 'and', numColors, 'colors'
    #Setting up SP and creating training patterns
    # Instantiate Spatial Pooler
    spImpl = SpatialPooler(
                           columnDimensions=(columnDimensions, 1),
                           inputDimensions=(1, n),
                           potentialRadius=n/2,
                           numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
                           spVerbosity=spVerbosity,
                           stimulusThreshold=stimulusThreshold,
                           potentialPct=0.5,
                           seed=spSeed,
                           globalInhibition=True,
                           )
    # Seed both RNGs so the generated patterns are deterministic.
    rnd.seed(seed)
    numpy.random.seed(seed)
    colors = []
    coincs = []
    reUsedCoincs = []
    spOutput = []
    patterns = set([])
    # Setting up the encodings
    if encoder=='scalar':
      enc = scalar.ScalarEncoder(name='car', w=w, n=n, minval=minVal,
                                 maxval=maxVal, periodic=False, forced=True) # forced: it's strongly recommended to use w>=21, in the example we force skip the check for readibility
      for y in xrange(numColors):
        temp = enc.encode(rnd.random()*maxVal)
        colors.append(numpy.array(temp, dtype=numpy.uint32))
    else:
      for y in xrange(numColors):
        sdr = numpy.zeros(n, dtype=numpy.uint32)
        # Randomly setting w out of n bits to 1
        sdr[rnd.sample(xrange(n), w)] = 1
        colors.append(sdr)
    # Training the sp
    print 'Starting to train the sp on', numColors, 'patterns'
    startTime = time.time()
    for i in xrange(numColors):
      # TODO: See https://github.com/numenta/nupic/issues/2072
      spInput = colors[i]
      onCells = numpy.zeros(columnDimensions, dtype=numpy.uint32)
      spImpl.compute(spInput, True, onCells)
      spOutput.append(onCells.tolist())
      activeCoincIndices = set(onCells.nonzero()[0])
      # Checking if any of the active cells have been previously active
      reUsed = activeCoincIndices.intersection(patterns)
      if len(reUsed) == 0:
        # The set of all coincidences that have won at least once
        coincs.append((i, activeCoincIndices, colors[i]))
      else:
        reUsedCoincs.append((i, activeCoincIndices, colors[i]))
      # Adding the active cells to the set of coincs that have been active at
      # least once
      patterns.update(activeCoincIndices)
      if (i + 1) % 100 == 0:
        print 'Record number:', i + 1
    print "Elapsed time: %.2f seconds" % (time.time() - startTime)
    print len(reUsedCoincs), "re-used coinc(s),"
    # Check if results match expectations
    # summ[k]: total overlap of never-reused winner set k with all re-used
    # winner sets.
    summ = []
    for z in coincs:
      summ.append(sum([len(z[1].intersection(y[1])) for y in reUsedCoincs]))
    zeros = len([x for x in summ if x==0])
    factor = max(summ)*len(summ)/sum(summ)
    # NOTE(review): ``reUsed`` here is whatever was left over from the LAST
    # loop iteration above; ``len(reUsedCoincs)`` (the count printed earlier)
    # looks like the intended condition — confirm before changing.
    if len(reUsed) < 10:
      self.assertLess(factor, 41,
                      "\nComputed factor: %d\nExpected Less than %d" % (
                          factor, 41))
      self.assertLess(zeros, 0.99*len(summ),
                      "\nComputed zeros: %d\nExpected Less than %d" % (
                          zeros, 0.99*len(summ)))
    else:
      self.assertLess(factor, 8,
                      "\nComputed factor: %d\nExpected Less than %d" % (
                          factor, 8))
      self.assertLess(zeros, 12,
                      "\nComputed zeros: %d\nExpected Less than %d" % (
                          zeros, 12))
def hammingDistance(s1, s2):
  """Return the number of positions at which s1 and s2 differ.

  Both sequences must have the same length.
  """
  assert len(s1) == len(s2)
  mismatches = 0
  for a, b in zip(s1, s2):
    if a != b:
      mismatches += 1
  return mismatches
# Allow invoking this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 7,074 | Python | .py | 161 | 35.658385 | 181 | 0.627765 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,111 | spatial_pooler_cpp_api_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/spatial_pooler_cpp_api_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest2 as unittest
from nupic.bindings.algorithms import SpatialPooler as CPPSpatialPooler
import spatial_pooler_py_api_test
# Monkey-patch the Python API test module so its test suite runs against
# the C++ SpatialPooler implementation instead of the Python one.
spatial_pooler_py_api_test.SpatialPooler = CPPSpatialPooler
# Re-export the patched test case under a new name so unittest discovery
# picks it up from this module.
SpatialPoolerCPPAPITest = spatial_pooler_py_api_test.SpatialPoolerAPITest
if __name__ == "__main__":
  unittest.main()
| 1,296 | Python | .py | 27 | 46.703704 | 73 | 0.705463 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,112 | trace_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/monitor_mixin/trace_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
from nupic.algorithms.monitor_mixin.trace import IndicesTrace
class IndicesTraceTest(unittest.TestCase):
  """Unit tests for the count traces derived from an IndicesTrace."""

  def setUp(self):
    self.trace = IndicesTrace(self, "active cells")
    # Four time steps with 3, 2, 1 and 0 active cells respectively.
    for indices in ([1, 2, 3], [4, 5], [6], []):
      self.trace.data.append(set(indices))

  def testMakeCountsTrace(self):
    derived = self.trace.makeCountsTrace()
    self.assertEqual(derived.title, "# active cells")
    self.assertEqual(derived.data, [3, 2, 1, 0])

  def testMakeCumCountsTrace(self):
    derived = self.trace.makeCumCountsTrace()
    self.assertEqual(derived.title, "# (cumulative) active cells")
    self.assertEqual(derived.data, [3, 5, 6, 6])
# Allow invoking this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 1,784 | Python | .py | 39 | 43.051282 | 72 | 0.690352 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,113 | metric_test.py | numenta_nupic-legacy/tests/unit/nupic/algorithms/monitor_mixin/metric_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
from nupic.algorithms.monitor_mixin.metric import Metric
from nupic.algorithms.monitor_mixin.trace import CountsTrace, BoolsTrace
class MetricTest(unittest.TestCase):
  """Unit tests for Metric.createFromTrace."""

  def setUp(self):
    self.trace = CountsTrace(self, "# active cells")
    self.trace.data = [1, 2, 3, 4, 5, 0]

  def _assertStats(self, metric, minimum, maximum, total, mean, stdDev):
    # Compare every summary statistic of ``metric`` to the expected values.
    self.assertEqual(metric.title, self.trace.title)
    self.assertEqual(metric.min, minimum)
    self.assertEqual(metric.max, maximum)
    self.assertEqual(metric.sum, total)
    self.assertEqual(metric.mean, mean)
    self.assertEqual(metric.standardDeviation, stdDev)

  def testCreateFromTrace(self):
    metric = Metric.createFromTrace(self.trace)
    self._assertStats(metric, 0, 5, 15, 2.5, 1.707825127659933)

  def testCreateFromTraceExcludeResets(self):
    resetTrace = BoolsTrace(self, "resets")
    resetTrace.data = [True, False, False, True, False, False]
    # Data points at reset positions are excluded from the statistics.
    metric = Metric.createFromTrace(self.trace, excludeResets=resetTrace)
    self._assertStats(metric, 0, 5, 10, 2.5, 1.8027756377319946)
# Allow invoking this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 2,174 | Python | .py | 47 | 43.255319 | 73 | 0.713677 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,114 | object_json_test.py | numenta_nupic-legacy/tests/unit/nupic/support/object_json_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for object_json module."""
import datetime
import StringIO
from nupic.data.inference_shifter import InferenceShifter
from nupic.swarming.hypersearch import object_json as json
from nupic.support.unittesthelpers.testcasebase import (TestCaseBase,
unittest)
class TestObjectJson(TestCaseBase):
  """Unit tests for object_json module."""
  def testPrimitives(self):
    """A dumps/loads round trip preserves every primitive type unchanged."""
    self.assertEqual(json.loads(json.dumps(None)), None)
    self.assertEqual(json.loads(json.dumps(True)), True)
    self.assertEqual(json.loads(json.dumps(False)), False)
    self.assertEqual(json.loads(json.dumps(-5)), -5)
    self.assertEqual(json.loads(json.dumps(0)), 0)
    self.assertEqual(json.loads(json.dumps(5)), 5)
    self.assertEqual(json.loads(json.dumps(7.7)), 7.7)
    self.assertEqual(json.loads(json.dumps('hello')), 'hello')
    self.assertEqual(json.loads(json.dumps(5L)), 5L)
    self.assertEqual(json.loads(json.dumps(u'hello')), u'hello')
    self.assertEqual(json.loads(json.dumps([5, 6, 7])), [5, 6, 7])
    self.assertEqual(json.loads(json.dumps({'5': 6, '7': 8})), {'5': 6, '7': 8})
  def testDates(self):
    """datetime.date serializes via py/object + py/repr and round-trips."""
    d = datetime.date(year=2012, month=9, day=25)
    serialized = json.dumps(d)
    self.assertEqual(serialized,
                     '{"py/object": "datetime.date", '
                     '"py/repr": "datetime.date(2012, 9, 25)"}')
    deserialized = json.loads(serialized)
    self.assertEqual(type(deserialized), datetime.date)
    self.assertEqual(deserialized.isoformat(), d.isoformat())
  def testDatetimes(self):
    """datetime.datetime round-trips with full microsecond precision."""
    d = datetime.datetime(year=2012, month=9, day=25, hour=14, minute=33,
                          second=8, microsecond=455969)
    serialized = json.dumps(d)
    self.assertEqual(serialized,
                     '{"py/object": "datetime.datetime", "py/repr": '
                     '"datetime.datetime(2012, 9, 25, 14, 33, 8, 455969)"}')
    deserialized = json.loads(serialized)
    self.assertEqual(type(deserialized), datetime.datetime)
    self.assertEqual(deserialized.isoformat(), d.isoformat())
  def testDumpsTuple(self):
    """Tuples serialize with the py/tuple wrapper."""
    self.assertEqual(json.dumps((5, 6, 7)), '{"py/tuple": [5, 6, 7]}')
  def testTuple(self):
    """A tuple round-trips as a tuple, not a list."""
    self.assertTupleEqual(json.loads(json.dumps((5, 6, 7))), (5, 6, 7))
  def testComplex(self):
    """Complex numbers round-trip."""
    self.assertEqual(json.loads(json.dumps(2 + 1j)), 2 + 1j)
  def testBasicDumps(self):
    """Nested dicts serialize like plain JSON."""
    d = {'a': 1, 'b': {'c': 2}}
    s = json.dumps(d, sort_keys=True)
    self.assertEqual(s, '{"a": 1, "b": {"c": 2}}')
  def testDumpsWithIndent(self):
    """The indent keyword is honored, as in the stdlib json module."""
    d = {'a': 1, 'b': {'c': 2}}
    s = json.dumps(d, indent=2, sort_keys=True)
    self.assertEqual(s, '{\n  "a": 1,\n  "b": {\n    "c": 2\n  }\n}')
  def testDump(self):
    """dump() writes the serialization to a file-like object."""
    d = {'a': 1, 'b': {'c': 2}}
    f = StringIO.StringIO()
    json.dump(d, f)
    self.assertEqual(f.getvalue(), '{"a": 1, "b": {"c": 2}}')
  def testLoads(self):
    """loads() parses plain JSON into dicts."""
    s = '{"a": 1, "b": {"c": 2}}'
    d = json.loads(s)
    self.assertDictEqual(d, {'a': 1, 'b': {'c': 2}})
  def testLoadsWithIndent(self):
    """loads() accepts indented (pretty-printed) input."""
    s = '{\n  "a": 1,\n  "b": {\n    "c": 2\n  }\n}'
    d = json.loads(s)
    self.assertDictEqual(d, {'a': 1, 'b': {'c': 2}})
  def testLoad(self):
    """load() reads the serialization from a file-like object."""
    f = StringIO.StringIO('{"a": 1, "b": {"c": 2}}')
    d = json.load(f)
    self.assertDictEqual(d, {'a': 1, 'b': {'c': 2}})
  def testNonStringKeys(self):
    """Dicts keyed by floats, ints and tuples round-trip intact."""
    original = {1.1: 1, 5: {(7, 8, 9): {1.1: 5}}}
    result = json.loads(json.dumps(original))
    self.assertEqual(original, result)
  def testDumpsObject(self):
    """Arbitrary objects serialize with a py/object type tag plus their
    instance attributes."""
    testClass = InferenceShifter()
    testClass.a = 5
    testClass.b = {'b': (17,)}
    encoded = json.dumps(testClass, sort_keys=True)
    self.assertEqual(
        encoded,
        '{"_inferenceBuffer": null, "a": 5, "b": {"b": {"py/tuple": [17]}}, '
        '"py/object": "nupic.data.inference_shifter.InferenceShifter"}')
  def testObjectWithNonStringKeys(self):
    """Objects whose attribute dicts use non-string keys encode those keys
    via the py/dict/keys escape and decode back to the original types."""
    testClass = InferenceShifter()
    testClass.a = 5
    testClass.b = {(4, 5): (17,)}
    encoded = json.dumps(testClass, sort_keys=True)
    self.assertEqual(
        encoded,
        '{"_inferenceBuffer": null, "a": 5, "b": {"py/dict/keys": '
        '["{\\"py/tuple\\": [4, 5]}"], "{\\"py/tuple\\": [4, 5]}": '
        '{"py/tuple": [17]}}, "py/object": '
        '"nupic.data.inference_shifter.InferenceShifter"}')
    decoded = json.loads(encoded)
    self.assertEqual(decoded.a, 5)
    self.assertEqual(type(decoded.b), dict)
    self.assertEqual(len(decoded.b.keys()), 1)
    self.assertTupleEqual(decoded.b.keys()[0], (4, 5))
    self.assertTupleEqual(decoded.b[(4, 5)], (17,))
# Allow invoking this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 5,608 | Python | .py | 124 | 39.822581 | 80 | 0.618089 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,115 | group_by_test.py | numenta_nupic-legacy/tests/unit/nupic/support/group_by_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
from nupic.support.group_by import groupby2
""" File to test src/nupic/support/group_by.py """
class GroupByTest(unittest.TestCase):
  """Tests for nupic.support.group_by.groupby2."""

  def _verify(self, expectedValues, *sequencesAndKeyFns):
    """Run groupby2 on the given (sequence, keyFn) argument pairs and
    compare each yielded row with ``expectedValues``.

    Each expected row is a tuple (key, group0, group1, ...) where a group
    is either the list of values contributed by that sequence or None when
    the sequence contributed nothing for the key.
    """
    for rowIdx, row in enumerate(groupby2(*sequencesAndKeyFns)):
      expectedRow = expectedValues[rowIdx]
      self.assertEqual(row[0], expectedRow[0])
      for col in range(1, len(row)):
        group = list(row[col]) if row[col] else row[col]
        self.assertEqual(group, expectedRow[col])

  def testOneSequence(self):
    identity = lambda x: int(x)
    self._verify([(7, [7]),
                  (12, [12, 12]),
                  (16, [16])],
                 [7, 12, 12, 16], identity)

  def testTwoSequences(self):
    identity = lambda x: int(x)
    times3 = lambda x: int(3 * x)
    self._verify([(7, [7], None),
                  (9, None, [3]),
                  (12, [12], [4]),
                  (15, None, [5]),
                  (16, [16], None)],
                 [7, 12, 16], identity,
                 [3, 4, 5], times3)

  def testThreeSequences(self):
    identity = lambda x: int(x)
    times3 = lambda x: int(3 * x)
    times4 = lambda x: int(4 * x)
    self._verify([(7, [7], None, None),
                  (9, None, [3], None),
                  (12, [12], [4], [3, 3]),
                  (15, None, [5], None),
                  (16, [16], None, [4]),
                  (20, None, None, [5])],
                 [7, 12, 16], identity,
                 [3, 4, 5], times3,
                 [3, 3, 4, 5], times4)

  def testFourSequences(self):
    identity = lambda x: int(x)
    times3 = lambda x: int(3 * x)
    times4 = lambda x: int(4 * x)
    times5 = lambda x: int(5 * x)
    self._verify([(7, [7], None, None, None),
                  (9, None, [3], None, None),
                  (12, [12], [4], [3, 3], None),
                  (15, None, [5], None, [3, 3]),
                  (16, [16], None, [4], None),
                  (20, None, None, [5], [4]),
                  (25, None, None, None, [5])],
                 [7, 12, 16], identity,
                 [3, 4, 5], times3,
                 [3, 3, 4, 5], times4,
                 [3, 3, 4, 5], times5)

  def testFiveSequences(self):
    identity = lambda x: int(x)
    times3 = lambda x: int(3 * x)
    times4 = lambda x: int(4 * x)
    times5 = lambda x: int(5 * x)
    times6 = lambda x: int(6 * x)
    self._verify([(7, [7], None, None, None, None),
                  (9, None, [3], None, None, None),
                  (12, [12], [4], [3, 3], None, [2, 2]),
                  (15, None, [5], None, [3, 3], None),
                  (16, [16], None, [4], None, None),
                  (18, None, None, None, None, [3]),
                  (20, None, None, [5], [4], None),
                  (25, None, None, None, [5], None)],
                 [7, 12, 16], identity,
                 [3, 4, 5], times3,
                 [3, 3, 4, 5], times4,
                 [3, 3, 4, 5], times5,
                 [2, 2, 3], times6)

  def testNone(self):
    # None in place of a sequence should contribute no groups at all.
    identity = lambda x: int(x)
    times3 = lambda x: int(3 * x)
    times4 = lambda x: int(4 * x)
    times5 = lambda x: int(5 * x)
    times6 = lambda x: int(6 * x)
    self._verify([(9, None, [3], None, None, None),
                  (12, None, [4], None, None, None),
                  (15, None, [5], None, [3, 3], None),
                  (20, None, None, None, [4], None),
                  (25, None, None, None, [5], None)],
                 None, identity,
                 [3, 4, 5], times3,
                 None, times4,
                 [3, 3, 4, 5], times5,
                 None, times6)
# Allow invoking this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 6,474 | Python | .py | 162 | 30.012346 | 72 | 0.509716 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,116 | custom_configuration_test.py | numenta_nupic-legacy/tests/unit/nupic/support/custom_configuration_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from copy import copy
import os
import shutil
from StringIO import StringIO
import sys
import tempfile
import unittest2 as unittest
import uuid
from pkg_resources import resource_filename
from mock import Mock, patch
from pkg_resources import resource_filename
from xml.parsers.expat import ExpatError
# ParseError not present in xml module for python2.6
try:
from xml.etree.ElementTree import ParseError
except ImportError:
from xml.parsers.expat import ExpatError as ParseError
import nupic
import nupic.support.configuration_custom as configuration
import configuration_test
class ConfigurationCustomTest(unittest.TestCase):
def setUp(self):
  """Create per-test copies of the three config files in a temp dir.

  Populates self.files with paths keyed by canonical config-file name,
  and removes NTA_DYNAMIC_CONF_DIR from the real environment (restoring
  it via addCleanup) so in-process tests never touch actual files.
  """
  if "NTA_DYNAMIC_CONF_DIR" in os.environ:
    # Remove it to make sure our in-proc tests won't accidentally
    # mess with actual files
    oldNtaDynamicConfDir = os.environ["NTA_DYNAMIC_CONF_DIR"]
    del os.environ["NTA_DYNAMIC_CONF_DIR"]
    self.addCleanup(os.environ.update,
                    dict(NTA_DYNAMIC_CONF_DIR=oldNtaDynamicConfDir))
  self.files = dict()
  tmpDir = tempfile.mkdtemp()
  self.addCleanup(shutil.rmtree, tmpDir)
  # Copy the packaged sample config files into the temp dir; the keys of
  # self.files are the names findConfigFile is queried with in the tests.
  with open(os.path.join(tmpDir, 'nupic-default.xml-unittest'), 'w') as fp:
    with open(resource_filename(__name__, 'conf/nupic-default.xml')) as inp:
      fp.write(inp.read())
    self.files['nupic-default.xml'] = fp.name
  with open(os.path.join(tmpDir, 'nupic-site.xml-unittest'), 'w') as fp:
    with open(resource_filename(__name__, 'conf/nupic-site.xml')) as inp:
      fp.write(inp.read())
    self.files['nupic-site.xml'] = fp.name
  with open(os.path.join(tmpDir, 'nupic-custom.xml'), 'w') as fp:
    with open(resource_filename(__name__, 'conf/nupic-custom.xml')) as inp:
      fp.write(inp.read())
    self.files['nupic-custom.xml'] = fp.name
  self.customParam = 'nupic.custom.hello'
  self.customValue = 'world'
  configuration.Configuration.clear()
####################################################################
# Custom Configuration Tests
# Todo: Share tests between two configuration_test files
####################################################################
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testCustomFileCreated(self, findConfigFile, environ):
  """setCustomProperty must create nupic-custom.xml in NTA_DYNAMIC_CONF_DIR."""
  environ.__getitem__.side_effect = dict(
    NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  configuration.Configuration.setCustomProperty('param', 'val')
  self.assertTrue(os.path.exists(self.files['nupic-custom.xml']))
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGet(self, findConfigFile, environ):
  """get() must return a property defined only in nupic-custom.xml."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  # Overwrite the fixture custom file with a single known property.
  with open(self.files['nupic-custom.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>'+self.customParam+'</name>',
      '    <value>'+self.customValue+'</value>',
      '  </property>',
      '</configuration>')))
  self.assertEqual(
    configuration.Configuration.get(self.customParam),
    self.customValue)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testSetCustomProperty(self, findConfigFile, environ):
  """A property set via setCustomProperty must survive Configuration.clear()."""
  environ.__getitem__.side_effect = dict(
    NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
  environ.get.return_value = None
  configuration.Configuration.clear()
  findConfigFile.side_effect = self.files.get
  with open(self.files['nupic-custom.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>' + self.customParam + '</name>',
      '    <value>' + self.customValue + '</value>',
      '  </property>',
      '</configuration>')))
  configuration.Configuration.setCustomProperty('PersistProp', 'PersistVal')
  self.assertEqual(
    configuration.Configuration.get('PersistProp'),'PersistVal')
  # clear() reloads from disk, so the persisted value must still be there.
  configuration.Configuration.clear()
  self.assertEqual(
    configuration.Configuration.get('PersistProp'),'PersistVal')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testSetCustomProperties(self, findConfigFile, environ):
  """setCustomProperties must merge new values and persist across clear()."""
  environ.__getitem__.side_effect = dict(
    NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  with open(self.files['nupic-custom.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>' + self.customParam + '</name>',
      '    <value>' + self.customValue + '</value>',
      '  </property>',
      '</configuration>')))
  configuration.Configuration.clear()
  originalProps = copy(configuration.Configuration.dict())
  configuration.Configuration.setCustomProperties(
    {'PersistProp' : 'PersistVal', 'apple' : 'pear'})
  # Expected result is the original properties plus the two new ones;
  # update() order means originals win on any key collision.
  expectedProps = {'PersistProp' : 'PersistVal', 'apple' : 'pear'}
  expectedProps.update(originalProps)
  self.assertEqual(configuration.Configuration.dict(), expectedProps)
  configuration.Configuration.clear()
  self.assertEqual(configuration.Configuration.dict(), expectedProps)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testDictWithTemp(self, findConfigFile, environ):
  """dict() must include all properties from nupic-custom.xml.

  Improvement over the original: assertIn/assertEqual replace bare
  assertTrue(x in d) so a failure reports the actual dict contents.
  """
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-custom.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>param</name>',
      '    <value>value</value>',
      '  </property>',
      '  <property>',
      '    <name>param2</name>',
      '    <value>value2</value>',
      '  </property>',
      '</configuration>')))
  customDict = configuration.Configuration.dict()
  self.assertIn('param', customDict)
  self.assertIn('param2', customDict)
  self.assertEqual(customDict['param'], 'value')
  self.assertEqual(customDict['param2'], 'value2')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testCustomConfigOverrides(self, findConfigFile, environ):
  """A value in nupic-custom.xml must override the same key from base config."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  # Pick an arbitrary existing property name to override.
  paramNames = configuration.Configuration.dict().keys()
  customValue = 'NewValue'
  with open(self.files['nupic-custom.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>'+paramNames[0]+'</name>',
      '    <value>'+customValue+'</value>',
      '  </property>',
      '</configuration>')))
  configuration.Configuration.clear()
  self.assertEqual(configuration.Configuration.get(paramNames[0]), \
                   customValue)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testCustomConfigDict(self, findConfigFile, environ):
  """A brand-new key in nupic-custom.xml must be retrievable via get()."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-custom.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>CustomParam</name>',
      '    <value>CustomValue</value>',
      '  </property>',
      '</configuration>')))
  configuration.Configuration.clear()
  self.assertEqual(configuration.Configuration.get('CustomParam'), \
                   'CustomValue')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testClearInvalidFile(self, findConfigFile, environ):
  """get() must raise RuntimeError when the custom file's root element is wrong."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  # Custom file with a root element other than <configuration>.
  with open(self.files['nupic-custom.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<foo/>')))
  configuration.Configuration.clear()
  # Silence the error message the parser prints to stderr.
  with patch('sys.stderr', new_callable=StringIO):
    self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testSetInvalidFile(self, findConfigFile, environ):
  """setCustomProperty must reject corrupt custom files, then recover after reset."""
  environ.__getitem__.side_effect = dict(
    NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
  configuration.Configuration.clear()
  # Case 1: wrong root element.
  with open(self.files['nupic-custom.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<foo/>')))
  with patch('sys.stderr', new_callable=StringIO):
    with self.assertRaises(RuntimeError) as cm:
      configuration.Configuration.setCustomProperty('foo', 'value')
  self.assertIn("Expected top-level element to be 'configuration'",
                cm.exception.args[0])
  # Case 2: completely empty file.
  with open(self.files['nupic-custom.xml'], 'w') as fp:
    fp.write('\n'.join(('')))
  with patch('sys.stderr', new_callable=StringIO):
    with self.assertRaises(RuntimeError) as cm:
      configuration.Configuration.setCustomProperty('foo', 'value')
  self.assertIn("File contents of custom configuration is corrupt.",
                cm.exception.args[0])
  # NTA_CONF_PATH is not being mocked out in this test, so we have to mock out
  # findConfigFile to return the right path to the config file.
  findConfigFile.return_value = self.files['nupic-custom.xml']
  configuration.Configuration.resetCustomConfig()
  configuration.Configuration.setCustomProperty('foo', 'value')
  self.assertEqual(configuration.Configuration.getCustomDict(), {'foo': 'value'})
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetCustomDict(self, findConfigFile, environ):
  """getCustomDict must return exactly the properties in nupic-custom.xml."""
  environ.__getitem__.side_effect = dict(
    NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-custom.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>CustomParam</name>',
      '    <value>CustomValue</value>',
      '  </property>',
      '</configuration>')))
  self.assertEqual(configuration.Configuration.getCustomDict(),
                   dict(CustomParam='CustomValue'))
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetCustomDictNoFile(self, findConfigFile, environ):
  """getCustomDict must return an empty dict right after resetCustomConfig."""
  environ.__getitem__.side_effect = dict(
    NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.resetCustomConfig()
  self.assertEqual(configuration.Configuration.getCustomDict(), dict())
  # NOTE(review): this deletion happens AFTER the assertion, so the custom
  # file mapping is still present during the test; presumably it was meant
  # to run before resetCustomConfig -- confirm intent. Harmless either way,
  # since setUp rebuilds self.files for every test.
  del self.files['nupic-custom.xml']
###############################################
# Replicated Tests From configuration_test.py
###############################################
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetStringMissingRaisesKeyError(self, findConfigFileMock, environMock):
  """getString on an unknown property name must raise KeyError."""
  environMock.get.return_value = None
  findConfigFileMock.side_effect = self.files.get
  configuration.Configuration.clear()
  missingKey = uuid.uuid1().hex
  with self.assertRaises(KeyError):
    configuration.Configuration.getString(missingKey)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetString(self, findConfigFileMock, environMock):
  """getString must return the string value previously stored via set."""
  findConfigFileMock.side_effect = self.files.get
  environMock.get.return_value = None
  configuration.Configuration.clear()
  configuration.Configuration.set('foo', 'bar')
  self.assertEqual(configuration.Configuration.getString('foo'), 'bar')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetBoolMissingRaisesKeyError(self, findConfigFileMock, environMock):
  """getBool on an unknown property name must raise KeyError."""
  environMock.get.return_value = None
  findConfigFileMock.side_effect = self.files.get
  configuration.Configuration.clear()
  missingKey = uuid.uuid1().hex
  with self.assertRaises(KeyError):
    configuration.Configuration.getBool(missingKey)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetBoolOutOfRangeRaisesValueError(self, findConfigFileMock,
                                          environMock):
  """getBool must raise ValueError for integer strings outside {0, 1}."""
  environMock.get.return_value = None
  findConfigFileMock.side_effect = self.files.get
  configuration.Configuration.clear()
  # Both above-range and below-range values are rejected.
  for propName, rawValue in (('foobool2', '2'), ('fooboolneg1', '-1')):
    configuration.Configuration.set(propName, rawValue)
    with self.assertRaises(ValueError):
      configuration.Configuration.getBool(propName)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetBool(self, findConfigFileMock, environMock):
  """getBool must map '0' to False and '1' to True."""
  environMock.get.return_value = None
  findConfigFileMock.side_effect = self.files.get
  configuration.Configuration.clear()
  for propName, rawValue, expected in (('foobool0', '0', False),
                                       ('foobool1', '1', True)):
    configuration.Configuration.set(propName, rawValue)
    self.assertEqual(configuration.Configuration.getBool(propName), expected)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetIntMissingRaisesKeyError(self, findConfigFileMock, environMock):
  """getInt on an unknown property name must raise KeyError."""
  environMock.get.return_value = None
  findConfigFileMock.side_effect = self.files.get
  configuration.Configuration.clear()
  missingKey = uuid.uuid1().hex
  with self.assertRaises(KeyError):
    configuration.Configuration.getInt(missingKey)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetInt(self, findConfigFileMock, environMock):
  """getInt must parse a stored decimal string (including negatives)."""
  findConfigFileMock.side_effect = self.files.get
  environMock.get.return_value = None
  configuration.Configuration.clear()
  configuration.Configuration.set('fooint', '-127')
  self.assertEqual(configuration.Configuration.getInt('fooint'), -127)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetFloatMissingRaisesKeyError(self, findConfigFileMock, environMock):
  """getFloat on an unknown property name must raise KeyError."""
  environMock.get.return_value = None
  findConfigFileMock.side_effect = self.files.get
  configuration.Configuration.clear()
  missingKey = uuid.uuid1().hex
  with self.assertRaises(KeyError):
    configuration.Configuration.getFloat(missingKey)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetFloat(self, findConfigFileMock, environMock):
  """getFloat must parse a stored decimal string (including negatives)."""
  findConfigFileMock.side_effect = self.files.get
  environMock.get.return_value = None
  configuration.Configuration.clear()
  configuration.Configuration.set('foofloat', '-127.65')
  self.assertEqual(configuration.Configuration.getFloat('foofloat'), -127.65)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetMissingReturnsNone(self, findConfigFile, environ):
  """get() on an unknown property must return None (not raise).

  Improvement: assertIsNone replaces assertTrue(result is None) for a
  clearer failure message showing the offending value.
  """
  findConfigFile.side_effect = self.files.get
  environ.get.return_value = None
  configuration.Configuration.clear()
  result = configuration.Configuration.get(str(uuid.uuid4()))
  self.assertIsNone(result)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testSetAndGet(self, findConfigFile, environ):
  """A value stored with set() must be returned verbatim by get().

  Improvement: assertEqual replaces assertTrue(result == 'bar') so a
  failure reports both sides of the comparison.
  """
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  configuration.Configuration.set('foo', 'bar')
  result = configuration.Configuration.get('foo')
  self.assertEqual(result, 'bar')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testDict(self, findConfigFile, environ):
  """dict() must return a dict containing every property set so far.

  Improvement: assertIsInstance/assertIn/assertEqual replace the bare
  assertTrue(...) checks for informative failure output.
  """
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  configuration.Configuration.set('foo', 'bar')
  configuration.Configuration.set('apple', 'banana')
  result = configuration.Configuration.dict()
  self.assertIsInstance(result, dict)
  self.assertIn('foo', result)
  self.assertEqual(result['foo'], 'bar')
  self.assertIn('apple', result)
  self.assertEqual(result['apple'], 'banana')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testDictReadsFilesFirstTime(self, findConfigFile,
                                environ):  # pylint: disable=W0613
  """First dict() call must load exactly one property from the fixture files.

  Improvement: assertIsInstance/assertEqual replace assertTrue for
  informative failure output (the dict is passed as the failure message).
  """
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  result = configuration.Configuration.dict()
  self.assertIsInstance(result, dict)
  self.assertEqual(len(result), 1, result)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testDictReplacesKeysFromEnvironment(self, findConfigFile, environ):
  """NTA_CONF_PROP_<key> environment variables must appear in dict().

  Improvement: assertIn/assertEqual replace bare assertTrue(...) checks.
  """
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  key = str(uuid.uuid4())
  env = {'NTA_CONF_PROP_' + key: 'foo'}
  environ.keys.side_effect = env.keys
  environ.__getitem__.side_effect = env.__getitem__
  result = configuration.Configuration.dict()
  self.assertIn(key, result)
  self.assertEqual(result[key], 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testClear(self, findConfigFile, environ):
  """clear() must drop values that were set in memory (not persisted).

  Improvement: assertEqual/assertIsNone replace bare assertTrue(...)
  comparisons for informative failure output.
  """
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  configuration.Configuration.set('foo', 'bar')
  configuration.Configuration.set('apple', 'banana')
  self.assertEqual(configuration.Configuration.get('foo'), 'bar')
  self.assertEqual(configuration.Configuration.get('apple'), 'banana')
  configuration.Configuration.clear()
  self.assertIsNone(configuration.Configuration.get('foo'))
  self.assertIsNone(configuration.Configuration.get('apple'))
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testGetFromEnvironment(self, findConfigFile, environ):
  """get() must fall back to the NTA_CONF_PROP_<key> environment variable.

  Improvement: assertEqual replaces assertTrue(... == 'foo').
  """
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  key = str(uuid.uuid4())
  environ.get.side_effect = {'NTA_CONF_PROP_' + key: 'foo'}.get
  self.assertEqual(configuration.Configuration.get(key), 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testReadConfigFileFromPath(self, findConfigFile, environ):
  """readConfigFile(filename, path) must load properties from that path.

  Improvement: assertEqual replaces assertTrue(... == 'dummy value').
  """
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  # Split the fixture path into (directory, filename) for readConfigFile.
  prefix, _, filename = self.files['nupic-default.xml'].rpartition(os.sep)
  configuration.Configuration.readConfigFile(filename, prefix)
  self.assertEqual(configuration.Configuration.get('dummy'), 'dummy value')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testReadConfigFileUnexpectedElementAtRoot(self, findConfigFile, environ):
  """A default file with a non-<configuration> root must raise RuntimeError."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-default.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<foo/>')))
  with patch('sys.stderr', new_callable=StringIO):
    self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMissingDocumentRoot(self, findConfigFile, environ):
  """A default file with no root element must raise an XML parse error."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-default.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>')))
  # Either ExpatError or ElementTree's ParseError, depending on Python version.
  with patch('sys.stderr', new_callable=StringIO):
    self.assertRaises((ExpatError, ParseError), configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMissingNonPropertyConfigurationChildren(
    self, findConfigFile, environ):
  """Non-<property> children of <configuration> must be silently ignored."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-default.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      ' <foo>bar<baz/></foo>',
      '</configuration>')))
  # Only the 'dummy' property from the (unmodified) site file remains.
  self.assertEqual(configuration.Configuration.dict(), \
                   dict(dummy='dummy value'))
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testReadConfigFileEmptyValue(self, findConfigFile, environ):
  """A <property> with a name but no <value> element must raise on get()."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-default.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>foo</name>',
      '  </property>',
      '</configuration>')))
  with patch('sys.stderr', new_callable=StringIO):
    self.assertRaises(Exception, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testReadConfigFileEmptyNameAndValue(self, findConfigFile, environ):
  """A <property> with empty <name> and <value> must raise RuntimeError."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-default.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name></name>',
      '    <value></value>',
      '  </property>',
      '</configuration>')))
  with patch('sys.stderr', new_callable=StringIO):
    self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMissingEnvVars(self, findConfigFile, environ):
  """A ${env.X} reference to an unset environment variable must raise."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-default.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>foo</name>',
      '    <value>${env.foo}</value>',
      '  </property>',
      '</configuration>')))
  with patch('sys.stderr', new_callable=StringIO):
    self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMalformedEnvReference(self, findConfigFile,
                                            environ):  # pylint: disable=W0613
  """An unterminated ${env.X reference must raise RuntimeError."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-default.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>foo</name>',
      '    <value>${env.foo</value>',
      '  </property>',
      '</configuration>')))
  with patch('sys.stderr', new_callable=StringIO):
    self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testReadConfigFileEnvironmentOverride(self, findConfigFile, environ):
  """A ${env.VAR} value must expand from the (mocked) environment."""
  environ.get.return_value = None
  findConfigFile.side_effect = self.files.get
  configuration.Configuration.clear()
  with open(self.files['nupic-default.xml'], 'w') as fp:
    fp.write('\n'.join((
      '<?xml version="1.0"?>',
      '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
      '<configuration>',
      '  <property>',
      '    <name>foo</name>',
      '    <value>${env.NTA_CONF_PROP_foo}</value>',
      '  </property>',
      '</configuration>')))
  env = {'NTA_CONF_PROP_foo': 'bar'}
  environ.__getitem__.side_effect = env.__getitem__
  environ.get.side_effect = env.get
  environ.__contains__.side_effect = env.__contains__
  result = configuration.Configuration.get('foo')
  self.assertEqual(result, 'bar')
@patch.object(configuration.Configuration, 'getConfigPaths',
              spec=configuration.Configuration.getConfigPaths)
def testFindConfigFile(self, getConfigPaths):
  """findConfigFile must locate an existing file on the config path.

  Improvement: assertEqual replaces assertTrue(result == ...) so a
  failure reports the actual path returned.
  """
  prefix, _, filename = self.files['nupic-default.xml'].rpartition(os.sep)
  def replacePaths(**_):
    # Restrict the search path to the temp fixture directory.
    return [prefix]
  getConfigPaths.side_effect = replacePaths
  configuration.Configuration.clear()
  result = configuration.Configuration.findConfigFile(filename)
  self.assertEqual(result, self.files['nupic-default.xml'])
  getConfigPaths.assert_called_with()
@patch.object(configuration.Configuration, 'getConfigPaths',
              spec=configuration.Configuration.getConfigPaths)
def testFindConfigFileReturnsNoneForMissingFile(self, getConfigPaths):
  """findConfigFile must return None for a file not on the config path.

  Improvement: assertIsNone replaces assertTrue(result is None).
  """
  prefix, _, _ = self.files['nupic-default.xml'].rpartition(os.sep)
  def replacePaths(**_):
    # Restrict the search path to the temp fixture directory.
    return [prefix]
  getConfigPaths.side_effect = replacePaths
  configuration.Configuration.clear()
  result = configuration.Configuration.findConfigFile(str(uuid.uuid4()))
  self.assertIsNone(result)
  getConfigPaths.assert_called_with()
@patch.object(configuration.Configuration, '_configPaths',
              spec=configuration.Configuration._configPaths)
@patch.object(configuration.os, 'environ', spec=dict)
def testGetConfigPaths(
    self, environ, configPaths):  # pylint: disable=W0613
  """getConfigPaths must return the cached _configPaths when already set."""
  result = configuration.Configuration.getConfigPaths()
  self.assertEqual(result, configPaths)
@unittest.skip('NUP-2081')
@patch.object(configuration.Configuration, '_configPaths',
              spec=configuration.Configuration._configPaths)
@patch.object(configuration.os, 'environ', spec=dict)
def testGetConfigPathsForNone(
    self, environ, configPaths):  # pylint: disable=W0613
  """With no cache and no env override, getConfigPaths must return the
  packaged default config directory.  Skipped pending NUP-2081."""
  configuration.Configuration._configPaths = None  # pylint: disable=W0212
  result = configuration.Configuration.getConfigPaths()
  self.assertTrue(isinstance(result, list))
  self.assertListEqual(result, [resource_filename("nupic",
                                                  os.path.join("config",
                                                               "default"))])
@patch.object(configuration.Configuration, '_configPaths',
              spec=configuration.Configuration._configPaths)
@patch.object(configuration.os, 'environ', spec=dict)
def testGetConfigPathsForNoneWithNTA_CONF_PATHInEnv(
    self, environ, configPaths):  # pylint: disable=W0613
  """With NTA_CONF_PATH set (even empty), it must become the sole config path."""
  configuration.Configuration._configPaths = None  # pylint: disable=W0212
  env = {'NTA_CONF_PATH': ''}
  environ.__getitem__.side_effect = env.__getitem__
  environ.get.side_effect = env.get
  environ.__contains__.side_effect = env.__contains__
  result = configuration.Configuration.getConfigPaths()
  self.assertTrue(isinstance(result, list))
  self.assertEqual(len(result), 1)
  self.assertEqual(result[0], env['NTA_CONF_PATH'])
def testSetConfigPathsForNoneWithNTA_CONF_PATHInEnv(self):
  """setConfigPaths must store the given list in _configPaths verbatim."""
  expectedPaths = [Mock()]
  configuration.Configuration.setConfigPaths(expectedPaths)
  actualPaths = configuration.Configuration._configPaths  # pylint: disable=W0212
  self.assertEqual(expectedPaths, actualPaths)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testEmptyGetCustomDict(self, findConfigFile, environMock):
  """getCustomDict must be empty immediately after resetCustomConfig."""
  findConfigFile.side_effect = self.files.get
  environMock.__getitem__.side_effect = dict(
    NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
  configuration.Configuration.resetCustomConfig()
  self.assertEqual(configuration.Configuration.getCustomDict(), dict())
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
              spec=configuration.Configuration.findConfigFile)
def testConfiguration(self, findConfigFile, environ):
  """End-to-end check of layered config loading, ${env.*} expansion,
  NTA_CONF_PROP_* overrides, and in-memory set().

  Note: patching configuration.os.environ patches the attribute on the
  shared os module object, so os.environ in this test body resolves to
  the same mock.
  """
  configuration.Configuration.clear()
  findConfigFile.side_effect = self.files.get
  # Replace the default and site files with the two layered test fixtures.
  with open(self.files['nupic-default.xml'], 'w') as fp:
    with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
      fp.write(inp.read())
  with open(self.files['nupic-site.xml'], 'w') as fp:
    with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
      fp.write(inp.read())
  env = {'USER': 'foo', 'HOME': 'bar'}
  environ.__getitem__.side_effect = env.__getitem__
  environ.get.side_effect = env.get
  environ.__contains__.side_effect = env.__contains__
  environ.keys.side_effect = env.keys
  # Test the resulting configuration
  self.assertEqual(configuration.Configuration.get('database.host'),
                   'TestHost')
  self.assertEqual(configuration.Configuration.get('database.password'),
                   'pass')
  self.assertEqual(configuration.Configuration.get('database.emptypassword'),
                   '')
  self.assertEqual(configuration.Configuration.get('database.missingfield'),
                   None)
  self.assertEqual(configuration.Configuration.get('database.user'), 'root')
  expectedValue = 'foo'
  actualValue = configuration.Configuration.get(
    'var.environment.standalone.user')
  self.assertTrue(actualValue == expectedValue,
                  "expected %r, but got %r" % (expectedValue, actualValue))
  expectedValue = "The user " + os.environ['USER'] + " rocks!"
  actualValue = configuration.Configuration.get(
    'var.environment.user.in.the.middle')
  self.assertTrue(actualValue == expectedValue,
                  "expected %r, but got %r" % (expectedValue, actualValue))
  expectedValue = ("User " + os.environ['USER'] + " and home " +
                   os.environ['HOME'] + " in the middle")
  actualValue = configuration.Configuration.get(
    'var.environment.user.and.home.in.the.middle')
  self.assertTrue(actualValue == expectedValue,
                  "expected %r, but got %r" % (expectedValue, actualValue))
  # An NTA_CONF_PROP_* variable overrides the file-provided value...
  env['NTA_CONF_PROP_database_host'] = 'FooBar'
  self.assertEqual(configuration.Configuration.get('database.host'), 'FooBar')
  allProps = configuration.Configuration.dict()
  self.assertTrue(allProps['database.host'] == 'FooBar')
  # ...and removing it restores normal lookup.
  del env['NTA_CONF_PROP_database_host']
  environ.__getitem__.side_effect = env.__getitem__
  environ.get.side_effect = env.get
  environ.__contains__.side_effect = env.__contains__
  # Change a property
  configuration.Configuration.set('database.host', 'matrix')
  self.assertEqual(configuration.Configuration.get('database.host'), 'matrix')
@patch.object(configuration.os, 'environ', spec=dict)
def testConfiguration2(self, environ):
configuration.Configuration.clear()
tmpDir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir)
with open(os.path.join(tmpDir, 'nupic-default.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
fp.write(inp.read())
with open(os.path.join(tmpDir, 'nupic-site.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
fp.write(inp.read())
env = {
'USER': 'foo',
'HOME': 'bar',
'NTA_CONF_PATH': tmpDir
}
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
environ.keys.side_effect = env.keys
# Test the resulting configuration
self.assertEqual(configuration.Configuration.get('database.host'),
'TestHost')
self.assertEqual(configuration.Configuration.get('database.password'),
'pass')
self.assertEqual(
configuration.Configuration.get('database.emptypassword'), '')
self.assertEqual(configuration.Configuration.get('database.missingfield'),
None)
self.assertEqual(configuration.Configuration.get('database.user'), 'root')
expectedValue = 'foo'
actualValue = configuration.Configuration.get(
'var.environment.standalone.user')
self.assertEqual(actualValue, expectedValue,
"expected %r, but got %r" % (expectedValue, actualValue))
expectedValue = "The user " + os.environ['USER'] + " rocks!"
actualValue = configuration.Configuration.get(
'var.environment.user.in.the.middle')
self.assertEqual(actualValue, expectedValue,
"expected %r, but got %r" % (expectedValue, actualValue))
expectedValue = ("User " + os.environ['USER'] + " and home " +
os.environ['HOME'] + " in the middle")
actualValue = configuration.Configuration.get(
'var.environment.user.and.home.in.the.middle')
self.assertEqual(actualValue, expectedValue,
"expected %r, but got %r" % (expectedValue, actualValue))
env['NTA_CONF_PROP_database_host'] = 'FooBar'
self.assertEqual(configuration.Configuration.get('database.host'),
'FooBar')
allProps = configuration.Configuration.dict()
self.assertEqual(allProps['database.host'], 'FooBar')
del env['NTA_CONF_PROP_database_host']
# Change a property
configuration.Configuration.set('database.host', 'matrix')
self.assertEqual(configuration.Configuration.get('database.host'),
'matrix')
configuration.Configuration.clear()
tmpDir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir)
with open(os.path.join(tmpDir, 'nupic-default.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
fp.write(inp.read())
with open(os.path.join(tmpDir, 'nupic-site.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
fp.write(inp.read())
tmpDir2 = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir2)
with open(os.path.join(tmpDir2, 'nupic-site.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile3.xml')) as inp:
fp.write(inp.read())
env['NTA_CONF_PATH'] = os.pathsep.join([tmpDir, tmpDir2])
# Test the resulting configuration
self.assertEqual(configuration.Configuration.get('database.host'),
'TestHost')
self.assertEqual(configuration.Configuration.get('database.password'),
'pass')
self.assertEqual(
configuration.Configuration.get('database.emptypassword'), '')
self.assertEqual(configuration.Configuration.get('database.missingfield'),
None)
self.assertEqual(configuration.Configuration.get('database.user'),
'root')
# Change a property
configuration.Configuration.set('database.host', 'matrix')
self.assertEqual(configuration.Configuration.get('database.host'),
'matrix')
if __name__ == '__main__':
  # Run under unittest2's runner, forcing verbose output while preserving any
  # extra command-line arguments the caller supplied.
  unittest.main(argv=[sys.argv[0], "--verbose"] + sys.argv[1:])
| 42,118 | Python | .py | 841 | 43.057075 | 89 | 0.68969 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,117 | configuration_test.py | numenta_nupic-legacy/tests/unit/nupic/support/configuration_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import shutil
from StringIO import StringIO
import sys
import tempfile
import unittest2 as unittest
import uuid
from pkg_resources import resource_filename
from mock import Mock, patch
from pkg_resources import resource_filename
from xml.parsers.expat import ExpatError
# ParseError not present in xml module for python2.6
try:
from xml.etree.ElementTree import ParseError
except ImportError:
from xml.parsers.expat import ExpatError as ParseError
import nupic
import nupic.support.configuration_base as configuration
class ConfigurationTest(unittest.TestCase):
  """Unit tests for nupic.support.configuration_base.Configuration.

  Configuration exposes static methods backed by module-level state, so every
  test first calls Configuration.clear() and patches os.environ (and usually
  Configuration.findConfigFile) to stay isolated from the host system and
  from the other tests.
  """

  def setUp(self):
    """configuration.Configuration relies on static methods
    which load files by name. Since we need to be able to run tests and
    potentially change the content of those files between tests without
    interfering with one another and with the system configuration, this
    setUp() function will allocate temporary files used only during the using
    conf/nupic-default.xml and conf/nupic-site.xml (relative to the unit tests)
    as templates.
    """
    # Maps canonical config file name -> temp-file path; typically installed
    # as the side effect of a mocked findConfigFile (self.files.get).
    self.files = {}
    with tempfile.NamedTemporaryFile(
        prefix='nupic-default.xml-unittest-', delete=False) as outp:
      self.addCleanup(os.remove, outp.name)
      with open(resource_filename(__name__, 'conf/nupic-default.xml')) as inp:
        outp.write(inp.read())
      self.files['nupic-default.xml'] = outp.name
    with tempfile.NamedTemporaryFile(
        prefix='nupic-site.xml-unittest-', delete=False) as outp:
      self.addCleanup(os.remove, outp.name)
      with open(resource_filename(__name__, 'conf/nupic-site.xml')) as inp:
        outp.write(inp.read())
      self.files['nupic-site.xml'] = outp.name

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetStringMissingRaisesKeyError(self, findConfigFileMock, environMock):
    """getString() raises KeyError for a property defined nowhere."""
    findConfigFileMock.side_effect = self.files.get
    environMock.get.return_value = None
    configuration.Configuration.clear()
    with self.assertRaises(KeyError):
      configuration.Configuration.getString(uuid.uuid1().hex)

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetString(self, findConfigFileMock, environMock):
    """getString() returns the raw string value of a set property."""
    environMock.get.return_value = None
    findConfigFileMock.side_effect = self.files.get
    configuration.Configuration.clear()
    configuration.Configuration.set('foo', 'bar')
    result = configuration.Configuration.getString('foo')
    self.assertEqual(result, 'bar')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetBoolMissingRaisesKeyError(self, findConfigFileMock, environMock):
    """getBool() raises KeyError for a property defined nowhere."""
    findConfigFileMock.side_effect = self.files.get
    environMock.get.return_value = None
    configuration.Configuration.clear()
    with self.assertRaises(KeyError):
      configuration.Configuration.getBool(uuid.uuid1().hex)

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetBoolOutOfRangeRaisesValueError(self, findConfigFileMock,
                                            environMock):
    """getBool() rejects integer values outside {0, 1}."""
    environMock.get.return_value = None
    findConfigFileMock.side_effect = self.files.get
    configuration.Configuration.clear()
    configuration.Configuration.set('foobool2', '2')
    with self.assertRaises(ValueError):
      configuration.Configuration.getBool('foobool2')
    configuration.Configuration.set('fooboolneg1', '-1')
    with self.assertRaises(ValueError):
      configuration.Configuration.getBool('fooboolneg1')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetBool(self, findConfigFileMock, environMock):
    """getBool() maps '0' -> False and '1' -> True."""
    environMock.get.return_value = None
    findConfigFileMock.side_effect = self.files.get
    configuration.Configuration.clear()
    configuration.Configuration.set('foobool0', '0')
    result = configuration.Configuration.getBool('foobool0')
    self.assertEqual(result, False)
    configuration.Configuration.set('foobool1', '1')
    result = configuration.Configuration.getBool('foobool1')
    self.assertEqual(result, True)

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetIntMissingRaisesKeyError(self, findConfigFileMock, environMock):
    """getInt() raises KeyError for a property defined nowhere."""
    findConfigFileMock.side_effect = self.files.get
    environMock.get.return_value = None
    configuration.Configuration.clear()
    with self.assertRaises(KeyError):
      configuration.Configuration.getInt(uuid.uuid1().hex)

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetInt(self, findConfigFileMock, environMock):
    """getInt() parses a (possibly negative) integer string."""
    environMock.get.return_value = None
    findConfigFileMock.side_effect = self.files.get
    configuration.Configuration.clear()
    configuration.Configuration.set('fooint', '-127')
    result = configuration.Configuration.getInt('fooint')
    self.assertEqual(result, -127)

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetFloatMissingRaisesKeyError(self, findConfigFileMock, environMock):
    """getFloat() raises KeyError for a property defined nowhere."""
    findConfigFileMock.side_effect = self.files.get
    environMock.get.return_value = None
    configuration.Configuration.clear()
    with self.assertRaises(KeyError):
      configuration.Configuration.getFloat(uuid.uuid1().hex)

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetFloat(self, findConfigFileMock, environMock):
    """getFloat() parses a (possibly negative) float string."""
    environMock.get.return_value = None
    findConfigFileMock.side_effect = self.files.get
    configuration.Configuration.clear()
    configuration.Configuration.set('foofloat', '-127.65')
    result = configuration.Configuration.getFloat('foofloat')
    self.assertEqual(result, -127.65)

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetMissingReturnsNone(self, findConfigFile, environ):
    """get() (unlike the typed getters) returns None for a missing key."""
    findConfigFile.side_effect = self.files.get
    environ.get.return_value = None
    configuration.Configuration.clear()
    result = configuration.Configuration.get(uuid.uuid1().hex)
    # assertIsNone gives a clearer failure message than assertTrue(x is None).
    self.assertIsNone(result)

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testSetAndGet(self, findConfigFile, environ):
    """A value stored with set() is returned by get()."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    configuration.Configuration.set('foo', 'bar')
    result = configuration.Configuration.get('foo')
    self.assertEqual(result, 'bar')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testDict(self, findConfigFile, environ):
    """dict() exposes all set() properties as a plain dictionary."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    configuration.Configuration.set('foo', 'bar')
    configuration.Configuration.set('apple', 'banana')
    result = configuration.Configuration.dict()
    self.assertIsInstance(result, dict)
    self.assertIn('foo', result)
    self.assertEqual(result['foo'], 'bar')
    self.assertIn('apple', result)
    self.assertEqual(result['apple'], 'banana')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testDictReadsFilesFirstTime(self, findConfigFile,
                                  environ):  # pylint: disable=W0613
    """First dict() call lazily loads the config files (one 'dummy' prop)."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    result = configuration.Configuration.dict()
    self.assertIsInstance(result, dict)
    self.assertEqual(len(result), 1, result)

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testDictReplacesKeysFromEnvironment(self, findConfigFile, environ):
    """NTA_CONF_PROP_<key> environment variables appear in dict()."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    key = uuid.uuid1().hex
    env = {'NTA_CONF_PROP_' + key: 'foo'}
    environ.keys.side_effect = env.keys
    environ.__getitem__.side_effect = env.__getitem__
    result = configuration.Configuration.dict()
    self.assertIn(key, result)
    self.assertEqual(result[key], 'foo')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testClear(self, findConfigFile, environ):
    """clear() discards all previously set() properties."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    configuration.Configuration.set('foo', 'bar')
    configuration.Configuration.set('apple', 'banana')
    self.assertEqual(configuration.Configuration.get('foo'), 'bar')
    self.assertEqual(configuration.Configuration.get('apple'), 'banana')
    configuration.Configuration.clear()
    self.assertIsNone(configuration.Configuration.get('foo'))
    self.assertIsNone(configuration.Configuration.get('apple'))

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testGetFromEnvironment(self, findConfigFile, environ):
    """get() falls back to the NTA_CONF_PROP_<key> environment variable."""
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    key = uuid.uuid1().hex
    environ.get.side_effect = {'NTA_CONF_PROP_' + key: 'foo'}.get
    self.assertEqual(configuration.Configuration.get(key), 'foo')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testReadConfigFileFromPath(self, findConfigFile, environ):
    """readConfigFile() loads properties from an explicit path + filename."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    prefix, _, filename = self.files['nupic-default.xml'].rpartition(os.sep)
    configuration.Configuration.readConfigFile(filename, prefix)
    self.assertEqual(configuration.Configuration.get('dummy'), 'dummy value')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testReadConfigFileUnexpectedElementAtRoot(self, findConfigFile, environ):
    """A root element other than <configuration> raises RuntimeError."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    with open(self.files['nupic-default.xml'], 'w') as outp:
      outp.write('\n'.join((
          '<?xml version="1.0"?>',
          '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
          '<foo/>')))
      outp.flush()
    # Silence the error the parser prints to stderr.
    with patch('sys.stderr', new_callable=StringIO):
      self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testReadConfigFileMissingDocumentRoot(self, findConfigFile, environ):
    """A config file with no document root fails to parse."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    with open(self.files['nupic-default.xml'], 'w') as outp:
      outp.write('\n'.join((
          '<?xml version="1.0"?>',
          '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>')))
      outp.flush()
    with patch('sys.stderr', new_callable=StringIO):
      # Either parser error type may surface depending on the Python version.
      self.assertRaises((ExpatError, ParseError),
                        configuration.Configuration.get, 'foo')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testReadConfigFileMissingNonPropertyConfigurationChildren(
      self, findConfigFile, environ):
    """Non-<property> children of <configuration> are silently ignored."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    with open(self.files['nupic-default.xml'], 'w') as outp:
      outp.write('\n'.join((
          '<?xml version="1.0"?>',
          '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
          '<configuration>',
          ' <foo>bar<baz/></foo>',
          '</configuration>')))
      outp.flush()
    # Only the 'dummy' property from the (unmodified) site file remains.
    self.assertEqual(configuration.Configuration.dict(),
                     dict(dummy='dummy value'))

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testReadConfigFileEmptyValue(self, findConfigFile, environ):
    """A <property> with a <name> but no <value> raises."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    with open(self.files['nupic-default.xml'], 'w') as outp:
      outp.write('\n'.join((
          '<?xml version="1.0"?>',
          '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
          '<configuration>',
          ' <property>',
          '  <name>foo</name>',
          ' </property>',
          '</configuration>')))
      outp.flush()
    with patch('sys.stderr', new_callable=StringIO):
      self.assertRaises(Exception, configuration.Configuration.get, 'foo')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testReadConfigFileEmptyNameAndValue(self, findConfigFile, environ):
    """Empty <name> and <value> elements raise RuntimeError."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    with open(self.files['nupic-default.xml'], 'w') as outp:
      outp.write('\n'.join((
          '<?xml version="1.0"?>',
          '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
          '<configuration>',
          ' <property>',
          '  <name></name>',
          '  <value></value>',
          ' </property>',
          '</configuration>')))
      outp.flush()
    with patch('sys.stderr', new_callable=StringIO):
      self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testReadConfigFileMissingEnvVars(self, findConfigFile, environ):
    """A ${env.X} reference to an unset environment variable raises."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    with open(self.files['nupic-default.xml'], 'w') as outp:
      outp.write('\n'.join((
          '<?xml version="1.0"?>',
          '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
          '<configuration>',
          ' <property>',
          '  <name>foo</name>',
          '  <value>${env.foo}</value>',
          ' </property>',
          '</configuration>')))
      outp.flush()
    with patch('sys.stderr', new_callable=StringIO):
      self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testReadConfigFileMalformedEnvReference(self, findConfigFile,
                                              environ):  # pylint: disable=W0613
    """A ${env.X reference missing its closing brace raises RuntimeError."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    with open(self.files['nupic-default.xml'], 'w') as outp:
      outp.write('\n'.join((
          '<?xml version="1.0"?>',
          '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
          '<configuration>',
          ' <property>',
          '  <name>foo</name>',
          '  <value>${env.foo</value>',
          ' </property>',
          '</configuration>')))
      outp.flush()
    with patch('sys.stderr', new_callable=StringIO):
      self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testReadConfigFileEnvironmentOverride(self, findConfigFile, environ):
    """${env.NTA_CONF_PROP_foo} is substituted from the environment."""
    environ.get.return_value = None
    findConfigFile.side_effect = self.files.get
    configuration.Configuration.clear()
    with open(self.files['nupic-default.xml'], 'w') as outp:
      outp.write('\n'.join((
          '<?xml version="1.0"?>',
          '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
          '<configuration>',
          ' <property>',
          '  <name>foo</name>',
          '  <value>${env.NTA_CONF_PROP_foo}</value>',
          ' </property>',
          '</configuration>')))
      outp.flush()
    env = {'NTA_CONF_PROP_foo': 'bar'}
    environ.__getitem__.side_effect = env.__getitem__
    environ.get.side_effect = env.get
    environ.__contains__.side_effect = env.__contains__
    result = configuration.Configuration.get('foo')
    self.assertEqual(result, 'bar')

  @patch.object(configuration.Configuration, 'getConfigPaths',
                spec=configuration.Configuration.getConfigPaths)
  def testFindConfigFile(self, getConfigPaths):
    """findConfigFile() locates a file inside the configured search paths."""
    prefix, _, filename = self.files['nupic-default.xml'].rpartition(os.sep)

    def replacePaths(**_):
      return [prefix]

    getConfigPaths.side_effect = replacePaths
    configuration.Configuration.clear()
    result = configuration.Configuration.findConfigFile(filename)
    self.assertEqual(result, self.files['nupic-default.xml'])
    getConfigPaths.assert_called_with()

  @patch.object(configuration.Configuration, 'getConfigPaths',
                spec=configuration.Configuration.getConfigPaths)
  def testFindConfigFileReturnsNoneForMissingFile(self, getConfigPaths):
    """findConfigFile() returns None when no search path holds the file."""
    prefix, _, _ = self.files['nupic-default.xml'].rpartition(os.sep)

    def replacePaths(**_):
      return [prefix]

    getConfigPaths.side_effect = replacePaths
    configuration.Configuration.clear()
    result = configuration.Configuration.findConfigFile(uuid.uuid1().hex)
    self.assertIsNone(result)
    getConfigPaths.assert_called_with()

  @patch.object(configuration.Configuration, '_configPaths',
                spec=configuration.Configuration._configPaths)
  @patch.object(configuration.os, 'environ', spec=dict)
  def testGetConfigPaths(
      self, environ, configPaths):  # pylint: disable=W0613
    """getConfigPaths() returns the cached _configPaths when already set."""
    result = configuration.Configuration.getConfigPaths()
    self.assertEqual(result, configPaths)

  @unittest.skip('NUP-2081')
  @patch.object(configuration.Configuration, '_configPaths',
                spec=configuration.Configuration._configPaths)
  @patch.object(configuration.os, 'environ', spec=dict)
  def testGetConfigPathsForNone(
      self, environ, configPaths):  # pylint: disable=W0613
    """With no cache and no NTA_CONF_PATH, the packaged default dir is used."""
    configuration.Configuration._configPaths = None  # pylint: disable=W0212
    result = configuration.Configuration.getConfigPaths()
    self.assertIsInstance(result, list)
    self.assertListEqual(result, [resource_filename("nupic",
                                                    os.path.join("config",
                                                                 "default"))])

  @patch.object(configuration.Configuration, '_configPaths',
                spec=configuration.Configuration._configPaths)
  @patch.object(configuration.os, 'environ', spec=dict)
  def testGetConfigPathsForNoneWithNTA_CONF_PATHInEnv(
      self, environ, configPaths):  # pylint: disable=W0613
    """With no cache, NTA_CONF_PATH from the environment wins."""
    configuration.Configuration._configPaths = None  # pylint: disable=W0212
    env = {'NTA_CONF_PATH': ''}
    environ.__getitem__.side_effect = env.__getitem__
    environ.get.side_effect = env.get
    environ.__contains__.side_effect = env.__contains__
    result = configuration.Configuration.getConfigPaths()
    self.assertIsInstance(result, list)
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0], env['NTA_CONF_PATH'])

  def testSetConfigPathsForNoneWithNTA_CONF_PATHInEnv(self):
    """setConfigPaths() installs the given list as _configPaths verbatim."""
    paths = [Mock()]
    configuration.Configuration.setConfigPaths(paths)
    self.assertEqual(
        paths,
        configuration.Configuration._configPaths)  # pylint: disable=W0212

  @patch.object(configuration.os, 'environ', spec=dict)
  @patch.object(configuration.Configuration, 'findConfigFile',
                spec=configuration.Configuration.findConfigFile)
  def testConfiguration(self, findConfigFile, environ):
    """End-to-end: file merge, ${env.*} substitution, env override, set()."""
    configuration.Configuration.clear()
    findConfigFile.side_effect = self.files.get
    with open(self.files['nupic-default.xml'], 'w') as outp:
      with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
        outp.write(inp.read())
    with open(self.files['nupic-site.xml'], 'w') as outp:
      with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
        outp.write(inp.read())
    env = {'USER': 'foo', 'HOME': 'bar'}
    environ.__getitem__.side_effect = env.__getitem__
    environ.get.side_effect = env.get
    environ.__contains__.side_effect = env.__contains__
    environ.keys.side_effect = env.keys
    # Test the resulting configuration
    self.assertEqual(configuration.Configuration.get('database.host'),
                     'TestHost')
    self.assertEqual(configuration.Configuration.get('database.password'),
                     'pass')
    self.assertEqual(configuration.Configuration.get('database.emptypassword'),
                     '')
    self.assertEqual(configuration.Configuration.get('database.missingfield'),
                     None)
    self.assertEqual(configuration.Configuration.get('database.user'), 'root')
    expectedValue = 'foo'
    actualValue = configuration.Configuration.get(
        'var.environment.standalone.user')
    self.assertEqual(actualValue, expectedValue,
                     "expected %r, but got %r" % (expectedValue, actualValue))
    expectedValue = "The user " + os.environ['USER'] + " rocks!"
    actualValue = configuration.Configuration.get(
        'var.environment.user.in.the.middle')
    self.assertEqual(actualValue, expectedValue,
                     "expected %r, but got %r" % (expectedValue, actualValue))
    expectedValue = ("User " + os.environ['USER'] + " and home " +
                     os.environ['HOME'] + " in the middle")
    actualValue = configuration.Configuration.get(
        'var.environment.user.and.home.in.the.middle')
    self.assertEqual(actualValue, expectedValue,
                     "expected %r, but got %r" % (expectedValue, actualValue))
    # NTA_CONF_PROP_* environment variables override file-based values.
    env['NTA_CONF_PROP_database_host'] = 'FooBar'
    self.assertEqual(configuration.Configuration.get('database.host'), 'FooBar')
    allProps = configuration.Configuration.dict()
    self.assertEqual(allProps['database.host'], 'FooBar')
    del env['NTA_CONF_PROP_database_host']
    environ.__getitem__.side_effect = env.__getitem__
    environ.get.side_effect = env.get
    environ.__contains__.side_effect = env.__contains__
    # Change a property
    configuration.Configuration.set('database.host', 'matrix')
    self.assertEqual(configuration.Configuration.get('database.host'), 'matrix')

  @patch.object(configuration.os, 'environ', spec=dict)
  def testConfiguration2(self, environ):
    """Like testConfiguration but locating files via NTA_CONF_PATH, including
    a second pass with two directories on the path."""
    configuration.Configuration.clear()
    tmpDir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, tmpDir)
    with open(os.path.join(tmpDir, 'nupic-default.xml'), 'w') as fp:
      with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
        fp.write(inp.read())
    with open(os.path.join(tmpDir, 'nupic-site.xml'), 'w') as fp:
      with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
        fp.write(inp.read())
    env = {
        'USER': 'foo',
        'HOME': 'bar',
        'NTA_CONF_PATH': tmpDir
    }
    environ.__getitem__.side_effect = env.__getitem__
    environ.get.side_effect = env.get
    environ.__contains__.side_effect = env.__contains__
    environ.keys.side_effect = env.keys
    # Test the resulting configuration
    self.assertEqual(configuration.Configuration.get('database.host'),
                     'TestHost')
    self.assertEqual(configuration.Configuration.get('database.password'),
                     'pass')
    self.assertEqual(
        configuration.Configuration.get('database.emptypassword'), '')
    self.assertEqual(configuration.Configuration.get('database.missingfield'),
                     None)
    self.assertEqual(configuration.Configuration.get('database.user'), 'root')
    expectedValue = 'foo'
    actualValue = configuration.Configuration.get(
        'var.environment.standalone.user')
    self.assertEqual(actualValue, expectedValue,
                     "expected %r, but got %r" % (expectedValue, actualValue))
    expectedValue = "The user " + os.environ['USER'] + " rocks!"
    actualValue = configuration.Configuration.get(
        'var.environment.user.in.the.middle')
    self.assertEqual(actualValue, expectedValue,
                     "expected %r, but got %r" % (expectedValue, actualValue))
    expectedValue = ("User " + os.environ['USER'] + " and home " +
                     os.environ['HOME'] + " in the middle")
    actualValue = configuration.Configuration.get(
        'var.environment.user.and.home.in.the.middle')
    self.assertEqual(actualValue, expectedValue,
                     "expected %r, but got %r" % (expectedValue, actualValue))
    env['NTA_CONF_PROP_database_host'] = 'FooBar'
    self.assertEqual(configuration.Configuration.get('database.host'),
                     'FooBar')
    allProps = configuration.Configuration.dict()
    self.assertEqual(allProps['database.host'], 'FooBar')
    del env['NTA_CONF_PROP_database_host']
    # Change a property
    configuration.Configuration.set('database.host', 'matrix')
    self.assertEqual(configuration.Configuration.get('database.host'),
                     'matrix')
    # Second pass: two config directories on NTA_CONF_PATH.
    configuration.Configuration.clear()
    tmpDir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, tmpDir)
    with open(os.path.join(tmpDir, 'nupic-default.xml'), 'w') as fp:
      with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
        fp.write(inp.read())
    with open(os.path.join(tmpDir, 'nupic-site.xml'), 'w') as fp:
      with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
        fp.write(inp.read())
    tmpDir2 = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, tmpDir2)
    with open(os.path.join(tmpDir2, 'nupic-site.xml'), 'w') as fp:
      with open(resource_filename(__name__, 'conf/testFile3.xml')) as inp:
        fp.write(inp.read())
    env['NTA_CONF_PATH'] = os.pathsep.join([tmpDir, tmpDir2])
    # Test the resulting configuration
    self.assertEqual(configuration.Configuration.get('database.host'),
                     'TestHost')
    self.assertEqual(configuration.Configuration.get('database.password'),
                     'pass')
    self.assertEqual(
        configuration.Configuration.get('database.emptypassword'), '')
    self.assertEqual(configuration.Configuration.get('database.missingfield'),
                     None)
    self.assertEqual(configuration.Configuration.get('database.user'),
                     'root')
    # Change a property
    configuration.Configuration.set('database.host', 'matrix')
    self.assertEqual(configuration.Configuration.get('database.host'),
                     'matrix')
if __name__ == '__main__':
  # Run under unittest2's runner, forcing verbose output while preserving any
  # extra command-line arguments the caller supplied.
  unittest.main(argv=[sys.argv[0], "--verbose"] + sys.argv[1:])
| 29,999 | Python | .py | 599 | 43.230384 | 89 | 0.703108 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,118 | decorators_test.py | numenta_nupic-legacy/tests/unit/nupic/support/decorators_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the nupic.support.decorators module.
TODO: add tests for logEntryExit
"""
from mock import patch, call, Mock
from nupic.support.unittesthelpers.testcasebase import unittest
from nupic.support import decorators
class TestParentException(Exception):
  """Base exception raised by the mocked functions in the retry tests."""
class TestChildException(TestParentException):
  # Subclass of TestParentException. NOTE(review): not referenced in the
  # visible portion of this file; presumably exercised by the retryExceptions
  # test cases — confirm against the rest of the module.
  pass
class RetryDecoratorTest(unittest.TestCase):
  """Unit tests specific to retry decorator.

  All tests patch time.time/time.sleep (via mockSleepTime) so that "time"
  advances deterministically and instantly: sleeping x seconds just bumps a
  counter that time.time() reports.
  """

  def mockSleepTime(self, mockTime, mockSleep):
    """Configures mocks for time.time and time.sleep such that every call
    to time.sleep(x) increments the return value of time.time() by x.

    mockTime: time.time mock
    mockSleep: time.sleep mock
    """

    # Shared mutable clock; a class attribute so both closures see updates.
    class _TimeContainer(object):
      accumulatedTime = 0

    def testTime():
      return _TimeContainer.accumulatedTime

    def testSleep(duration):
      _TimeContainer.accumulatedTime += duration

    mockTime.side_effect = testTime
    mockSleep.side_effect = testSleep

  @patch("time.sleep", autospec=True)
  @patch("time.time", autospec=True)
  def testRetryNoTimeForRetries(self, mockTime, mockSleep):
    """Test that when timeoutSec == 0, function is executed exactly once
    with no retries, and raises an exception on failure.
    """
    self.mockSleepTime(mockTime, mockSleep)

    retryDecorator = decorators.retry(
      timeoutSec=0, initialRetryDelaySec=0.2,
      maxRetryDelaySec=10)

    testFunction = Mock(side_effect=TestParentException("Test exception"),
                        __name__="testFunction", autospec=True)

    with self.assertRaises(TestParentException):
      retryDecorator(testFunction)()

    # No sleeps means no retries were attempted.
    self.assertFalse(mockSleep.called)
    testFunction.assert_called_once_with()

  @patch("time.sleep", autospec=True)
  @patch("time.time", autospec=True)
  def testRetryWaitsInitialRetryDelaySec(self, mockTime, mockSleep):
    """Test that delay times are correct: exponential backoff starting at
    initialRetryDelaySec (2s) and capped at maxRetryDelaySec (10s)."""
    self.mockSleepTime(mockTime, mockSleep)

    retryDecorator = decorators.retry(
      timeoutSec=30, initialRetryDelaySec=2,
      maxRetryDelaySec=10)

    testFunction = Mock(side_effect=TestParentException("Test exception"),
                        __name__="testFunction", autospec=True)

    with self.assertRaises(TestParentException):
      retryDecorator(testFunction)()

    # 2 -> 4 -> 8 -> capped at 10; total sleep 34s exceeds timeoutSec=30.
    self.assertEqual(mockSleep.mock_calls, [call(2), call(4), call(8),
                                            call(10), call(10)])

    self.assertEqual(testFunction.call_count, 6)

  @patch("time.sleep", autospec=True)
  @patch("time.time", autospec=True)
  def testRetryRetryExceptionIncluded(self, mockTime, mockSleep):
    """Test that retry is triggered if raised exception is in
    retryExceptions (a subclass of a listed exception also matches)."""
    self.mockSleepTime(mockTime, mockSleep)

    retryDecorator = decorators.retry(
      timeoutSec=1, initialRetryDelaySec=1,
      maxRetryDelaySec=10, retryExceptions=(TestParentException,))

    @retryDecorator
    def testFunction():
      raise TestChildException("Test exception")

    with self.assertRaises(TestChildException):
      testFunction()

    self.assertEqual(mockSleep.call_count, 1)

  @patch("time.sleep", autospec=True)
  @patch("time.time", autospec=True)
  def testRetryRetryExceptionExcluded(self, mockTime, mockSleep):
    """ Test that retry is not triggered if raised exception is not in
    retryExceptions """
    self.mockSleepTime(mockTime, mockSleep)

    class TestExceptionA(Exception):
      pass

    class TestExceptionB(Exception):
      pass

    retryDecorator = decorators.retry(
      timeoutSec=1, initialRetryDelaySec=1,
      maxRetryDelaySec=10, retryExceptions=(TestExceptionA,))

    @retryDecorator
    def testFunction():
      raise TestExceptionB("Test exception")

    with self.assertRaises(TestExceptionB):
      testFunction()

    # Exception type not listed: propagated immediately, no sleep/retry.
    self.assertEqual(mockSleep.call_count, 0)

  @patch("time.sleep", autospec=True)
  @patch("time.time", autospec=True)
  def testRetryRetryFilter(self, mockTime, mockSleep):
    """Test that if retryFilter is specified and exception is in
    retryExceptions, retries iff retryFilter returns true."""
    self.mockSleepTime(mockTime, mockSleep)

    # Test with retryFilter returning True
    retryDecoratorTrueFilter = decorators.retry(
      timeoutSec=1, initialRetryDelaySec=1,
      maxRetryDelaySec=10, retryExceptions=(TestParentException,),
      retryFilter=lambda _1, _2, _3: True)

    @retryDecoratorTrueFilter
    def testFunctionTrue():
      raise TestChildException("Test exception")

    with self.assertRaises(TestChildException):
      testFunctionTrue()

    self.assertEqual(mockSleep.call_count, 1)

    # Test with retryFilter returning False
    mockSleep.reset_mock()
    retryDecoratorFalseFilter = decorators.retry(
      timeoutSec=1, initialRetryDelaySec=1,
      maxRetryDelaySec=10, retryExceptions=(TestParentException,),
      retryFilter=lambda _1, _2, _3: False)

    @retryDecoratorFalseFilter
    def testFunctionFalse():
      raise TestChildException("Test exception")

    with self.assertRaises(TestChildException):
      testFunctionFalse()

    self.assertEqual(mockSleep.call_count, 0)

  @patch("time.sleep", autospec=True)
  @patch("time.time", autospec=True)
  def testReturnsExpectedWithExpectedArgs(self, mockTime, mockSleep):
    """Test that decorated function receives only expected args and
    that it returns the expected value on success."""
    self.mockSleepTime(mockTime, mockSleep)

    retryDecorator = decorators.retry(
      timeoutSec=30, initialRetryDelaySec=2,
      maxRetryDelaySec=10)

    testFunction = Mock(return_value=321,
                        __name__="testFunction", autospec=True)

    returnValue = retryDecorator(testFunction)(1, 2, a=3, b=4)

    self.assertEqual(returnValue, 321)
    testFunction.assert_called_once_with(1, 2, a=3, b=4)

  @patch("time.sleep", autospec=True)
  @patch("time.time", autospec=True)
  def testNoRetryIfCallSucceeds(self, mockTime, mockSleep):
    """If the initial call succeeds, test that no retries are performed."""
    self.mockSleepTime(mockTime, mockSleep)

    retryDecorator = decorators.retry(
      timeoutSec=30, initialRetryDelaySec=2,
      maxRetryDelaySec=10)

    testFunction = Mock(__name__="testFunction", autospec=True)

    retryDecorator(testFunction)()

    testFunction.assert_called_once_with()

  @patch("time.sleep", autospec=True)
  @patch("time.time", autospec=True)
  def testFailsFirstSucceedsLater(self, mockTime, mockSleep):
    """If initial attempts fail but subsequent attempt succeeds, ensure that
    expected number of retries is performed and expected value is returned."""
    self.mockSleepTime(mockTime, mockSleep)

    retryDecorator = decorators.retry(
      timeoutSec=30, initialRetryDelaySec=2,
      maxRetryDelaySec=10)

    # side_effect list: first two calls raise, third returns 321.
    testFunction = Mock(
      side_effect=[
        TestParentException("Test exception 1"),
        TestParentException("Test exception 2"),
        321
      ],
      __name__="testFunction", autospec=True)

    returnValue = retryDecorator(testFunction)()

    self.assertEqual(returnValue, 321)
    self.assertEqual(testFunction.call_count, 3)
class LogExceptionsTestCase(unittest.TestCase):
  """Unit tests for the nupic.support.decorators.logExceptions decorator."""

  def testLogExceptionsWithoutException(self):
    # The decorator must be transparent on the success path: args, kwargs
    # and the return value all pass through unchanged.
    @decorators.logExceptions()
    def doSomething(*args, **kwargs):
      return args, kwargs

    inputArgs = (1, 2, 3)
    inputKwargs = dict(a="A", b="B", c="C")

    outputArgs, outputKwargs = doSomething(*inputArgs, **inputKwargs)

    # Validate that doSomething got the right inputs
    self.assertEqual(outputArgs, inputArgs)
    self.assertEqual(outputKwargs, inputKwargs)

  def testLogExceptionsWithRuntimeErrorExceptionAndDefaultLogger(self):
    loggerMock = Mock(spec_set=decorators.logging.getLogger())
    with patch.object(decorators.logging, "getLogger", autospec=True,
                      return_value=loggerMock):

      @decorators.logExceptions()
      def doSomething(*args, **kwargs):
        self.assertEqual(args, inputArgs)
        self.assertEqual(kwargs, inputKwargs)
        raise RuntimeError()

      inputArgs = (1, 2, 3)
      inputKwargs = dict(a="A", b="B", c="C")

      # The exception must still propagate after being logged.
      with self.assertRaises(RuntimeError):
        doSomething(*inputArgs, **inputKwargs)

      self.assertEqual(loggerMock.exception.call_count, 1)
      # NOTE(review): call_args[0][0] is the format string itself, so this
      # checks the literal template appears in the first positional arg.
      self.assertIn("Unhandled exception %r from %r. Caller stack:\n%s",
                    loggerMock.exception.call_args[0][0])

  def testLogExceptionsWithRuntimeErrorExceptionAndCustomLogger(self):
    # Same as the default-logger test, but the logger is passed explicitly.
    loggerMock = Mock(spec_set=decorators.logging.getLogger())

    @decorators.logExceptions(loggerMock)
    def doSomething(*args, **kwargs):
      self.assertEqual(args, inputArgs)
      self.assertEqual(kwargs, inputKwargs)
      raise RuntimeError()

    inputArgs = (1, 2, 3)
    inputKwargs = dict(a="A", b="B", c="C")

    with self.assertRaises(RuntimeError):
      doSomething(*inputArgs, **inputKwargs)

    self.assertEqual(loggerMock.exception.call_count, 1)
    self.assertIn("Unhandled exception %r from %r. Caller stack:\n%s",
                  loggerMock.exception.call_args[0][0])

  def testLogExceptionsWithSystemExitExceptionAndDefaultLogger(self):
    loggerMock = Mock(spec_set=decorators.logging.getLogger())
    with patch.object(decorators.logging, "getLogger", autospec=True,
                      return_value=loggerMock):
      # SystemExit is based on BaseException, so we want to make sure that
      # those are handled properly, too
      inputArgs = (1, 2, 3)
      inputKwargs = dict(a="A", b="B", c="C")

      @decorators.logExceptions()
      def doSomething(*args, **kwargs):
        self.assertEqual(args, inputArgs)
        self.assertEqual(kwargs, inputKwargs)
        raise SystemExit()

      with self.assertRaises(SystemExit):
        doSomething(*inputArgs, **inputKwargs)

      self.assertEqual(loggerMock.exception.call_count, 1)
      self.assertIn("Unhandled exception %r from %r. Caller stack:\n%s",
                    loggerMock.exception.call_args[0][0])
# Allow running this test file directly.
if __name__ == '__main__':
  unittest.main()
| 11,119 | Python | .py | 246 | 39.101626 | 78 | 0.716783 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,119 | consoleprinter_test.py | numenta_nupic-legacy/tests/unit/nupic/support/consoleprinter_test/consoleprinter_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import unittest2 as unittest
from nupic.support.console_printer import ConsolePrinterMixin, Tee
# Minimal ConsolePrinterMixin subclass exercised by the test below.
class MyClass(ConsolePrinterMixin):
  """Emits one cPrint message at each verbosity level 0 through 3."""

  def __init__(self):
    ConsolePrinterMixin.__init__(self)

  def run(self):
    """Call cPrint once per level so verbosity filtering can be observed."""
    for level in xrange(0, 4):
      self.cPrint(level, "message at level %d", level)
class ConsolePrinterTest(unittest.TestCase):
def testPrint(self):
mydir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.abspath("console_output.txt")
if os.path.exists(filename):
os.remove(filename)
# Capture output to a file so that we can compare it
with Tee(filename):
c1 = MyClass()
print "Running with default verbosity"
c1.run()
print
print "Running with verbosity 2"
c1.consolePrinterVerbosity = 2
c1.run()
print
print "Running with verbosity 0"
c1.consolePrinterVerbosity = 0
c1.run()
print
c1.cPrint(0, "Message %s two %s", "with", "args")
c1.cPrint(0, "Message with no newline", newline=False)
c1.cPrint(0, " Message with newline")
c1.cPrint(0, "Message with %s and %s",
"no newline", "args", newline=False)
c1.cPrint(0, " Message with %s and %s", "newline", "args")
print "Done"
with self.assertRaises(KeyError):
c1.cPrint(0, "Message", badkw="badvalue")
referenceFilename = os.path.join(mydir, "consoleprinter_output.txt")
expected = open(referenceFilename).readlines()
actual = open(filename).readlines()
print ("Comparing files '%s'" % referenceFilename)
print ("and '%s'" % filename)
self.assertEqual(len(expected), len(actual))
for i in xrange(len(expected)):
self.assertEqual(expected[i].strip(), actual[i].strip())
# Clean up
os.remove(filename)
# Allow running this test file directly.
if __name__ == "__main__":
  unittest.main()
| 2,866 | Python | .py | 71 | 35.788732 | 72 | 0.661121 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,120 | previous_value_model_test.py | numenta_nupic-legacy/tests/unit/nupic/frameworks/opf/previous_value_model_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file tests the operation of the Previous Value Model.
"""
import unittest2 as unittest
from nupic.data import dict_utils
from nupic.frameworks.opf import opf_utils, previous_value_model
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.frameworks.opf.previous_value_model_capnp import (
PreviousValueModelProto)
# Length of the synthetic input sequences fed to the model in each test.
SEQUENCE_LENGTH = 100
def _generateIncreasing():
return [i for i in range(SEQUENCE_LENGTH)]
def _generateDecreasing():
return [SEQUENCE_LENGTH - i for i in range(SEQUENCE_LENGTH)]
def _generateSaw():
return [i % 3 for i in range(SEQUENCE_LENGTH)]
class PreviousValueModelTest(unittest.TestCase):
  """Unit test for the Previous Value Model."""

  def _runNextStep(self, data):
    """Feed `data` through a TemporalNextStep PVM and assert that each
    record's prediction equals the value just fed in (previous-value
    semantics)."""
    model = previous_value_model.PreviousValueModel(
      opf_utils.InferenceType.TemporalNextStep, predictedField ='a')
    inputRecords = (dict_utils.DictObj({'a' : d}) for d in data)
    for i, (inputRecord, expectedInference) in enumerate(zip(inputRecords,
                                                             data)):
      results = model.run(inputRecord)
      self.assertEqual(results.predictionNumber, i)
      self.assertEqual(results.inferences[
        opf_utils.InferenceElement.prediction], expectedInference)
      self.assertEqual(results.inferences[
        opf_utils.InferenceElement.multiStepBestPredictions][1],
        expectedInference)

  def _runMultiStep(self, data):
    """Feed `data` through a TemporalMultiStep PVM (steps 1, 3, 5) and
    assert the same previous-value inference at every step size."""
    model = previous_value_model.PreviousValueModel(
      opf_utils.InferenceType.TemporalMultiStep, predictedField ='a',
      predictionSteps = [1, 3, 5])
    inputRecords = (dict_utils.DictObj({'a' : d}) for d in data)
    for i, (inputRecord, expectedInference) in enumerate(zip(inputRecords,
                                                             data)):
      results = model.run(inputRecord)
      self.assertEqual(results.predictionNumber, i)
      self.assertEqual(results.inferences[
        opf_utils.InferenceElement.prediction], expectedInference)
      self.assertEqual(results.inferences[
        opf_utils.InferenceElement.multiStepBestPredictions][1],
        expectedInference)
      self.assertEqual(results.inferences[
        opf_utils.InferenceElement.multiStepBestPredictions][3],
        expectedInference)
      self.assertEqual(results.inferences[
        opf_utils.InferenceElement.multiStepBestPredictions][5],
        expectedInference)

  def testNextStepIncreasing(self):
    self._runNextStep(_generateIncreasing())

  def testNextStepDecreasing(self):
    self._runNextStep(_generateDecreasing())

  def testNextStepSaw(self):
    self._runNextStep(_generateSaw())

  def testMultiStepIncreasing(self):
    self._runMultiStep(_generateIncreasing())

  def testMultiStepDecreasing(self):
    self._runMultiStep(_generateDecreasing())

  def testMultiStepSaw(self):
    self._runMultiStep(_generateSaw())

  @unittest.skipUnless(
    capnp, "pycapnp is not installed, skipping serialization test.")
  def testCapnpWriteRead(self):
    """Round-trip a model through capnp serialization and verify that the
    deserialized copy matches the original, field by field and in runtime
    behavior."""
    m1 = previous_value_model.PreviousValueModel(
      opf_utils.InferenceType.TemporalMultiStep, predictedField ='a',
      predictionSteps = [1, 3, 5])

    m1.run(dict_utils.DictObj({'a' : 0}))

    # Serialize
    builderProto = PreviousValueModelProto.new_message()
    m1.write(builderProto)

    # Construct reader from populated builder
    readerProto = PreviousValueModelProto.from_bytes(builderProto.to_bytes())

    # Deserialize
    m2 = previous_value_model.PreviousValueModel.read(readerProto)

    self.assertIs(m1.getSchema(), PreviousValueModelProto)
    self.assertIs(m2.getSchema(), PreviousValueModelProto)
    self.assertEqual(m2._numPredictions, m1._numPredictions)
    self.assertEqual(m2.getInferenceType(), m1.getInferenceType())
    self.assertEqual(m2.isLearningEnabled(), m1.isLearningEnabled())
    self.assertEqual(m2.isInferenceEnabled(), m1.isInferenceEnabled())
    self.assertEqual(m2.getInferenceArgs(), m1.getInferenceArgs())
    self.assertEqual(m2._predictedField, m1._predictedField)
    self.assertEqual(m2._fieldNames, m1._fieldNames)
    self.assertEqual(m2._fieldTypes, m1._fieldTypes)
    self.assertEqual(m2._predictionSteps, m1._predictionSteps)

    # Run computes on m1 & m2 and compare results
    r1 = m1.run(dict_utils.DictObj({'a' : 1}))
    r2 = m2.run(dict_utils.DictObj({'a' : 1}))
    self.assertEqual(r2.predictionNumber, r1.predictionNumber)
    self.assertEqual(r2.rawInput, r1.rawInput)
    self.assertEqual(r2.predictionNumber, r1.predictionNumber)
    self.assertEqual(r2.inferences[opf_utils.InferenceElement.prediction],
                     r1.inferences[opf_utils.InferenceElement.prediction])
    self.assertEqual(
      r2.inferences[opf_utils.InferenceElement.multiStepBestPredictions][1],
      r1.inferences[opf_utils.InferenceElement.multiStepBestPredictions][1])
    self.assertEqual(
      r2.inferences[opf_utils.InferenceElement.multiStepBestPredictions][3],
      r1.inferences[opf_utils.InferenceElement.multiStepBestPredictions][3])
    self.assertEqual(
      r2.inferences[opf_utils.InferenceElement.multiStepBestPredictions][5],
      r1.inferences[opf_utils.InferenceElement.multiStepBestPredictions][5])
# Allow running this test file directly.
if __name__ == '__main__':
  unittest.main()
| 6,324 | Python | .py | 132 | 41.80303 | 77 | 0.718521 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,121 | htmpredictionmodel_test.py | numenta_nupic-legacy/tests/unit/nupic/frameworks/opf/htmpredictionmodel_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the htm_prediction_model module."""
import datetime
import unittest2 as unittest
from nupic.frameworks.opf.htm_prediction_model import HTMPredictionModel
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.opf_utils import ModelResult
class HTMPredictionModelTest(unittest.TestCase):
  """HTMPredictionModel unit tests."""

  def testRemoveUnlikelyPredictionsEmpty(self):
    # An empty prediction dict stays empty.
    result = HTMPredictionModel._removeUnlikelyPredictions({}, 0.01, 3)
    self.assertDictEqual(result, {})

  def testRemoveUnlikelyPredictionsSingleValues(self):
    # A lone prediction is kept even when its likelihood is below the
    # threshold (0.001 < 0.01).
    result = HTMPredictionModel._removeUnlikelyPredictions({1: 0.1}, 0.01, 3)
    self.assertDictEqual(result, {1: 0.1})
    result = HTMPredictionModel._removeUnlikelyPredictions({1: 0.001}, 0.01, 3)
    self.assertDictEqual(result, {1: 0.001})

  def testRemoveUnlikelyPredictionsLikelihoodThresholds(self):
    # Entries below the 0.01 likelihood threshold are dropped...
    result = HTMPredictionModel._removeUnlikelyPredictions({1: 0.1, 2: 0.001}, 0.01, 3)
    self.assertDictEqual(result, {1: 0.1})
    # ...unless every entry is below it, in which case the most likely
    # entry survives.
    result = HTMPredictionModel._removeUnlikelyPredictions({1: 0.001, 2: 0.002}, 0.01, 3)
    self.assertDictEqual(result, {2: 0.002})
    result = HTMPredictionModel._removeUnlikelyPredictions({1: 0.002, 2: 0.001}, 0.01, 3)
    self.assertDictEqual(result, {1: 0.002})

  def testRemoveUnlikelyPredictionsMaxPredictions(self):
    # At most maxPredictions (3) of the most likely entries are kept.
    result = HTMPredictionModel._removeUnlikelyPredictions({1: 0.1, 2: 0.2, 3: 0.3},
                                                           0.01, 3)
    self.assertDictEqual(result, {1: 0.1, 2: 0.2, 3: 0.3})
    result = HTMPredictionModel._removeUnlikelyPredictions(
      {1: 0.1, 2: 0.2, 3: 0.3, 4: 0.4}, 0.01, 3)
    self.assertDictEqual(result, {2: 0.2, 3: 0.3, 4: 0.4})

  def testRemoveUnlikelyPredictionsComplex(self):
    # Threshold filtering and the max-count cap applied together.
    result = HTMPredictionModel._removeUnlikelyPredictions(
      {1: 0.1, 2: 0.2, 3: 0.3, 4: 0.004}, 0.01, 3)
    self.assertDictEqual(result, {1: 0.1, 2: 0.2, 3: 0.3})
    result = HTMPredictionModel._removeUnlikelyPredictions(
      {1: 0.1, 2: 0.2, 3: 0.3, 4: 0.4, 5: 0.005}, 0.01, 3)
    self.assertDictEqual(result, {2: 0.2, 3: 0.3, 4: 0.4})
    result = HTMPredictionModel._removeUnlikelyPredictions(
      {1: 0.1, 2: 0.2, 3: 0.3, 4: 0.004, 5: 0.005}, 0.01, 3)
    self.assertDictEqual(result, {1: 0.1, 2: 0.2, 3: 0.3})

  def testTemporalAnomalyModelFactory(self):
    """ Simple test to assert that ModelFactory.create() with a given specific
    Temporal Anomaly configuration will return a model that can return
    inferences
    """
    # Full TemporalAnomaly model configuration: a date + scalar field pair
    # (c0/c1) feeding SP + TM with the classifier disabled.
    modelConfig = (
      {u'aggregationInfo': {u'days': 0,
                            u'fields': [],
                            u'hours': 0,
                            u'microseconds': 0,
                            u'milliseconds': 0,
                            u'minutes': 0,
                            u'months': 0,
                            u'seconds': 0,
                            u'weeks': 0,
                            u'years': 0},
       u'model': u'HTMPrediction',
       u'modelParams': {u'anomalyParams': {u'anomalyCacheRecords': None,
                                           u'autoDetectThreshold': None,
                                           u'autoDetectWaitRecords': 5030},
                        u'clEnable': False,
                        u'clParams': {u'alpha': 0.035828933612158,
                                      u'verbosity': 0,
                                      u'regionName': u'SDRClassifierRegion',
                                      u'steps': u'1'},
                        u'inferenceType': u'TemporalAnomaly',
                        u'sensorParams': {u'encoders': {u'c0_dayOfWeek': None,
                                                        u'c0_timeOfDay': {u'fieldname': u'c0',
                                                                          u'name': u'c0',
                                                                          u'timeOfDay': [21,
                                                                                         9.49122334747737],
                                                                          u'type': u'DateEncoder'},
                                                        u'c0_weekend': None,
                                                        u'c1': {u'fieldname': u'c1',
                                                                u'name': u'c1',
                                                                u'resolution': 0.8771929824561403,
                                                                u'seed': 42,
                                                                u'type': u'RandomDistributedScalarEncoder'}},
                                          u'sensorAutoReset': None,
                                          u'verbosity': 0},
                        u'spEnable': True,
                        u'spParams': {u'potentialPct': 0.8,
                                      u'columnCount': 2048,
                                      u'globalInhibition': 1,
                                      u'inputWidth': 0,
                                      u'boostStrength': 0.0,
                                      u'numActiveColumnsPerInhArea': 40,
                                      u'seed': 1956,
                                      u'spVerbosity': 0,
                                      u'spatialImp': u'cpp',
                                      u'synPermActiveInc': 0.0015,
                                      u'synPermConnected': 0.1,
                                      u'synPermInactiveDec': 0.0005,
                                      },
                        u'tmEnable': True,
                        u'tmParams': {u'activationThreshold': 13,
                                      u'cellsPerColumn': 32,
                                      u'columnCount': 2048,
                                      u'globalDecay': 0.0,
                                      u'initialPerm': 0.21,
                                      u'inputWidth': 2048,
                                      u'maxAge': 0,
                                      u'maxSegmentsPerCell': 128,
                                      u'maxSynapsesPerSegment': 32,
                                      u'minThreshold': 10,
                                      u'newSynapseCount': 20,
                                      u'outputType': u'normal',
                                      u'pamLength': 3,
                                      u'permanenceDec': 0.1,
                                      u'permanenceInc': 0.1,
                                      u'seed': 1960,
                                      u'temporalImp': u'cpp',
                                      u'verbosity': 0},
                        u'trainSPNetOnlyIfRequested': False},
       u'predictAheadTime': None,
       u'version': 1}
    )

    inferenceArgs = {u'inputPredictedField': u'auto',
                     u'predictedField': u'c1',
                     u'predictionSteps': [1]}

    # Three consecutive daily records for fields c0 (datetime) / c1 (scalar).
    data = [
      {'_category': [None],
       '_reset': 0,
       '_sequenceId': 0,
       '_timestamp': datetime.datetime(2013, 12, 5, 0, 0),
       '_timestampRecordIdx': None,
       u'c0': datetime.datetime(2013, 12, 5, 0, 0),
       u'c1': 5.0},
      {'_category': [None],
       '_reset': 0,
       '_sequenceId': 0,
       '_timestamp': datetime.datetime(2013, 12, 6, 0, 0),
       '_timestampRecordIdx': None,
       u'c0': datetime.datetime(2013, 12, 6, 0, 0),
       u'c1': 6.0},
      {'_category': [None],
       '_reset': 0,
       '_sequenceId': 0,
       '_timestamp': datetime.datetime(2013, 12, 7, 0, 0),
       '_timestampRecordIdx': None,
       u'c0': datetime.datetime(2013, 12, 7, 0, 0),
       u'c1': 7.0}
    ]

    model = ModelFactory.create(modelConfig=modelConfig)
    model.enableLearning()
    model.enableInference(inferenceArgs)

    # Each run() on a record must yield a ModelResult instance.
    for row in data:
      result = model.run(row)
      self.assertIsInstance(result, ModelResult)
# Allow running this test file directly.
if __name__ == "__main__":
  unittest.main()
| 9,060 | Python | .py | 171 | 33.900585 | 109 | 0.47445 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,122 | opf_metrics_test.py | numenta_nupic-legacy/tests/unit/nupic/frameworks/opf/opf_metrics_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import unittest2 as unittest
from nupic.frameworks.opf.metrics import getModule, MetricSpec, MetricMulti
class OPFMetricsTest(unittest.TestCase):
DELTA = 0.01
VERBOSITY = 0
def testRMSE(self):
rmse = getModule(MetricSpec("rmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
rmse.addInstance(gt[i], p[i])
target = 6.71
self.assertTrue(abs(rmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testNRMSE(self):
nrmse = getModule(MetricSpec("nrmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
nrmse.addInstance(gt[i], p[i])
target = 3.5856858280031814
self.assertAlmostEqual(nrmse.getMetric()["value"], target)
def testWindowedRMSE(self):
wrmse = getModule(MetricSpec("rmse", None, None,
{"verbosity": OPFMetricsTest.VERBOSITY, "window":3}))
gt = [9, 4, 4, 100, 44]
p = [0, 13, 4, 6, 7]
for gv, pv in zip(gt, p):
wrmse.addInstance(gv, pv)
target = 58.324
self.assertTrue (abs(wrmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testAAE(self):
aae = getModule(MetricSpec("aae", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
aae.addInstance(gt[i], p[i])
target = 6.0
self.assertTrue(abs(aae.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testTrivialAAE(self):
trivialaae = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"aae"}))
gt = [i/4+1 for i in range(100)]
p = [i for i in range(100)]
for i in xrange(len(gt)):
trivialaae.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialaae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testTrivialAccuracy(self):
trivialaccuracy = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"acc"}))
gt = [str(i/4+1) for i in range(100)]
p = [str(i) for i in range(100)]
for i in xrange(len(gt)):
trivialaccuracy.addInstance(gt[i], p[i])
target = .75
self.assertTrue(abs(trivialaccuracy.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testWindowedTrivialAAE (self):
"""Trivial Average Error metric test"""
trivialAveErr = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"avg_err"}))
gt = [str(i/4+1) for i in range(100)]
p = [str(i) for i in range(100)]
for i in xrange(len(gt)):
trivialAveErr.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialAveErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedTrivialAccuract(self):
"""Trivial AAE metric test"""
trivialaae = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"aae"}))
gt = [i/4+1 for i in range(1000)]
p = [i for i in range(1000)]
for i in xrange(len(gt)):
trivialaae.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialaae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testWindowedTrivialAccuracy(self):
"""Trivial Accuracy metric test"""
trivialaccuracy = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"acc"}))
gt = [str(i/4+1) for i in range(1000)]
p = [str(i) for i in range(1000)]
for i in xrange(len(gt)):
trivialaccuracy.addInstance(gt[i], p[i])
target = .75
self.assertTrue(abs(trivialaccuracy.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedTrivialAverageError (self):
"""Trivial Average Error metric test"""
trivialAveErr = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"avg_err"}))
gt = [str(i/4+1) for i in range(500, 1000)]
p = [str(i) for i in range(1000)]
for i in xrange(len(gt)):
trivialAveErr.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialAveErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMultistepAAE(self):
"""Multistep AAE metric test"""
msp = getModule(MetricSpec("multiStep", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps": 3}))
# Make each ground truth 1 greater than the prediction
gt = [i+1 for i in range(100)]
p = [{3: {i: .7, 5: 0.3}} for i in range(100)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
target = 1
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepAAEMultipleSteps(self):
"""Multistep AAE metric test, predicting 2 different step sizes"""
msp = getModule(MetricSpec("multiStep", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps": [3,6]}))
# Make each 3 step prediction +1 over ground truth and each 6 step
# prediction +0.5 over ground truth
gt = [i for i in range(100)]
p = [{3: {i+1: .7, 5: 0.3},
6: {i+0.5: .7, 5: 0.3}} for i in range(100)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
target = 0.75 # average of +1 error and 0.5 error
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepProbability(self):
"""Multistep with probabilities metric test"""
msp = getModule(MetricSpec("multiStepProbability", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps":3}))
gt = [5 for i in range(1000)]
p = [{3: {i: .3, 5: .7}} for i in range(1000)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
#((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100
target = 283.35
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepProbabilityMultipleSteps(self):
"""Multistep with probabilities metric test, predicting 2 different step
sizes"""
msp = getModule(MetricSpec("multiStepProbability", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,
"errorMetric":"aae", "steps": [1,3]}))
gt = [5 for i in range(1000)]
p = [{3: {i: .3, 5: .7},
1: {5: 1.0}} for i in range(1000)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
#(((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100) / 2
# / 2 because the 1-step prediction is 100% accurate
target = 283.35/2
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMovingMeanAbsoluteError(self):
"""Moving mean Average Absolute Error metric test"""
movingMeanAAE = getModule(MetricSpec("moving_mean", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
"errorMetric":"aae"}))
gt = [i for i in range(890)]
gt.extend([2*i for i in range(110)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingMeanAAE.addInstance(gt[i], p[i])
res.append(movingMeanAAE.getMetric()["value"])
self.assertTrue(max(res[1:890]) == 2.0)
self.assertTrue(min(res[891:])>=4.0)
target = 4.0
self.assertTrue(abs(movingMeanAAE.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMovingMeanRMSE(self):
"""Moving mean RMSE metric test"""
movingMeanRMSE = getModule(MetricSpec("moving_mean", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
"errorMetric":"rmse"}))
gt = [i for i in range(890)]
gt.extend([2*i for i in range(110)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingMeanRMSE.addInstance(gt[i], p[i])
res.append(movingMeanRMSE.getMetric()["value"])
self.assertTrue(max(res[1:890]) == 2.0)
self.assertTrue(min(res[891:])>=4.0)
target = 4.0
self.assertTrue(abs(movingMeanRMSE.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testMovingModeAverageError(self):
  """Moving mode Average Error metric test"""
  movingModeAvgErr = getModule(MetricSpec("moving_mode", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
       "errorMetric":"avg_err"}))
  # Should initially asymptote to .5.
  # Then after record 900 should go to 1.0, as the predictions will always
  # be offset from the ground truth.
  gt = [i/4 for i in range(900)]          # py2 integer division: 0,0,0,0,1,...
  gt.extend([2*i/4 for i in range(100)])  # statistics switch at record 900
  p = [i for i in range(1000)]
  res = []
  for i in xrange(len(gt)):
    movingModeAvgErr.addInstance(gt[i], p[i])
    res.append(movingModeAvgErr.getMetric()["value"])
  # Make sure that there is no point where the average error is > .5
  self.assertTrue(max(res[1:890]) == .5)
  # Make sure that after the statistics switch the error goes to 1.0
  self.assertTrue(min(res[891:])>=.5)
  # Make sure that the statistics change is still noticeable while it is
  # in the window
  self.assertTrue(res[998]<1.0)
  target = 1.0
  self.assertTrue(abs(movingModeAvgErr.getMetric()["value"]-target)\
      < OPFMetricsTest.DELTA)
def testMovingModeAccuracy(self):
  """Moving-mode metric using accuracy as the error metric."""
  metric = getModule(MetricSpec("moving_mode", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
       "errorMetric":"acc"}))
  # Accuracy should initially asymptote to .5, then after record 900 fall to
  # 0.0 because the predictions are permanently offset from the ground truth.
  groundTruth = [i/4 for i in range(900)] + [2*i/4 for i in range(100)]
  predictions = [i for i in range(1000)]
  history = []
  for actual, predicted in zip(groundTruth, predictions):
    metric.addInstance(actual, predicted)
    history.append(metric.getMetric()["value"])
  # No point before the switch where the accuracy drops below .5...
  self.assertTrue(min(history[1:899]) == .5)
  # ...and it never rises above .5 after the statistics switch.
  self.assertTrue(max(history[900:]) <= .5)
  # The statistics change is still noticeable while inside the window.
  self.assertTrue(history[998] > 0.0)
  expected = 0.0
  self.assertTrue(abs(metric.getMetric()["value"]-expected) <
                  OPFMetricsTest.DELTA)
def testTwoGramScalars(self):
  """Two-gram metric over a perfectly repeating scalar sequence."""
  twoGram = getModule(MetricSpec("two_gram", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY,
       "window":100, "predictionField":"test",
       "errorMetric":"acc"}))
  # One-hot encodings for the repeating sequence 0,1,2,3,4,0,1,2,3,4,...
  encodings = []
  for pos in range(5):
    vec = np.zeros(10)
    vec[pos] = 1
    encodings.append(vec)
  groundTruth = [i % 5 for i in range(1000)]
  history = []
  for i in xrange(len(groundTruth)):
    if i == 20:
      # A missing value must not blow up the metric.
      twoGram.addInstance(np.zeros(10), prediction=None,
                          record={"test":None})
    else:
      # Feed in the next ground-truth value.
      twoGram.addInstance(encodings[i % 5], prediction=None,
                          record={"test":groundTruth[i]})
      history.append(twoGram.getMetric()["value"])
  expected = 1.0
  self.assertTrue(abs(twoGram.getMetric()["value"]-expected) <
                  OPFMetricsTest.DELTA)
def testTwoGramScalarsStepsGreaterOne(self):
  """Two-gram metric over a repeating scalar sequence with step size 2."""
  twoGram = getModule(MetricSpec("two_gram", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY,
       "window":100, "predictionField":"test",
       "errorMetric":"acc", "steps": 2}))
  # One-hot encodings for the repeating sequence 0,1,2,3,4,0,1,2,3,4,...
  encodings = []
  for pos in range(5):
    vec = np.zeros(10)
    vec[pos] = 1
    encodings.append(vec)
  groundTruth = [i % 5 for i in range(1000)]
  history = []
  for i in xrange(len(groundTruth)):
    if i == 20:
      # A missing value must not blow up the metric.
      twoGram.addInstance(np.zeros(10), prediction=None,
                          record={"test":None})
    else:
      # Feed in the next ground-truth value.
      twoGram.addInstance(encodings[i % 5], prediction=None,
                          record={"test":groundTruth[i]})
      history.append(twoGram.getMetric()["value"])
  expected = 1.0
  self.assertTrue(abs(twoGram.getMetric()["value"]-expected) <
                  OPFMetricsTest.DELTA)
def testTwoGramStrings(self):
  """Two-gram metric over a string sequence with every 5th element random."""
  twoGram = getModule(MetricSpec("two_gram", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"acc",
       "predictionField":"test"}))
  # Sequences of "0", "1", "2", "3", "4", "0", "1", ...
  groundTruth = [str(i % 5) for i in range(1000)]
  # One-hot encodings for the five base categories.
  encodings = []
  for pos in range(5):
    vec = np.zeros(10)
    vec[pos] = 1
    encodings.append(vec)
  # Make every 5th element an unpredictable new string, so at best 4 of
  # every 5 records can be predicted (accuracy .8).
  unseen = 100
  for idx in range(5, 1000, 5):
    groundTruth[idx] = str(unseen)
    unseen += 20
  history = []
  for i in xrange(len(groundTruth)):
    if i == 20:
      # A missing value must not blow up the metric.
      twoGram.addInstance(np.zeros(10), prediction=None,
                          record={"test":None})
    else:
      twoGram.addInstance(encodings[i % 5], prediction=None,
                          record={"test":groundTruth[i]})
      history.append(twoGram.getMetric()["value"])
  expected = .8
  self.assertTrue(abs(twoGram.getMetric()["value"]-expected) <
                  OPFMetricsTest.DELTA)
def testWindowedAAE(self):
  """Average absolute error over a window of 1 (only the last error counts)."""
  metric = getModule(MetricSpec("aae", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY, "window":1}))
  for actual, predicted in zip([9, 4, 5, 6], [0, 13, 8, 3]):
    metric.addInstance(actual, predicted)
  expected = 3.0  # |6 - 3|, the single error remaining in the window
  self.assertTrue(abs(metric.getMetric()["value"]-expected) <
                  OPFMetricsTest.DELTA, "Got %s" %metric.getMetric())
def testAccuracy(self):
  """Accuracy metric over the full (unwindowed) history."""
  metric = getModule(MetricSpec("acc", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY}))
  for actual, predicted in zip([0, 1, 2, 3, 4, 5], [0, 1, 2, 4, 5, 6]):
    metric.addInstance(actual, predicted)
  expected = 0.5  # first three predictions match, last three do not
  self.assertTrue(abs(metric.getMetric()["value"]-expected) <
                  OPFMetricsTest.DELTA)
def testWindowedAccuracy(self):
  """Accuracy over a window of 2 — both trailing predictions are wrong."""
  metric = getModule(MetricSpec("acc", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY, "window":2}))
  for actual, predicted in zip([0, 1, 2, 3, 4, 5], [0, 1, 2, 4, 5, 6]):
    metric.addInstance(actual, predicted)
  expected = 0.0  # the last two pairs (4,5) and (5,6) both mismatch
  self.assertTrue(abs(metric.getMetric()["value"]-expected) <
                  OPFMetricsTest.DELTA)
def testAverageError(self):
  """Average-error metric over the full history."""
  metric = getModule(MetricSpec("avg_err", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY}))
  for actual, predicted in zip([1, 1, 2, 3, 4, 5], [0, 1, 2, 4, 5, 6]):
    metric.addInstance(actual, predicted)
  expected = (2.0/3.0)  # 4 of 6 predictions mismatch
  self.assertTrue(abs(metric.getMetric()["value"]-expected) <
                  OPFMetricsTest.DELTA)
def testWindowedAverageError(self):
  """Average error over a window of 2 — both trailing predictions are wrong."""
  metric = getModule(MetricSpec("avg_err", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY, "window":2}))
  for actual, predicted in zip([0, 1, 2, 3, 4, 5], [0, 1, 2, 4, 5, 6]):
    metric.addInstance(actual, predicted)
  expected = 1.0  # the last two pairs in the window both mismatch
  self.assertTrue(abs(metric.getMetric()["value"]-expected) <
                  OPFMetricsTest.DELTA)
def testLongWindowRMSE(self):
  """RMSE with a window larger than the number of records fed in."""
  metric = getModule(MetricSpec("rmse", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY, "window":100}))
  for actual, predicted in zip([9, 4, 5, 6], [0, 13, 8, 3]):
    metric.addInstance(actual, predicted)
  expected = 6.71  # sqrt((81+81+9+9)/4), rounded
  self.assertTrue(abs(metric.getMetric()["value"]-expected) <
                  OPFMetricsTest.DELTA)
def testNegativeLogLikelihood(self):
  """Negative log-likelihood metric against mocked model results.

  Checks -log(p) for a perfect prediction, a half-confident prediction, and
  the accumulated value over multiple records.
  """
  # Mock objects for ClassifierInput and ModelResult (see opf_utils.py);
  # only the attributes the metric reads are provided.
  class MockClassifierInput(object):
    def __init__(self, bucketIdx):
      self.bucketIndex = bucketIdx

  class MockModelResult(object):
    def __init__(self, bucketll, bucketIdx):
      self.inferences = {'multiStepBucketLikelihoods': {1: bucketll}}
      self.classifierInput = MockClassifierInput(bucketIdx)

  # Perfect prediction: all probability mass on the ground-truth bucket.
  bucketLL = {0: 1.0, 1: 0, 2: 0, 3: 0} # model prediction as a dictionary
  gt_bucketIdx = 0 # bucket index for ground truth
  negLL = getModule(MetricSpec("negativeLogLikelihood", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY}))
  negLL.addInstance(0, 0, record = None,
      result=MockModelResult(bucketLL, gt_bucketIdx))
  target = 0.0 # -log(1.0)
  self.assertAlmostEqual(negLL.getMetric()["value"], target)

  # Half the mass on the correct bucket.
  bucketLL = {0: 0.5, 1: 0.5, 2: 0, 3: 0} # model prediction as a dictionary
  gt_bucketIdx = 0 # bucket index for ground truth
  negLL = getModule(MetricSpec("negativeLogLikelihood", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY}))
  negLL.addInstance(0, 0, record = None,
      result=MockModelResult(bucketLL, gt_bucketIdx))
  target = 0.6931471 # -log(0.5)
  self.assertTrue(abs(negLL.getMetric()["value"]-target)
      < OPFMetricsTest.DELTA)

  # Test accumulated negLL for multiple steps; the middle two records put
  # zero mass on the true bucket, so their contributions dominate.
  bucketLL = []
  bucketLL.append({0: 1, 1: 0, 2: 0, 3: 0})
  bucketLL.append({0: 0, 1: 1, 2: 0, 3: 0})
  bucketLL.append({0: 0, 1: 0, 2: 1, 3: 0})
  bucketLL.append({0: 0, 1: 0, 2: 0, 3: 1})
  gt_bucketIdx = [0, 2, 1, 3]
  negLL = getModule(MetricSpec("negativeLogLikelihood", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY}))
  for i in xrange(len(bucketLL)):
    negLL.addInstance(0, 0, record = None,
        result=MockModelResult(bucketLL[i], gt_bucketIdx[i]))
  # NOTE(review): 5.756462 presumably reflects the metric's internal
  # probability clipping for zero-likelihood buckets — confirm in the
  # metric implementation.
  target = 5.756462
  self.assertTrue(abs(negLL.getMetric()["value"]-target)
      < OPFMetricsTest.DELTA)
def testNegLLMultiplePrediction(self):
  """Negative log-likelihood ranking when the ground truth is multi-modal.

  In cases where the ground truth has multiple possible outcomes, the
  prediction that captures the ground-truth distribution must have the best
  (lowest) LL, and models that give a single prediction (either the most
  likely outcome or the average outcome) must score worse.
  """
  # Mock objects for ClassifierInput and ModelResult (see opf_utils.py).
  class MockClassifierInput(object):
    def __init__(self, bucketIdx):
      self.bucketIndex = bucketIdx

  class MockModelResult(object):
    def __init__(self, bucketll, bucketIdx):
      self.inferences = {'multiStepBucketLikelihoods': {1: bucketll}}
      self.classifierInput = MockClassifierInput(bucketIdx)

  # The ground truth lies in bucket 0 with p=0.45, in bucket 1 with p=0.0
  # and in bucket 2 with p=0.55.
  gt_bucketIdx = [0]*45+[2]*55
  # Compare neg log-likelihood for three types of model predictions:
  # a model that predicts the ground-truth distribution...
  prediction_gt = {0: 0.45, 1: 0, 2: 0.55}
  # ...a model that predicts only the most likely outcome...
  prediction_ml = {0: 0.0, 1: 0, 2: 1.0}
  # ...and a model that only predicts the mean (bucket 1).
  prediction_mean = {0: 0, 1: 1, 2: 0}
  negLL_gt = getModule(MetricSpec("negativeLogLikelihood", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY}))
  negLL_ml = getModule(MetricSpec("negativeLogLikelihood", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY}))
  negLL_mean = getModule(MetricSpec("negativeLogLikelihood", None, None,
      {"verbosity" : OPFMetricsTest.VERBOSITY}))
  for i in xrange(len(gt_bucketIdx)):
    negLL_gt.addInstance(0, 0, record = None,
        result=MockModelResult(prediction_gt, gt_bucketIdx[i]))
    negLL_ml.addInstance(0, 0, record = None,
        result=MockModelResult(prediction_ml, gt_bucketIdx[i]))
    negLL_mean.addInstance(0, 0, record = None,
        result=MockModelResult(prediction_mean, gt_bucketIdx[i]))
  # The distribution-matching model must beat both single-point models.
  self.assertTrue(negLL_gt.getMetric()["value"] < negLL_ml.getMetric()["value"])
  self.assertTrue(negLL_gt.getMetric()["value"] < negLL_mean.getMetric()["value"])
def testCustomErrorMetric(self):
customFunc = """def getError(pred,ground,tools):
return abs(pred-ground)"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc, "errorWindow":3}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
aggErr = customEM.addInstance(gt[i], p[i])
target = 5.0
delta = 0.001
# insure that addInstance returns the aggregate error - other
# uber metrics depend on this behavior.
self.assertEqual(aggErr, customEM.getMetric()["value"])
self.assertTrue(abs(customEM.getMetric()["value"]-target) < delta)
customFunc = """def getError(pred,ground,tools):
sum = 0
for i in range(min(3,tools.getBufferLen())):
sum+=abs(tools.getPrediction(i)-tools.getGroundTruth(i))
return sum/3"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
customEM.addInstance(gt[i], p[i])
target = 5.0
delta = 0.001
self.assertTrue(abs(customEM.getMetric()["value"]-target) < delta)
# Test custom error metric helper functions
# Test getPrediction
# Not-Windowed
storeWindow=4
failed = False
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getPrediction(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == p[i-lookBack])
#Windowed
for lookBack in range(5):
customFunc = """def getError(pred,ground,tools):
return tools.getPrediction(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if lookBack>=storeWindow-1:
pass
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == p[i-lookBack])
#Test getGroundTruth
#Not-Windowed
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getGroundTruth(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == gt[i-lookBack])
#Windowed
for lookBack in range(5):
customFunc = """def getError(pred,ground,tools):
return tools.getGroundTruth(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == gt[i-lookBack])
#Test getFieldValue
#Not-Windowed Scalar
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Windowed Scalar
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == t1[i-lookBack])
#Not-Windowed category
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Windowed category
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Test getBufferLen
#Not-Windowed
customFunc = """def getError(pred,ground,tools):
return tools.getBufferLen()"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == i+1)
#Windowed
customFunc = """def getError(pred,ground,tools):
return tools.getBufferLen()"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == min(i+1, 4))
#Test initialization edge cases
try:
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"errorWindow":0}))
self.assertTrue (False , "error Window of 0 should fail self.assertTrue")
except:
pass
try:
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":0}))
self.assertTrue (False , "error Window of 0 should fail self.assertTrue")
except:
pass
def testMultiMetric(self):
  """MetricMulti must equal the weighted sum of its sub-metrics."""
  spec1000 = MetricSpec(field='a', metric='trivial',
                        inferenceElement='prediction',
                        params={'errorMetric': 'aae', 'window': 1000,
                                'steps': 1})
  spec10 = MetricSpec(metric='trivial', inferenceElement='prediction',
                      field='a',
                      params={'window': 10, 'steps': 1,
                              'errorMetric': 'rmse'})
  # Create the multi metric from fresh sub-metric instances.
  multi = MetricMulti(weights=[0.2, 0.8],
                      metrics=[getModule(spec10), getModule(spec1000)])
  multi.verbosity = 1
  # Reference metrics must be distinct instances from the ones inside
  # MetricMulti, because metrics accumulate history.
  ref1000 = getModule(spec1000)
  ref10 = getModule(spec10)
  groundTruth = range(500, 1000)
  predictions = range(500)
  for i in xrange(len(groundTruth)):
    v10 = ref10.addInstance(groundTruth[i], predictions[i])
    v1000 = ref1000.addInstance(groundTruth[i], predictions[i])
    if v10 is None or v1000 is None:
      expected = None
    else:
      expected = 0.2*float(v10) + 0.8*float(v1000)
    actual = multi.addInstance(groundTruth[i], predictions[i])
    self.assertEqual(expected, actual,
                     "iter i= %s gt=%s pred=%s multi=%s sub1=%s sub2=%s" % (
                         i, groundTruth[i], predictions[i], actual, v10,
                         v1000))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 33,221 | Python | .py | 763 | 37.030144 | 140 | 0.634603 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,123 | htmpredictionmodel_classifier_helper_test.py | numenta_nupic-legacy/tests/unit/nupic/frameworks/opf/htmpredictionmodel_classifier_helper_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the htm_prediction_model module."""
import sys
import copy
from datetime import datetime
import numpy
from mock import Mock, patch, ANY, call
from nupic.support.unittesthelpers.testcasebase import (unittest,
TestOptionParser)
from nupic.frameworks.opf.htm_prediction_model import HTMPredictionModel
from nupic.frameworks.opf.htm_prediction_model_classifier_helper import \
HTMPredictionModelClassifierHelper, _CLAClassificationRecord, Configuration
from nupic.frameworks.opf.opf_utils import InferenceType
from nupic.frameworks.opf.exceptions import HTMPredictionModelInvalidRangeError
# Experiment description shared by these tests: a TemporalAnomaly model
# predicting "value1" one step ahead, fed from a CSV stream with a datetime
# field, a float field, and a string field.
experimentDesc = {
  "inferenceType": InferenceType.TemporalAnomaly,
  "environment": "nupic",
  "inferenceArgs": {
    "predictionSteps": [1],
    "predictedField": "value1"
  },
  "streamDef": dict(
    version = 1,
    info = "checkpoint_test_dummy",
    streams = [
      dict(source="file://joined_mosman_2011.csv",
           info="checkpoint_test_dummy",
           columns=["*"],
           ),
      ],
    ),
  "includedFields": [
    {
      "fieldName": "TimeStamp",
      "fieldType": "datetime"
    },
    {
      "fieldName": "value1",
      "fieldType": "float"
    },
    {
      "fieldName": "value2",
      "fieldType": "string"
    }
  ]
}
# Hand-built input records matching experimentDesc's includedFields.
# NOTE(review): the final rows all repeat hour=10 — presumably intentional
# (duplicate timestamps / steady-state data); confirm against the tests
# that consume this list.
records= [
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=1),
   "value1": 8.3,
   "value2": "BLUE"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=2),
   "value1": -8.3,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=3),
   "value1": 1.3,
   "value2": "RED"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=4),
   "value1": -0.9,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=5),
   "value1": 4.2,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=6),
   "value1": 100.1,
   "value2": "BLUE"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=7),
   "value1": 8.3,
   "value2": "BLUE"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=8),
   "value1": 8.3,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=9),
   "value1": 8.3,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=10),
   "value1": 8.3,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=10),
   "value1": 8.3,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=10),
   "value1": 8.3,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=10),
   "value1": 8.3,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=10),
   "value1": 8.3,
   "value2": "BLUE"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=10),
   "value1": 8.3,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=10),
   "value1": 8.3,
   "value2": "GREEN"},
  {"TimeStamp":datetime(year=2012, month=4, day=4, hour=10),
   "value1": 8.3,
   "value2": "GREEN"},
  ]
class SDRClassifierHelperTest(unittest.TestCase):
"""HTMPredictionModelClassifierHelper unit tests."""
def setUp(self):
  # Fresh helper around a fully mocked HTMPredictionModel for every test.
  self.helper = HTMPredictionModelClassifierHelper(Mock(spec=HTMPredictionModel))
@patch.object(Configuration, 'get')
@patch.object(HTMPredictionModelClassifierHelper, 'compute')
def testInit(self, compute, configurationGet):
  """Constructor settings: explicit anomalyParams win over Configuration.

  When anomalyParams is None, each setting must fall back to the
  corresponding nupic.model.temporalAnomaly.* configuration key.
  """
  anomalyParams = {
    'autoDetectWaitRecords': 100,
    'autoDetectThreshold': 101,
    'anomalyCacheRecords': 102,
    'anomalyVectorType': 'tpc'
  }
  conf = {
    'nupic.model.temporalAnomaly.wait_records': 160,
    'nupic.model.temporalAnomaly.auto_detect_threshold': 2.0,
    'nupic.model.temporalAnomaly.window_length': 1111,
    'nupic.model.temporalAnomaly.anomaly_vector': 'tpc',
  }
  # Route Configuration.get through our dictionary of fake settings.
  configurationGet.side_effect = conf.get

  # Explicit anomalyParams take precedence.
  helper = HTMPredictionModelClassifierHelper(Mock(spec=HTMPredictionModel), anomalyParams)
  self.assertEqual(helper._autoDetectWaitRecords,
      anomalyParams['autoDetectWaitRecords'])
  self.assertEqual(helper._autoDetectThreshold,
      anomalyParams['autoDetectThreshold'])
  self.assertEqual(helper._history_length,
      anomalyParams['anomalyCacheRecords'])
  self.assertEqual(helper._vectorType,
      anomalyParams['anomalyVectorType'])

  # With no params, every value comes from Configuration.
  helper = HTMPredictionModelClassifierHelper(Mock(spec=HTMPredictionModel), None)
  self.assertEqual(helper._autoDetectWaitRecords,
      conf['nupic.model.temporalAnomaly.wait_records'])
  self.assertEqual(helper._autoDetectThreshold,
      conf['nupic.model.temporalAnomaly.auto_detect_threshold'])
  self.assertEqual(helper._history_length,
      conf['nupic.model.temporalAnomaly.window_length'])
  self.assertEqual(helper._vectorType,
      conf['nupic.model.temporalAnomaly.anomaly_vector'])
@patch.object(HTMPredictionModelClassifierHelper, 'compute')
def testRun(self, compute):
  """run() computes exactly one record and returns its anomaly label."""
  computedRecord = _CLAClassificationRecord(
      ROWID=0,
      anomalyScore=1.0,
      anomalyVector=[1,4,5],
      anomalyLabel="Label")
  compute.return_value = computedRecord
  label = self.helper.run()
  compute.assert_called_once_with()
  self.assertEqual(label, "Label")
def testGetLabels(self):
  """getLabels(): empty cache, invalid ranges, and a populated classifier."""
  # No saved_states: nothing processed yet, so no labels.
  self.helper.saved_states = []
  self.assertEqual(self.helper.getLabels(), \
      {'isProcessing': False, 'recordLabels': []})

  # Invalid ranges must raise HTMPredictionModelInvalidRangeError.
  self.helper.saved_states = [Mock(ROWID=10)]
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.getLabels, start=100, end=100)

  self.helper.saved_states = [Mock(ROWID=10)]
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.getLabels, start=-100, end=-100)

  self.helper.saved_states = [Mock(ROWID=10)]
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.getLabels, start=100, end=-100)

  # Valid case with no threshold: every classified ROWID in the KNN's
  # recency list should come back with the saved category names.
  values = {
    'categoryRecencyList': [4, 5, 7],
  }
  self.helper.saved_categories = ['TestCategory']
  categoryList = [1,1,1]
  classifier = self.helper.htm_prediction_model._getAnomalyClassifier().getSelf()
  classifier.getParameter.side_effect = values.get
  classifier._knn._categoryList = categoryList
  results = self.helper.getLabels()
  self.assertTrue('isProcessing' in results)
  self.assertTrue('recordLabels' in results)
  self.assertEqual(len(results['recordLabels']),
      len(values['categoryRecencyList']))
  for record in results['recordLabels']:
    self.assertTrue(record['ROWID'] in values['categoryRecencyList'])
    self.assertEqual(record['labels'], self.helper.saved_categories)
@patch.object(HTMPredictionModelClassifierHelper, '_getStateAnomalyVector')
@patch.object(HTMPredictionModelClassifierHelper, '_updateState')
def testAddLabel(self, _updateState, _getStateAnomalyVector):
  """addLabel(): range validation, record update, KNN learn, recompute."""
  self.helper.htm_prediction_model._getAnomalyClassifier().getSelf().getParameter.return_value = [1,2,3]

  # Empty cache: any range is invalid.
  self.helper.saved_states = []
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.addLabel, start=100, end=100, labelName="test")

  # Invalid ranges against a populated cache.
  self.helper.saved_states = [Mock(ROWID=10)]
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.addLabel, start=100, end=100, labelName="test")

  self.helper.saved_states = [Mock(ROWID=10)]
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.addLabel, start=-100, end=-100, labelName="test")

  self.helper.saved_states = [Mock(ROWID=10)]
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.addLabel, start=100, end=-100, labelName="test")

  # Valid case with no threshold: label ROWID 11 (range [11, 12)).
  self.helper.saved_states = [
      Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
      Mock(ROWID=11, anomalyLabel=[], setByUser=False),
      Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
  results = self.helper.addLabel(11, 12, "Added")

  # Verifies records were updated
  self.assertEqual(results, None)
  self.assertTrue('Added' in self.helper.saved_states[1].anomalyLabel)
  self.assertTrue(self.helper.saved_states[1].setByUser)

  # Verifies record added to KNN classifier
  knn = self.helper.htm_prediction_model._getAnomalyClassifier().getSelf()._knn
  knn.learn.assert_called_once_with(ANY, ANY, rowID=11)

  # Verifies records after added label are recomputed
  _updateState.assert_called_once_with(self.helper.saved_states[2])
@patch.object(HTMPredictionModelClassifierHelper, '_getStateAnomalyVector')
@patch.object(HTMPredictionModelClassifierHelper, '_updateState')
def testRemoveLabel(self, _updateState, _getStateAnomalyVector):
  """removeLabels() with a label filter: validation, removal, recompute."""
  classifier = self.helper.htm_prediction_model._getAnomalyClassifier().getSelf()
  classifier.getParameter.return_value = [10,11,12]
  classifier._knn._numPatterns = 3
  # mockRemoveIds is a helper defined elsewhere in this test class.
  classifier._knn.removeIds.side_effect = self.mockRemoveIds

  # Empty cache: nothing to remove.
  self.helper.saved_states = []
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.removeLabels, )

  # Invalid ranges against a populated cache.
  self.helper.saved_states = [Mock(ROWID=10)]
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.removeLabels, start=100, end=100)

  self.helper.saved_states = [Mock(ROWID=10)]
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.removeLabels, start=-100, end=-100)

  self.helper.saved_states = [Mock(ROWID=10)]
  self.assertRaises(HTMPredictionModelInvalidRangeError,
      self.helper.removeLabels, start=100, end=-100)

  # Valid case: remove label "Test" from ROWID 11 (range [11, 12)).
  self.helper.saved_states = [
      Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
      Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
      Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
  results = self.helper.removeLabels(11, 12, "Test")

  self.assertEqual(results, {'status': 'success'})
  self.assertTrue('Test' not in self.helper.saved_states[1].anomalyLabel)

  # Verifies records removed from KNN classifier
  knn = self.helper.htm_prediction_model._getAnomalyClassifier().getSelf()._knn
  self.assertEqual(knn.removeIds.mock_calls, [call([11]), call([])])

  # Verifies records after removed record are updated
  _updateState.assert_called_once_with(self.helper.saved_states[2])
@patch.object(HTMPredictionModelClassifierHelper, '_getStateAnomalyVector')
@patch.object(HTMPredictionModelClassifierHelper, '_updateState')
def testRemoveLabelNoFilter(self, _updateState, _getStateAnomalyVector):
  """removeLabels() with no label filter: all labels in range are removed."""
  classifier = self.helper.htm_prediction_model._getAnomalyClassifier().getSelf()
  values = {
    'categoryRecencyList': [10, 11, 12]
  }
  classifier.getParameter.side_effect = values.get
  classifier._knn._numPatterns = 3
  # mockRemoveIds is a helper defined elsewhere in this test class.
  classifier._knn.removeIds.side_effect = self.mockRemoveIds

  # Valid case: remove everything on ROWID 11 (range [11, 12)).
  self.helper.saved_states = [
      Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
      Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
      Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
  results = self.helper.removeLabels(11, 12)

  self.assertEqual(results, {'status': 'success'})
  self.assertTrue('Test' not in self.helper.saved_states[1].anomalyLabel)

  # Verifies records removed from KNN classifier
  knn = self.helper.htm_prediction_model._getAnomalyClassifier().getSelf()._knn
  self.assertEqual(knn.removeIds.mock_calls, [call([11]), call([])])

  # Verifies records after removed record are updated
  _updateState.assert_called_once_with(self.helper.saved_states[2])
@patch.object(HTMPredictionModelClassifierHelper, '_updateState')
def testSetGetThreshold(self, updateState):
self.helper.saved_states = [Mock(), Mock(), Mock()]
self.helper.setAutoDetectThreshold(1.0)
self.assertAlmostEqual(self.helper._autoDetectThreshold, 1.0)
self.assertEqual(len(updateState.mock_calls), len(self.helper.saved_states))
self.assertAlmostEqual(self.helper.getAutoDetectThreshold(), 1.0)
self.assertRaises(Exception, self.helper.setAutoDetectThreshold, 'invalid')
  @patch.object(HTMPredictionModelClassifierHelper, '_updateState')
  def testSetGetWaitRecords(self, updateState):
    """setAutoDetectWaitRecords stores the value, re-labels the cached
    records, and validates both the type and lower bound of the setting."""
    self.helper.saved_states = [
        Mock(ROWID=10, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=11, anomalyLabel=["Test"], setByUser=False),
        Mock(ROWID=12, anomalyLabel=["Test"], setByUser=True)]
    self.helper.setAutoDetectWaitRecords(20)
    self.assertEqual(self.helper._autoDetectWaitRecords, 20)
    # Every cached record is re-evaluated under the new wait setting.
    self.assertEqual(len(updateState.mock_calls), len(self.helper.saved_states))
    self.assertEqual(self.helper.getAutoDetectWaitRecords(), 20)
    # Test invalid parameter type
    self.assertRaises(Exception, self.helper.setAutoDetectWaitRecords,
                      'invalid')
    # Test invalid value before first record ROWID in cache
    self.assertRaises(Exception, self.helper.setAutoDetectWaitRecords, 0)
  @patch.object(HTMPredictionModelClassifierHelper, '_addRecordToKNN')
  @patch.object(HTMPredictionModelClassifierHelper, '_deleteRecordsFromKNN')
  @patch.object(HTMPredictionModelClassifierHelper, '_recomputeRecordFromKNN')
  @patch.object(HTMPredictionModelClassifierHelper, '_categoryToLabelList')
  def testUpdateState(self, toLabelList, recompute, deleteRecord, addRecord):
    """Exercise _updateState() across threshold / wait-period / user-set
    label combinations.

    NOTE(review): @patch.object decorators bind mock arguments bottom-up,
    so toLabelList is the patched _categoryToLabelList.
    """
    # Baseline record; each scenario below rebuilds a fresh
    # _CLAClassificationRecord from it (or from a tweaked deep copy).
    record = {
      "ROWID": 0,
      "anomalyScore": 1.0,
      "anomalyVector": "",
      "anomalyLabel": ["Label"],
      "setByUser": False
    }
    # Test record not labeled and not above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper._autoDetectWaitRecords = 0
    self.helper._autoDetectThreshold = 1.1
    toLabelList.return_value = []
    state = _CLAClassificationRecord(**record)
    self.helper._updateState(state)
    self.assertEqual(state.anomalyLabel, [])
    deleteRecord.assert_called_once_with([state])
    # Test record not labeled and above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper._autoDetectThreshold = 0.5
    toLabelList.return_value = []
    state = _CLAClassificationRecord(**record)
    self.helper._updateState(state)
    # Score 1.0 >= threshold 0.5 -> record gets the auto-threshold label
    # and is added to the KNN classifier.
    self.assertEqual(state.anomalyLabel, \
        [HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL])
    addRecord.assert_called_once_with(state)
    # Test record not labeled and above threshold during wait period
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper._autoDetectWaitRecords = 10
    self.helper._autoDetectThreshold = 0.5
    toLabelList.return_value = []
    state = _CLAClassificationRecord(**record)
    self.helper._updateState(state)
    # ROWID 0 is inside the 10-record wait period -> no auto-labeling.
    self.assertEqual(state.anomalyLabel, [])
    self.assertTrue(not addRecord.called)
    # Test record labeled and not above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper._autoDetectWaitRecords = 0
    self.helper._autoDetectThreshold = 1.1
    toLabelList.return_value = ["Label"]
    state = _CLAClassificationRecord(**record)
    self.helper._updateState(state)
    self.assertEqual(state.anomalyLabel, ["Label"])
    self.assertTrue(not addRecord.called)
    # Test setByUser
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper._autoDetectThreshold = 1.1
    toLabelList.return_value = ["Label 2"]
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    state = _CLAClassificationRecord(**recordCopy)
    self.helper._updateState(state)
    # User-set labels are kept and merged with the recomputed labels.
    self.assertEqual(state.anomalyLabel,
        [recordCopy["anomalyLabel"][0], toLabelList.return_value[0]])
    addRecord.assert_called_once_with(state)
    # Test removal of above threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper._autoDetectThreshold = 1.1
    toLabelList.return_value = []
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    recordCopy['anomalyLabel'] = \
        [HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL,
         HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
         HTMPredictionModelClassifierHelper.AUTO_TAG]
    state = _CLAClassificationRecord(**recordCopy)
    self.helper._updateState(state)
    # With the threshold raised above the score, stale auto labels are
    # stripped.
    self.assertEqual(state.anomalyLabel, [])
    # Auto classified threshold
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper._autoDetectThreshold = 1.1
    toLabelList.return_value = \
        [HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL]
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    recordCopy['anomalyLabel'] = \
        [HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL]
    state = _CLAClassificationRecord(**recordCopy)
    self.helper._updateState(state)
    # A user-classified auto-threshold label is converted to its tagged form.
    self.assertEqual(state.anomalyLabel,
        [HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
         HTMPredictionModelClassifierHelper.AUTO_TAG])
    addRecord.assert_called_once_with(state)
    # Test precedence of threshold label above auto threshold label
    deleteRecord.reset_mock()
    addRecord.reset_mock()
    self.helper._autoDetectThreshold = 0.8
    toLabelList.return_value = \
        [HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL,
         HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL + \
         HTMPredictionModelClassifierHelper.AUTO_TAG]
    recordCopy = copy.deepcopy(record)
    recordCopy['setByUser'] = True
    recordCopy['anomalyLabel'] = \
        [HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL]
    state = _CLAClassificationRecord(**recordCopy)
    self.helper._updateState(state)
    self.assertEqual(state.anomalyLabel,
        [HTMPredictionModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL])
    addRecord.assert_called_once_with(state)
  @patch.object(HTMPredictionModelClassifierHelper, '_getStateAnomalyVector')
  def testAddRecordToKNN(self, getAnomalyVector):
    """_addRecordToKNN learns new records and re-categorizes existing ones.

    A ROWID absent from the classifier's 'categoryRecencyList' triggers
    knn.learn(); a ROWID already present only updates that prototype's
    category.
    """
    getAnomalyVector.return_value = "Vector"
    values = {
      'categoryRecencyList': [1, 2, 3]
    }
    classifier = self.helper.htm_prediction_model._getAnomalyClassifier().getSelf()
    classifier.getParameter.side_effect = values.get
    state = {
      "ROWID": 5,
      "anomalyScore": 1.0,
      "anomalyVector": "",
      "anomalyLabel": ["Label"],
      "setByUser": False
    }
    record = _CLAClassificationRecord(**state)
    # Test with record not already in KNN
    self.helper._addRecordToKNN(record)
    classifier._knn.learn.assert_called_once_with("Vector", ANY, rowID=state['ROWID'])
    self.assertTrue(not classifier._knn.prototypeSetCategory.called)
    classifier._knn.learn.reset_mock()
    # Test with record already in KNN
    values = {
      'categoryRecencyList': [1, 2, 3, 5]
    }
    classifier.getParameter.side_effect = values.get
    self.helper._addRecordToKNN(record)
    classifier._knn.prototypeSetCategory.assert_called_once_with(\
        state['ROWID'], ANY)
    self.assertTrue(not classifier._knn.learn.called)
  @patch.object(HTMPredictionModelClassifierHelper, '_getStateAnomalyVector')
  def testDeleteRangeFromKNN(self, getAnomalyVector):
    """_deleteRangeFromKNN removes ids in [start, end) from the classifier;
    omitting `end` deletes everything from `start` onward."""
    getAnomalyVector.return_value = "Vector"
    values = {
      'categoryRecencyList': [1, 2, 3]
    }
    classifier = self.helper.htm_prediction_model._getAnomalyClassifier().getSelf()
    classifier.getParameter.side_effect = values.get
    classifier._knn._numPatterns = len(values['categoryRecencyList'])
    classifier._knn.removeIds.side_effect = self.mockRemoveIds
    # Bounded range: end is exclusive, so only ids 1 and 2 are removed.
    self.helper._deleteRangeFromKNN(start=1,end=3)
    classifier._knn.removeIds.assert_called_once_with([1,2])
    classifier._knn.removeIds.reset_mock()
    # Open-ended range: all ids from start onward are removed.
    values = {
      'categoryRecencyList': [1, 2, 3, 5]
    }
    classifier.getParameter.side_effect = values.get
    self.helper._deleteRangeFromKNN(start=1)
    classifier._knn.removeIds.assert_called_once_with([1,2,3,5])
  @patch.object(HTMPredictionModelClassifierHelper, '_getStateAnomalyVector')
  def testRecomputeRecordFromKNN(self, getAnomalyVector):
    """_recomputeRecordFromKNN picks the closest earlier record's category.

    Only neighbors with a ROWID before the record's own ROWID and a distance
    within _classificationMaxDist are eligible.
    """
    getAnomalyVector.return_value = "Vector"
    values = {
      'categoryRecencyList': [1, 2, 3, 5, 6, 7, 8, 9],
      'latestDists': numpy.array([0.7, 0.2, 0.5, 1, 0.3, 0.2, 0.1]),
      'categories': ['A','B','C','D','E','F','G']
    }
    classifier = self.helper.htm_prediction_model._getAnomalyClassifier().getSelf()
    classifier.getLatestDistances.return_value = values['latestDists']
    classifier.getCategoryList.return_value = values['categories']
    classifier.getParameter.side_effect = values.get
    state = {
      "ROWID": 5,
      "anomalyScore": 1.0,
      "anomalyVector": "",
      "anomalyLabel": ["Label"],
      "setByUser": False
    }
    record = _CLAClassificationRecord(**state)
    # Best category among earlier records exists: id 2 (dist 0.2 <= 0.4) -> 'B'
    self.helper._classificationMaxDist = 0.4
    self.helper._autoDetectWaitRecords = 0
    result = self.helper._recomputeRecordFromKNN(record)
    self.assertEqual(result, 'B')
    # No earlier record within the (tightened) max distance -> None
    self.helper._classificationMaxDist = 0.1
    result = self.helper._recomputeRecordFromKNN(record)
    self.assertEqual(result, None)
    # No records at all before this record (ROWID 0) -> None
    record.ROWID = 0
    self.helper._classificationMaxDist = 0.1
    result = self.helper._recomputeRecordFromKNN(record)
    self.assertEqual(result, None)
  def testConstructClassificationVector(self):
    """_constructClassificationRecord builds the anomaly vector from either
    the TM cell activity ('tpc') or the SP/TM column error ('sp_tpe');
    any other vector type raises TypeError.
    """
    modelParams = {
      '__numRunCalls': 0
    }
    spVals = {
      'params': {
        'activeOutputCount': 5
      },
      'output': {
        'bottomUpOut': numpy.array([1,1,0,0,1])
      }
    }
    tpVals = {
      'params': {
        'cellsPerColumn': 2,
        'columnCount': 2
      },
      'output': {
        'lrnActive': numpy.array([1,0,0,1]),
        'topDownOut': numpy.array([1,0,0,0,1])
      }
    }
    self.helper.htm_prediction_model.getParameter.side_effect = modelParams.get
    sp = self.helper.htm_prediction_model._getSPRegion()
    tm = self.helper.htm_prediction_model._getTPRegion()
    tpImp = tm.getSelf()._tfdr
    sp.getParameter.side_effect = spVals['params'].get
    sp.getOutputData.side_effect = spVals['output'].get
    self.helper._activeColumnCount = 5
    tm.getParameter.side_effect = tpVals['params'].get
    tm.getOutputData.side_effect = tpVals['output'].get
    tpImp.getLearnActiveStateT.return_value = tpVals['output']['lrnActive']
    # Test TM Cell vector: sparse indices of the learn-active cell state.
    self.helper._vectorType = 'tpc'
    vector = self.helper._constructClassificationRecord()
    self.assertEqual(vector.anomalyVector, tpImp.getLearnActiveStateT().nonzero()[0].tolist())
    # Test SP and TM Column Error vector
    self.helper._vectorType = 'sp_tpe'
    self.helper._prevPredictedColumns = numpy.array([1,0,0,0,1]).nonzero()[0]
    vector = self.helper._constructClassificationRecord()
    self.assertEqual(vector.anomalyVector, [0, 1, 4])
    # With a prediction miss, extra error bits appear after the SP bits
    # (index 7 = activeOutputCount 5 + column 2, presumably — verify against
    # _constructClassificationRecord's implementation).
    self.helper._prevPredictedColumns = numpy.array([1,0,1,0,0]).nonzero()[0]
    vector = self.helper._constructClassificationRecord()
    self.assertEqual(vector.anomalyVector, [0, 1, 4, 7])
    # Unknown vector types are rejected.
    self.helper._vectorType = 'invalidType'
    self.assertRaises(TypeError, self.helper._constructClassificationRecord)
  @patch.object(HTMPredictionModelClassifierHelper ,'_updateState')
  @patch.object(HTMPredictionModelClassifierHelper, '_constructClassificationRecord')
  def testCompute(self, createRecord, updateState):
    """compute() caches the new record, honors the wait period, and bounds
    the cache at _history_length entries."""
    state = {
      "ROWID": 0,
      "anomalyScore": 1.0,
      "anomalyVector": "Vector",
      "anomalyLabel": "Label"
    }
    record = _CLAClassificationRecord(**state)
    createRecord.return_value = record
    # Test add first record
    self.helper._history_length = 10
    self.helper._autoDetectWaitRecords = 0
    self.helper.saved_states = []
    result = self.helper.compute()
    self.assertEqual(result, record)
    self.assertEqual(len(self.helper.saved_states), 1)
    updateState.assert_called_once_with(result)
    # Test add record before wait records
    updateState.reset_mock()
    self.helper._history_length = 10
    self.helper._autoDetectWaitRecords = 10
    self.helper.saved_states = []
    result = self.helper.compute()
    self.assertEqual(result, record)
    self.assertEqual(len(self.helper.saved_states), 1)
    result = self.helper.compute()
    self.assertEqual(result, record)
    self.assertEqual(len(self.helper.saved_states), 2)
    # During the wait period records are cached but never auto-labeled.
    self.assertTrue(not updateState.called)
    # Test exceeded cache length
    updateState.reset_mock()
    self.helper._history_length = 1
    self.helper.saved_states = []
    result = self.helper.compute()
    self.assertEqual(result, record)
    self.assertEqual(len(self.helper.saved_states), 1)
    result = self.helper.compute()
    self.assertEqual(result, record)
    # Cache is capped at _history_length, so the oldest entry is evicted.
    self.assertEqual(len(self.helper.saved_states), 1)
    self.assertTrue(not updateState.called)
def testCategoryToList(self):
result = self.helper._categoryToLabelList(None)
self.assertEqual(result, [])
self.helper.saved_categories = ['A', 'B', 'C']
result = self.helper._categoryToLabelList(1)
self.assertEqual(result, ['A'])
result = self.helper._categoryToLabelList(4)
self.assertEqual(result, ['C'])
result = self.helper._categoryToLabelList(5)
self.assertEqual(result, ['A','C'])
def testGetAnomalyVector(self):
state = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": [1,4,5],
"anomalyLabel": "Label"
}
record = _CLAClassificationRecord(**state)
self.helper._anomalyVectorLength = 10
vector = self.helper._getStateAnomalyVector(record)
self.assertEqual(len(vector), self.helper._anomalyVectorLength)
self.assertEqual(vector.nonzero()[0].tolist(), record.anomalyVector)
# Tests for configuration
# ===========================================================================
@patch.object(Configuration, 'get')
def testConfiguration(self, configurationGet):
conf = {
'nupic.model.temporalAnomaly.wait_records': 160,
'nupic.model.temporalAnomaly.auto_detect_threshold': 2.0,
'nupic.model.temporalAnomaly.window_length': 1111,
'nupic.model.temporalAnomaly.anomaly_vector': 'tpc'
}
configurationGet.side_effect = conf.get
helper = HTMPredictionModelClassifierHelper(Mock(spec=HTMPredictionModel))
self.assertEqual(helper._autoDetectWaitRecords,
conf['nupic.model.temporalAnomaly.wait_records'])
self.assertTrue(helper._autoDetectThreshold,
conf['nupic.model.temporalAnomaly.auto_detect_threshold'])
self.assertTrue(helper._history_length,
conf['nupic.model.temporalAnomaly.window_length'])
self.assertTrue(helper._vectorType,
conf['nupic.model.temporalAnomaly.anomaly_vector'])
@patch.object(Configuration, 'get')
def testConfigurationFail(self, configurationGet):
conf = {
'nupic.model.temporalAnomaly.wait_records': 160,
'nupic.model.temporalAnomaly.anomaly_vector': 'tpc'
}
configurationGet.side_effect = conf.get
self.assertRaises(TypeError, HTMPredictionModelClassifierHelper, Mock(spec=HTMPredictionModel))
  @patch.object(Configuration, 'get')
  def testSetState(self, configurationGet):
    """__setstate__ migrates old pickled state to the current version.

    Version-1 state gets its _vectorType refreshed from Configuration;
    version 2 is accepted as-is; unknown versions raise.
    """
    conf = {
      'nupic.model.temporalAnomaly.wait_records': 160,
      'nupic.model.temporalAnomaly.anomaly_vector': 'tpc'
    }
    configurationGet.side_effect = conf.get
    state = dict(_version=1,_classificationDelay=100)
    self.helper._vectorType = None
    state = self.helper.__setstate__(state)
    self.assertEqual(self.helper._vectorType,
                     conf['nupic.model.temporalAnomaly.anomaly_vector'])
    self.assertEqual(self.helper._version, HTMPredictionModelClassifierHelper.__VERSION__)
    state = dict(_version=2, _classificationDelay=100)
    state = self.helper.__setstate__(state)
    self.assertEqual(self.helper._version, HTMPredictionModelClassifierHelper.__VERSION__)
    # Unknown versions cannot be migrated.
    state = dict(_version="invalid")
    self.assertRaises(Exception, self.helper.__setstate__, state)
# Tests for _HTMClassificationRecord class
# ===========================================================================
def testCLAClassificationRecord(self):
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label"
}
state = _CLAClassificationRecord(**record)
self.assertEqual(state.ROWID, record['ROWID'])
self.assertEqual(state.anomalyScore, record['anomalyScore'])
self.assertEqual(state.anomalyVector, record['anomalyVector'])
self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
self.assertEqual(state.setByUser, False)
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label",
"setByUser": True
}
state = _CLAClassificationRecord(**record)
self.assertEqual(state.ROWID, record['ROWID'])
self.assertEqual(state.anomalyScore, record['anomalyScore'])
self.assertEqual(state.anomalyVector, record['anomalyVector'])
self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
self.assertEqual(state.setByUser, record['setByUser'])
def testCLAClassificationRecordGetState(self):
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label",
"setByUser": False
}
state = _CLAClassificationRecord(**record)
self.assertEqual(state.__getstate__(), record)
def testCLAClassificationRecordSetState(self):
record = {
"ROWID": None,
"anomalyScore": None,
"anomalyVector": None,
"anomalyLabel": None,
"setByUser": None
}
state = _CLAClassificationRecord(**record)
record = {
"ROWID": 0,
"anomalyScore": 1.0,
"anomalyVector": "Vector",
"anomalyLabel": "Label",
"setByUser": False
}
state.__setstate__(record)
self.assertEqual(state.ROWID, record['ROWID'])
self.assertEqual(state.anomalyScore, record['anomalyScore'])
self.assertEqual(state.anomalyVector, record['anomalyVector'])
self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
self.assertEqual(state.setByUser, record['setByUser'])
  def mockRemoveIds(self, ids):
    """Stand-in for KNNClassifier.removeIds, used as a Mock side_effect.

    Mirrors the real classifier's bookkeeping: decrements the mocked
    pattern count by the number of removed ids and drops each id from the
    mocked 'categoryRecencyList' parameter when present.
    """
    self.helper.htm_prediction_model._getAnomalyClassifier().getSelf()._knn._numPatterns -= len(ids)
    for idx in ids:
      if idx in self.helper.htm_prediction_model._getAnomalyClassifier().getSelf().getParameter('categoryRecencyList'):
        self.helper.htm_prediction_model._getAnomalyClassifier().getSelf().getParameter('categoryRecencyList').remove(idx)
if __name__ == '__main__':
  # Parse test-runner options first, then hand any remaining arguments to
  # the unittest framework.
  parser = TestOptionParser()
  options, args = parser.parse_args()
  # Form the command line for the unit test framework
  args = [sys.argv[0]] + args
  unittest.main(argv=args)
| 32,457 | Python | .py | 734 | 38.348774 | 122 | 0.704689 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,124 | two_gram_model_test.py | numenta_nupic-legacy/tests/unit/nupic/frameworks/opf/two_gram_model_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for TwoGramModel.py."""
import tempfile
import unittest2 as unittest
from nupic.data import dict_utils
from nupic.frameworks.opf import opf_utils, two_gram_model
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.frameworks.opf.two_gram_model_capnp import TwoGramModelProto
class TwoGramModelTest(unittest.TestCase):
  """Unit tests for TwoGramModel.

  Bug fix: ``testWriteRead`` previously reused the ``inputRecords``
  generator after it had been exhausted by the first run loop, so the final
  comparison between the original and deserialized models silently ran
  zero iterations.  Fresh records are now built for the second loop.

  The repeated scalar-encoder dicts and run/assert loops are factored into
  private helpers.
  """

  @staticmethod
  def _makeScalarEncoder(fieldName, n=10):
    """Return the scalar-encoder description used throughout these tests.

    Args:
      fieldName: name of the input field the encoder reads.
      n: total number of output bits (with w=1 this is the bucket count).
    """
    return {"fieldname": fieldName,
            "maxval": 9,
            "minval": 0,
            "n": n,
            "w": 1,
            "clipInput": True,
            "forced": True,
            "type": "ScalarEncoder"}

  @staticmethod
  def _makeInputRecords(inputDicts):
    """Wrap plain dicts in DictObj records, as TwoGramModel.run expects."""
    return (dict_utils.DictObj(d) for d in inputDicts)

  def _checkInferences(self, model, inputDicts, expectedInferences,
                       resets=None):
    """Feed records through `model`, verifying each step's prediction.

    Args:
      model: the TwoGramModel under test.
      inputDicts: sequence of {fieldName: value} input dicts.
      expectedInferences: per-record expected prediction tuples.
      resets: optional per-record bools; True resets sequence state before
          that record is run.
    """
    if resets is None:
      resets = (False,) * len(expectedInferences)
    records = self._makeInputRecords(inputDicts)
    for i, (inputRecord, expected, reset) in enumerate(
        zip(records, expectedInferences, resets)):
      if reset:
        model.resetSequenceStates()
      results = model.run(inputRecord)
      self.assertEqual(results.predictionNumber, i)
      self.assertSequenceEqual(
          results.inferences[opf_utils.InferenceElement.prediction],
          expected)

  def testBasicPredictions(self):
    """The model learns 5 -> 6 and 6 -> 5 after one pass over each pair."""
    encoders = {"a": self._makeScalarEncoder("a")}
    model = two_gram_model.TwoGramModel(
        opf_utils.InferenceType.TemporalNextStep, encoders)
    self._checkInferences(model,
                          ({"a": 5}, {"a": 6}, {"a": 5}, {"a": 6}),
                          ((0,), (0,), (6,), (5,)))

  def testSequenceReset(self):
    """resetSequenceStates() makes the model forget the previous input."""
    encoders = {"a": self._makeScalarEncoder(u"a")}
    model = two_gram_model.TwoGramModel(
        opf_utils.InferenceType.TemporalNextStep, encoders)
    # The reset before the third record prevents the 6 -> 5 transition from
    # being learned, so the final input 6 has no prediction (0).
    self._checkInferences(model,
                          ({"a": 5}, {"a": 6}, {"a": 5}, {"a": 6}),
                          ((0,), (0,), (6,), (0,)),
                          resets=(False, False, True, False))

  def testMultipleFields(self):
    """Each field gets its own independent two-gram table."""
    encoders = {"a": self._makeScalarEncoder(u"a"),
                "b": self._makeScalarEncoder(u"b")}
    model = two_gram_model.TwoGramModel(
        opf_utils.InferenceType.TemporalNextStep, encoders)
    self._checkInferences(model,
                          ({"a": 5, "b": 1},
                           {"a": 6, "b": 2},
                           {"a": 5, "b": 3},
                           {"a": 6, "b": 2}),
                          ((0, 0), (0, 0), (6, 0), (5, 3)))

  def testCategoryPredictions(self):
    """Category fields predict previously-seen category strings."""
    encoders = {"a": {"fieldname": u"a",
                      "n": 10,
                      "w": 3,
                      "forced": True,
                      "type": "SDRCategoryEncoder"}}
    model = two_gram_model.TwoGramModel(
        opf_utils.InferenceType.TemporalNextStep, encoders)
    # Unknown transitions predict the empty string for categories.
    self._checkInferences(model,
                          ({"a": "A"}, {"a": "B"}, {"a": "A"}, {"a": "B"}),
                          (("",), ("",), ("B",), ("A",)))

  def testBucketedScalars(self):
    """With few buckets, values sharing a bucket predict interchangeably."""
    encoders = {"a": self._makeScalarEncoder(u"a", n=2)}
    model = two_gram_model.TwoGramModel(
        opf_utils.InferenceType.TemporalNextStep, encoders)
    self._checkInferences(
        model,
        ({"a": 5}, {"a": 6}, {"a": 5}, {"a": 4}, {"a": 6}, {"a": 7},
         {"a": 3}),
        ((0,), (6,), (5,), (0,), (6,), (7,), (7,)))

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    """A capnp round trip preserves state and prediction behavior."""
    encoders = {"a": self._makeScalarEncoder(u"a"),
                "b": self._makeScalarEncoder(u"b")}
    inferenceType = opf_utils.InferenceType.TemporalNextStep
    model = two_gram_model.TwoGramModel(inferenceType, encoders)
    inputDicts = ({"a": 5, "b": 1},
                  {"a": 6, "b": 3},
                  {"a": 5, "b": 2},
                  {"a": 6, "b": 1})
    # Train the model and sanity-check its predictions along the way.
    self._checkInferences(model, inputDicts,
                          ((0, 0), (0, 0), (6, 0), (5, 3)))

    # Serialize through a temp file and deserialize into a fresh model.
    proto = TwoGramModelProto.new_message()
    model.write(proto)
    with tempfile.TemporaryFile() as f:
      proto.write(f)
      f.seek(0)
      protoDeserialized = TwoGramModelProto.read(f)
      modelDeserialized = two_gram_model.TwoGramModel.read(protoDeserialized)

    # Internal state must survive the round trip...
    self.assertEqual(model.getInferenceType(), inferenceType)
    self.assertEqual(modelDeserialized.getInferenceType(),
                     model.getInferenceType())
    self.assertSequenceEqual(modelDeserialized._prevValues,
                             model._prevValues)
    self.assertSequenceEqual(modelDeserialized._hashToValueDict,
                             model._hashToValueDict)
    self.assertSequenceEqual(modelDeserialized._fieldNames,
                             model._fieldNames)
    self.assertSequenceEqual(modelDeserialized._twoGramDicts,
                             model._twoGramDicts)

    # ...and both models must keep predicting identically.  A fresh record
    # generator is required here: the one used for training is exhausted.
    for inputRecord in self._makeInputRecords(inputDicts):
      expected = model.run(inputRecord)
      actual = modelDeserialized.run(inputRecord)
      self.assertEqual(expected.predictionNumber, actual.predictionNumber)
      self.assertSequenceEqual(
          expected.inferences[opf_utils.InferenceElement.prediction],
          actual.inferences[opf_utils.InferenceElement.prediction])
if __name__ == "__main__":
  # Run all TwoGramModelTest cases via the standard unittest runner.
  unittest.main()
| 10,534 | Python | .py | 220 | 31.690909 | 75 | 0.500146 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,125 | safe_interpreter_test.py | numenta_nupic-legacy/tests/unit/nupic/frameworks/opf/safe_interpreter_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for safe_interpreter module."""
import ast
import io
import types
import unittest2 as unittest
from nupic.frameworks.opf.safe_interpreter import SafeInterpreter
class TestSafeInterpreter(unittest.TestCase):
  """Unit tests for the sandboxed SafeInterpreter."""

  # AWS tests attribute required for tagging via automatic test discovery via
  # nosetests
  engineAWSClusterTest = 1

  def setUp(self):
    """Build an interpreter that writes its output to an in-memory stream."""
    self.interpreter = SafeInterpreter(writer=io.BytesIO())

  def _errorNames(self):
    """Names of the exception types the interpreter has accumulated."""
    return [err.get_error()[0] for err in self.interpreter.error]

  def testPrimitives(self):
    """Boolean and None literals evaluate to themselves."""
    self.assertTrue(self.interpreter("True"))
    self.assertFalse(self.interpreter("False"))
    self.assertIsNone(self.interpreter("None"))

  def testConditionals(self):
    """Ternary expressions and if statements are both allowed."""
    self.assertTrue(self.interpreter("True if True else False"))
    self.assertTrue(self.interpreter("""
foo = False
if not foo:
  foo = True
foo
"""))

  def testBlacklist(self):
    """Loop constructs are blacklisted and surface NotImplementedError."""
    self.interpreter("for x in []: pass")
    self.assertIn("NotImplementedError", self._errorNames())
    self.interpreter("while True: pass")
    self.assertIn("NotImplementedError", self._errorNames())

  def testParse(self):
    """parse() produces an abstract syntax tree."""
    self.assertIsInstance(self.interpreter.parse("True"), ast.AST)

  def testCompile(self):
    """The AST returned by parse() is accepted by the builtin compile()."""
    codeObj = compile(self.interpreter.parse("True"), "<string>", mode="exec")
    self.assertIsInstance(codeObj, types.CodeType)

  def testSum(self):
    """The sum() builtin is available and correct."""
    self.assertEqual(
        self.interpreter("sum([x*p for x,p in {1:2}.items()])"), 2)

  def testRecursive(self):
    """Unbounded recursion is cut off with a RuntimeError."""
    self.interpreter("""
def foo():
  foo()
foo()
""")
    self.assertIn("RuntimeError", self._errorNames())

  def testOpen(self):
    """File access via open() is forbidden."""
    self.interpreter("open('foo')")
    self.assertIn("RuntimeError", self._errorNames())
if __name__ == "__main__":
  # Run all TestSafeInterpreter cases via the standard unittest runner.
  unittest.main()
| 3,445 | Python | .py | 84 | 37.02381 | 77 | 0.688382 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,126 | cluster_params_test.py | numenta_nupic-legacy/tests/unit/nupic/frameworks/opf/common_models/cluster_params_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for model selection via cluster params."""
import unittest
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.htm_prediction_model import HTMPredictionModel
from nupic.frameworks.opf.common_models.cluster_params import (
getScalarMetricWithTimeOfDayAnomalyParams)
class ClusterParamsTest(TestCaseBase):
  """Tests for getScalarMetricWithTimeOfDayAnomalyParams model selection."""

  def testModelParams(self):
    """
    Test that clusterParams loads returns a valid dict that can be instantiated
    as a HTMPredictionModel.
    """
    params = getScalarMetricWithTimeOfDayAnomalyParams([0],
                                                       minVal=23.42,
                                                       maxVal=23.420001)
    encoders = (
        params['modelConfig']['modelParams']['sensorParams']['encoders'])

    model = ModelFactory.create(modelConfig=params['modelConfig'])
    self.assertIsInstance(model,
                          HTMPredictionModel,
                          "JSON returned cannot be used to create a model")

    # A time-of-day encoder must always be present.
    self.assertIsNotNone(encoders['c0_timeOfDay'])

    # Guard against the metric encoder's resolution collapsing when the
    # min/max range is tiny.
    if encoders['c1']['type'] == 'RandomDistributedScalarEncoder':
      self.assertGreaterEqual(encoders['c1']['resolution'], 0.001,
                              "Resolution is too low")

    # Requesting the C++ TM must be reflected in the returned params.
    cppParams = getScalarMetricWithTimeOfDayAnomalyParams(
        [0], tmImplementation="tm_cpp")
    self.assertEqual(
        cppParams['modelConfig']['modelParams']['tmParams']['temporalImp'],
        "tm_cpp",
        "Incorrect json for tm_cpp tmImplementation")

    # Any other implementation name is rejected.
    with self.assertRaises(ValueError):
      getScalarMetricWithTimeOfDayAnomalyParams([0], tmImplementation="")
if __name__ == '__main__':
  # Run all ClusterParamsTest cases via the standard unittest runner.
  unittest.main()
| 2,937 | Python | .py | 57 | 44.877193 | 95 | 0.681675 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,127 | __init__.py | numenta_nupic-legacy/tests/unit/nupic/frameworks/opf/common_models/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 979 | Python | .py | 20 | 47.95 | 73 | 0.668405 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,128 | category_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/category_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for category encoder"""
import tempfile
import unittest
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.base import defaultDtype
from nupic.encoders.category import CategoryEncoder, UNKNOWN
# pycapnp is an optional dependency: the cap'n proto serialization test below
# is skipped (via @unittest.skipUnless) when it is not installed.
try:
  import capnp
except ImportError:
  capnp = None

if capnp:
  from nupic.encoders.category_capnp import CategoryEncoderProto
class CategoryEncoderTest(unittest.TestCase):
  '''Unit tests for CategoryEncoder class'''


  def testCategoryEncoder(self):
    """Encode/decode round trips, unknown categories, and noisy inputs."""
    categories = ["ES", "GB", "US"]

    # forced: is not recommended, but is used here for readability.
    # see scalar.py
    e = CategoryEncoder(w=3, categoryList=categories, forced=True)
    output = e.encode("US")
    # "US" is the 3rd user category (index 3; index 0 is reserved for
    # UNKNOWN), so its w=3 bits occupy the last 3 positions.
    expected = numpy.array([0,0,0,0,0,0,0,0,0,1,1,1], dtype=defaultDtype)
    self.assertTrue(numpy.array_equal(output, expected))

    # Test reverse lookup
    decoded = e.decode(output)
    (fieldsDict, fieldNames) = decoded
    self.assertEqual(len(fieldNames), 1)
    self.assertEqual(len(fieldsDict), 1)
    self.assertEqual(fieldNames[0], fieldsDict.keys()[0])
    (ranges, desc) = fieldsDict.values()[0]
    self.assertEqual(desc, "US")
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [3, 3]))

    # Test topdown compute
    for v in categories:
      output = e.encode(v)
      topDown = e.topDownCompute(output)
      self.assertEqual(topDown.value, v)
      self.assertEqual(topDown.scalar, e.getScalars(v)[0])

      # Bucket-based API must agree with topDownCompute
      bucketIndices = e.getBucketIndices(v)
      topDown = e.getBucketInfo(bucketIndices)[0]
      self.assertEqual(topDown.value, v)
      self.assertEqual(topDown.scalar, e.getScalars(v)[0])
      self.assertTrue(numpy.array_equal(topDown.encoding, output))
      self.assertEqual(topDown.value, e.getBucketValues()[bucketIndices[0]])

    # ---------------------
    # unknown category
    output = e.encode("NA")
    # Unknown values map to the reserved bucket 0 (the first w bits).
    expected = numpy.array([1,1,1,0,0,0,0,0,0,0,0,0], dtype=defaultDtype)
    self.assertTrue(numpy.array_equal(output, expected))

    # Test reverse lookup
    decoded = e.decode(output)
    (fieldsDict, fieldNames) = decoded
    self.assertEqual(len(fieldNames), 1)
    self.assertEqual(len(fieldsDict), 1)
    self.assertEqual(fieldNames[0], fieldsDict.keys()[0])
    (ranges, desc) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [0, 0]))

    # Test topdown compute
    topDown = e.topDownCompute(output)
    self.assertEqual(topDown.value, UNKNOWN)
    self.assertEqual(topDown.scalar, 0)

    # --------------------------------
    # ES
    output = e.encode("ES")
    expected = numpy.array([0,0,0,1,1,1,0,0,0,0,0,0], dtype=defaultDtype)
    self.assertTrue(numpy.array_equal(output, expected))

    # MISSING VALUE: must produce an all-zero encoding
    outputForMissing = e.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
    self.assertEqual(sum(outputForMissing), 0)

    # Test reverse lookup
    decoded = e.decode(output)
    (fieldsDict, fieldNames) = decoded
    self.assertEqual(len(fieldNames), 1)
    self.assertEqual(len(fieldsDict), 1)
    self.assertEqual(fieldNames[0], fieldsDict.keys()[0])
    (ranges, desc) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [1, 1]))

    # Test topdown compute
    topDown = e.topDownCompute(output)
    self.assertEqual(topDown.value, "ES")
    self.assertEqual(topDown.scalar, e.getScalars("ES")[0])

    # --------------------------------
    # Multiple categories: decoding an all-ones vector should span every
    # bucket rather than fail.
    output.fill(1)

    # Test reverse lookup
    decoded = e.decode(output)
    (fieldsDict, fieldNames) = decoded
    self.assertEqual(len(fieldNames), 1)
    self.assertEqual(len(fieldsDict), 1)
    self.assertEqual(fieldNames[0], fieldsDict.keys()[0])
    (ranges, desc) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [0, 3]))

    # -------------------------------------------------------------
    # Test with width = 1
    categories = ["cat1", "cat2", "cat3", "cat4", "cat5"]
    # forced: is not recommended, but is used here for readability.
    # see scalar.py
    e = CategoryEncoder(w=1, categoryList=categories, forced=True)
    for cat in categories:
      output = e.encode(cat)
      topDown = e.topDownCompute(output)
      self.assertEqual(topDown.value, cat)
      self.assertEqual(topDown.scalar, e.getScalars(cat)[0])

    # -------------------------------------------------------------
    # Test with width = 9, removing some bits end the encoded output
    categories = ["cat%d" % (x) for x in range(1, 10)]
    # forced: is not recommended, but is used here for readability.
    # see scalar.py
    e = CategoryEncoder(w=9, categoryList=categories, forced=True)
    for cat in categories:
      output = e.encode(cat)
      topDown = e.topDownCompute(output)
      self.assertEqual(topDown.value, cat)
      self.assertEqual(topDown.scalar, e.getScalars(cat)[0])

      # Get rid of 1 bit on the left
      outputNZs = output.nonzero()[0]
      output[outputNZs[0]] = 0
      topDown = e.topDownCompute(output)
      self.assertEqual(topDown.value, cat)
      self.assertEqual(topDown.scalar, e.getScalars(cat)[0])

      # Get rid of 1 bit on the right
      output[outputNZs[0]] = 1
      output[outputNZs[-1]] = 0
      topDown = e.topDownCompute(output)
      self.assertEqual(topDown.value, cat)
      self.assertEqual(topDown.scalar, e.getScalars(cat)[0])

      # Get rid of 4 bits on the left
      output.fill(0)
      output[outputNZs[-5:]] = 1
      topDown = e.topDownCompute(output)
      self.assertEqual(topDown.value, cat)
      self.assertEqual(topDown.scalar, e.getScalars(cat)[0])

      # Get rid of 4 bits on the right
      output.fill(0)
      output[outputNZs[0:5]] = 1
      topDown = e.topDownCompute(output)
      self.assertEqual(topDown.value, cat)
      self.assertEqual(topDown.scalar, e.getScalars(cat)[0])

    # OR together the output of 2 different categories, we should not get
    # back the mean, but rather one or the other
    output1 = e.encode("cat1")
    output2 = e.encode("cat9")
    output = output1 + output2
    topDown = e.topDownCompute(output)
    self.assertTrue(topDown.scalar == e.getScalars("cat1")[0] \
                    or topDown.scalar == e.getScalars("cat9")[0])


  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    """Cap'n Proto round trip preserves state and encoding behavior."""
    categories = ["ES", "GB", "US"]
    # forced: is not recommended, but is used here for readability. see
    # scalar.py
    original = CategoryEncoder(w=3, categoryList=categories, forced=True)
    output = original.encode("US")
    target = numpy.array([0,0,0,0,0,0,0,0,0,1,1,1], dtype=defaultDtype)
    self.assertTrue(numpy.array_equal(output, target))
    decoded = original.decode(output)

    proto1 = CategoryEncoderProto.new_message()
    original.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = CategoryEncoderProto.read(f)

    encoder = CategoryEncoder.read(proto2)

    self.assertIsInstance(encoder, CategoryEncoder)
    self.assertEqual(encoder.verbosity, original.verbosity)
    self.assertEqual(encoder.width, original.width)
    self.assertEqual(encoder.description, original.description)
    self.assertEqual(encoder.name, original.name)
    self.assertDictEqual(encoder.categoryToIndex, original.categoryToIndex)
    self.assertDictEqual(encoder.indexToCategory, original.indexToCategory)
    self.assertTrue(numpy.array_equal(encoder.encode("US"), output))
    self.assertEqual(original.decode(encoder.encode("US")),
                     encoder.decode(original.encode("US")))
    self.assertEqual(decoded, encoder.decode(output))
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
  unittest.main()
| 8,877 | Python | .py | 202 | 38.806931 | 76 | 0.678994 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,129 | logenc_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/logenc_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for logarithmic encoder"""
import numpy
import math
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.field_meta import FieldMetaType
import tempfile
import unittest
from nupic.encoders.logarithm import LogEncoder
from nupic.encoders.scalar import ScalarEncoder
# pycapnp is an optional dependency: the cap'n proto serialization test below
# is skipped (via @unittest.skipUnless) when it is not installed.
try:
  import capnp
except ImportError:
  capnp = None

if capnp:
  from nupic.encoders.logarithm_capnp import LogEncoderProto
class LogEncoderTest(unittest.TestCase):
  """Unit tests for LogEncoder class"""


  def testLogEncoder(self):
    """Encoding, decoding, closeness, and top-down over the full range."""
    # Create the encoder
    # use of forced=True is not recommended, but is used in the example for
    # readibility, see scalar.py
    le = LogEncoder(w=5,
                    resolution=0.1,
                    minval=1,
                    maxval=10000,
                    name="amount",
                    forced=True)

    # Verify we're setting the description properly
    self.assertEqual(le.getDescription(), [("amount", 0)])

    # Verify we're getting the correct field types
    types = le.getDecoderOutputFieldTypes()
    self.assertEqual(types[0], FieldMetaType.float)

    # Verify the encoder ends up with the correct width
    #
    # 10^0 -> 10^4 => 0 -> 4; With a resolution of 0.1
    # 41 possible values plus padding = 4 = width 45
    self.assertEqual(le.getWidth(), 45)

    # Verify we have the correct number of possible values
    self.assertEqual(len(le.getBucketValues()), 41)

    # Verify closeness calculations
    testTuples = [([1], [10000], 0.0),
                  ([1], [1000], 0.25),
                  ([1], [1], 1.0),
                  ([1], [-200], 1.0)]
    for tm in testTuples:
      expected = tm[0]
      actual = tm[1]
      expectedResult = tm[2]
      self.assertEqual(le.closenessScores(expected, actual),
                       expectedResult,
                       "exp: %s act: %s expR: %s" % (str(expected),
                                                     str(actual),
                                                     str(expectedResult)))

    # Verify a value of 1.0 is encoded as expected
    value = 1.0
    output = le.encode(value)

    # Our expected encoded representation of the value 1 is the first
    # w bits on in an array of len width.
    expected = [1, 1, 1, 1, 1] + 40 * [0]
    # Convert to numpy array
    expected = numpy.array(expected, dtype="uint8")
    self.assertTrue(numpy.array_equal(output, expected))

    # Test reverse lookup
    decoded = le.decode(output)
    (fieldsDict, _) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [1, 1]))

    # Verify an input representing a missing value is handled properly
    mvOutput = le.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
    self.assertEqual(sum(mvOutput), 0)

    # Test top-down for all values
    value = le.minval
    while value <= le.maxval:
      output = le.encode(value)
      topDown = le.topDownCompute(output)

      # Do the scaling by hand here.
      scaledVal = math.log10(value)
      # Find the range of values that would also produce this top down
      # output.
      minTopDown = math.pow(10, (scaledVal - le.encoder.resolution))
      maxTopDown = math.pow(10, (scaledVal + le.encoder.resolution))

      # Verify the range surrounds this scaled val
      self.assertGreaterEqual(topDown.value, minTopDown)
      self.assertLessEqual(topDown.value, maxTopDown)

      # Test bucket support
      bucketIndices = le.getBucketIndices(value)
      topDown = le.getBucketInfo(bucketIndices)[0]

      # Verify our reconstructed value is in the valid range
      self.assertGreaterEqual(topDown.value, minTopDown)
      self.assertLessEqual(topDown.value, maxTopDown)

      # Same for the scalar value
      self.assertGreaterEqual(topDown.scalar, minTopDown)
      self.assertLessEqual(topDown.scalar, maxTopDown)

      # That the encoding portion of our EncoderResult matched the result of
      # encode()
      self.assertTrue(numpy.array_equal(topDown.encoding, output))

      # Verify our reconstructed value is the same as the bucket value
      bucketValues = le.getBucketValues()
      self.assertEqual(topDown.value,
                       bucketValues[bucketIndices[0]])

      # Next value: advance by a quarter resolution in log space
      scaledVal += le.encoder.resolution / 4.0
      value = math.pow(10, scaledVal)

    # Verify next power of 10 encoding
    output = le.encode(100)
    # increase of 2 decades = 20 decibels
    # bit 0, 1 are padding; bit 3 is 1, ..., bit 22 is 20 (23rd bit)
    expected = 20 * [0] + [1, 1, 1, 1, 1] + 20 * [0]
    expected = numpy.array(expected, dtype="uint8")
    self.assertTrue(numpy.array_equal(output, expected))

    # Test reverse lookup
    decoded = le.decode(output)
    (fieldsDict, _) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [100, 100]))

    # Verify next power of 10 encoding
    output = le.encode(10000)
    expected = 40 * [0] + [1, 1, 1, 1, 1]
    expected = numpy.array(expected, dtype="uint8")
    self.assertTrue(numpy.array_equal(output, expected))

    # Test reverse lookup
    decoded = le.decode(output)
    (fieldsDict, _) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [10000, 10000]))


  def testGetBucketValues(self):
    """
    Verify that the values of buckets are as expected for given
    init params
    """
    # Create the encoder
    le = LogEncoder(w=5,
                    resolution=0.1,
                    minval=1,
                    maxval=10000,
                    name="amount",
                    forced=True)

    # Build our expected values
    inc = 0.1
    exp = 0
    expected = []
    # Incrementing to exactly 4.0 runs into fp issues
    while exp <= 4.0001:
      val = 10 ** exp
      expected.append(val)
      exp += inc

    expected = numpy.array(expected)
    actual = numpy.array(le.getBucketValues())

    numpy.testing.assert_almost_equal(expected, actual, 7)


  def testInitWithRadius(self):
    """
    Verifies you can use radius to specify a log encoder
    """
    # Create the encoder
    le = LogEncoder(w=1,
                    radius=1,
                    minval=1,
                    maxval=10000,
                    name="amount",
                    forced=True)

    self.assertEqual(le.encoder.n, 5)

    # Verify a a couple powers of 10 are encoded as expected
    value = 1.0
    output = le.encode(value)
    expected = [1, 0, 0, 0, 0]
    # Convert to numpy array
    expected = numpy.array(expected, dtype="uint8")
    self.assertTrue(numpy.array_equal(output, expected))

    value = 100.0
    output = le.encode(value)
    expected = [0, 0, 1, 0, 0]
    # Convert to numpy array
    expected = numpy.array(expected, dtype="uint8")
    self.assertTrue(numpy.array_equal(output, expected))


  def testInitWithN(self):
    """
    Verifies you can use N to specify a log encoder
    """
    # Create the encoder
    n = 100
    le = LogEncoder(n=n, forced=True)
    self.assertEqual(le.encoder.n, n)


  def testMinvalMaxVal(self):
    """
    Verifies unusual instances of minval and maxval are handled properly
    """
    # maxval below minval, and a maxval too close to minval, must both raise
    self.assertRaises(ValueError, LogEncoder, n=100, minval=0, maxval=-100,
                      forced=True)
    self.assertRaises(ValueError, LogEncoder, n=100, minval=0, maxval=1e-07,
                      forced=True)

    le = LogEncoder(n=100, minval=42, maxval=1.3e12, forced=True)

    expectedRadius = 0.552141792732
    expectedResolution = 0.110428358546
    self.assertAlmostEqual(le.encoder.radius, expectedRadius)
    self.assertAlmostEqual(le.encoder.resolution, expectedResolution)


  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    """Cap'n Proto round trip preserves state and encoding behavior."""
    le = LogEncoder(w=5,
                    resolution=0.1,
                    minval=1,
                    maxval=10000,
                    name="amount",
                    forced=True)

    originalValue = le.encode(1.0)

    proto1 = LogEncoderProto.new_message()
    le.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = LogEncoderProto.read(f)

    encoder = LogEncoder.read(proto2)

    self.assertIsInstance(encoder, LogEncoder)
    self.assertEqual(encoder.minScaledValue, le.minScaledValue)
    self.assertEqual(encoder.maxScaledValue, le.maxScaledValue)
    self.assertEqual(encoder.minval, le.minval)
    self.assertEqual(encoder.maxval, le.maxval)
    self.assertEqual(encoder.name, le.name)
    self.assertEqual(encoder.verbosity, le.verbosity)
    self.assertEqual(encoder.clipInput, le.clipInput)
    self.assertEqual(encoder.width, le.width)
    self.assertEqual(encoder.description, le.description)
    self.assertIsInstance(encoder.encoder, ScalarEncoder)
    self.assertTrue(numpy.array_equal(encoder.encode(1), originalValue))
    self.assertEqual(le.decode(encoder.encode(1)),
                     encoder.decode(le.encode(1)))

    # Feed in a new value and ensure the encodings match
    result1 = le.encode(10)
    result2 = encoder.encode(10)
    self.assertTrue(numpy.array_equal(result1, result2))
# Allow this test module to be run directly as a script.
if __name__ == "__main__":
  unittest.main()
| 10,536 | Python | .py | 259 | 33.552124 | 76 | 0.653349 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,130 | date_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/date_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for date encoder"""
import datetime
import numpy
import tempfile
from nupic.encoders.base import defaultDtype
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
import unittest2 as unittest
from nupic.encoders.date import DateEncoder
# pycapnp is an optional dependency: the cap'n proto serialization test below
# is skipped (via @unittest.skipUnless) when it is not installed.
try:
  import capnp
except ImportError:
  capnp = None

if capnp:
  from nupic.encoders.date_capnp import DateEncoderProto
class DateEncoderTest(unittest.TestCase):
  """Unit tests for DateEncoder class"""


  def setUp(self):
    """Build a reference encoder, timestamp, and its expected bit pattern."""
    # 3 bits for season, 1 bit for day of week, 1 for weekend, 5 for time of
    # day
    # use of forced is not recommended, used here for readability, see scalar.py
    self._e = DateEncoder(season=3, dayOfWeek=1, weekend=1, timeOfDay=5)
    # in the middle of fall, Thursday, not a weekend, afternoon - 4th Nov,
    # 2010, 14:55
    self._d = datetime.datetime(2010, 11, 4, 14, 55)
    self._bits = self._e.encode(self._d)
    # season is aaabbbcccddd (1 bit/month) # TODO should be <<3?
    # should be 000000000111 (centered on month 11 - Nov)
    seasonExpected = [0,0,0,0,0,0,0,0,0,1,1,1]

    # week is MTWTFSS
    # contrary to localtime documentation, Monday = 0 (for python
    # datetime.datetime.timetuple()
    dayOfWeekExpected = [0,0,0,1,0,0,0]

    # not a weekend, so it should be "False"
    weekendExpected = [1, 0]

    # time of day has radius of 4 hours and w of 5 so each bit = 240/5
    # min = 48min 14:55 is minute 14*60 + 55 = 895; 895/48 = bit 18.6
    # should be 30 bits total (30 * 48 minutes = 24 hours)
    timeOfDayExpected = (
        [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0])
    self._expected = numpy.array(seasonExpected +
                                 dayOfWeekExpected +
                                 weekendExpected +
                                 timeOfDayExpected, dtype=defaultDtype)


  def testDateEncoder(self):
    """creating date encoder instance"""
    self.assertSequenceEqual(
        self._e.getDescription(),
        [("season", 0),
         ("day of week", 12),
         ("weekend", 19), ("time of day", 21)])
    self.assertTrue(numpy.array_equal(self._expected, self._bits))


  def testMissingValues(self):
    """missing values"""
    mvOutput = self._e.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
    self.assertEqual(sum(mvOutput), 0)


  def testDecoding(self):
    """decoding date"""
    decoded = self._e.decode(self._bits)

    (fieldsDict, _) = decoded
    self.assertEqual(len(fieldsDict), 4)

    (ranges, _) = fieldsDict['season']
    self.assertEqual(len(ranges), 1)
    self.assertSequenceEqual(ranges[0], [305, 305])

    (ranges, _) = fieldsDict['time of day']
    self.assertEqual(len(ranges), 1)
    self.assertSequenceEqual(ranges[0], [14.4, 14.4])

    (ranges, _) = fieldsDict['day of week']
    self.assertEqual(len(ranges), 1)
    self.assertSequenceEqual(ranges[0], [3, 3])

    (ranges, _) = fieldsDict['weekend']
    self.assertEqual(len(ranges), 1)
    self.assertSequenceEqual(ranges[0], [0, 0])


  def testTopDownCompute(self):
    """Check topDownCompute"""
    topDown = self._e.topDownCompute(self._bits)
    topDownValues = numpy.array([elem.value for elem in topDown])
    errs = topDownValues - numpy.array([320.25, 3.5, .167, 14.8])
    self.assertAlmostEqual(errs.max(), 0, 4)


  def testBucketIndexSupport(self):
    """Check bucket index support"""
    bucketIndices = self._e.getBucketIndices(self._d)
    topDown = self._e.getBucketInfo(bucketIndices)
    topDownValues = numpy.array([elem.value for elem in topDown])
    errs = topDownValues - numpy.array([320.25, 3.5, .167, 14.8])
    self.assertAlmostEqual(errs.max(), 0, 4)

    # Concatenated sub-encodings must reproduce the full expected encoding
    encodings = []
    for x in topDown:
      encodings.extend(x.encoding)
    self.assertTrue(numpy.array_equal(encodings, self._expected))


  def testHoliday(self):
    """look at holiday more carefully because of the smooth transition"""
    # use of forced is not recommended, used here for readability, see
    # scalar.py
    e = DateEncoder(holiday=5, forced=True)
    holiday = numpy.array([0,0,0,0,0,1,1,1,1,1], dtype="uint8")
    notholiday = numpy.array([1,1,1,1,1,0,0,0,0,0], dtype="uint8")
    holiday2 = numpy.array([0,0,0,1,1,1,1,1,0,0], dtype="uint8")

    d = datetime.datetime(2010, 12, 25, 4, 55)
    self.assertTrue(numpy.array_equal(e.encode(d), holiday))

    d = datetime.datetime(2008, 12, 27, 4, 55)
    self.assertTrue(numpy.array_equal(e.encode(d), notholiday))

    # The day after / before a holiday should be partway between the two
    # representations (the "smooth transition").
    d = datetime.datetime(1999, 12, 26, 8, 00)
    self.assertTrue(numpy.array_equal(e.encode(d), holiday2))

    d = datetime.datetime(2011, 12, 24, 16, 00)
    self.assertTrue(numpy.array_equal(e.encode(d), holiday2))


  def testHolidayMultiple(self):
    """look at holiday more carefully because of the smooth transition"""
    # use of forced is not recommended, used here for readability, see
    # scalar.py
    e = DateEncoder(holiday=5, forced=True, holidays=[(12, 25), (2018, 4, 1), (2017, 4, 16)])
    holiday = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype="uint8")
    notholiday = numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0], dtype="uint8")

    d = datetime.datetime(2011, 12, 25, 4, 55)
    self.assertTrue(numpy.array_equal(e.encode(d), holiday))

    d = datetime.datetime(2007, 12, 2, 4, 55)
    self.assertTrue(numpy.array_equal(e.encode(d), notholiday))

    d = datetime.datetime(2018, 4, 1, 16, 10)
    self.assertTrue(numpy.array_equal(e.encode(d), holiday))

    d = datetime.datetime(2017, 4, 16, 16, 10)
    self.assertTrue(numpy.array_equal(e.encode(d), holiday))


  def testWeekend(self):
    """Test weekend encoder"""
    # use of forced is not recommended, used here for readability, see scalar.py
    e = DateEncoder(customDays=(21, ["sat", "sun", "fri"]), forced=True)
    mon = DateEncoder(customDays=(21, "Monday"), forced=True)

    e2 = DateEncoder(weekend=(21, 1), forced=True)

    # A custom fri/sat/sun encoder must match the built-in weekend encoder
    d = datetime.datetime(1988, 5, 29, 20, 00)
    self.assertTrue(numpy.array_equal(e.encode(d), e2.encode(d)))
    for _ in range(300):
      d = d+datetime.timedelta(days=1)
      self.assertTrue(numpy.array_equal(e.encode(d), e2.encode(d)))

      #Make sure the custom "Monday" encoder agrees with the real weekday
      if mon.decode(mon.encode(d))[0]["Monday"][0][0][0] == 1.0:
        self.assertEqual(d.weekday(), 0)
      else:
        self.assertNotEqual(d.weekday(), 0)


  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    """Cap'n Proto round trip preserves state and encoding behavior."""
    originalTS = datetime.datetime(1997, 8, 29, 2, 14)
    originalValue = self._e.encode(originalTS)

    proto1 = DateEncoderProto.new_message()
    self._e.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = DateEncoderProto.read(f)

    encoder = DateEncoder.read(proto2)

    self.assertIsInstance(encoder, DateEncoder)
    self.assertEqual(encoder.width, self._e.width)
    self.assertEqual(encoder.weekendOffset, self._e.weekendOffset)
    self.assertEqual(encoder.timeOfDayOffset, self._e.timeOfDayOffset)
    self.assertEqual(encoder.seasonOffset, self._e.seasonOffset)
    self.assertEqual(encoder.dayOfWeekOffset, self._e.dayOfWeekOffset)
    self.assertIsInstance(encoder.customDaysEncoder,
                          self._e.customDaysEncoder.__class__)
    self.assertIsInstance(encoder.dayOfWeekEncoder,
                          self._e.dayOfWeekEncoder.__class__)
    self.assertIsInstance(encoder.seasonEncoder,
                          self._e.seasonEncoder.__class__)
    self.assertIsInstance(encoder.timeOfDayEncoder,
                          self._e.timeOfDayEncoder.__class__)
    self.assertIsInstance(encoder.weekendEncoder,
                          self._e.weekendEncoder.__class__)
    self.assertTrue(numpy.array_equal(self._bits, encoder.encode(self._d)))
    self.assertTrue(numpy.array_equal(encoder.encode(originalTS),
                                      originalValue))
    self.assertEqual(self._e.decode(encoder.encode(self._d)),
                     encoder.decode(self._e.encode(self._d)))
# Allow this test module to be run directly as a script.
if __name__ == "__main__":
  unittest.main()
| 9,081 | Python | .py | 192 | 41.390625 | 93 | 0.666931 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,131 | multi_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/multi_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for multi- encoder"""
import numpy
import tempfile
import unittest2 as unittest
from nupic.encoders.multi import MultiEncoder
from nupic.encoders import ScalarEncoder, AdaptiveScalarEncoder, SDRCategoryEncoder
from nupic.data.dict_utils import DictObj
# pycapnp is an optional dependency: the cap'n proto serialization test below
# is skipped (via @unittest.skipUnless) when it is not installed.
try:
  import capnp
except ImportError:
  capnp = None

if capnp:
  from nupic.encoders.multi_capnp import MultiEncoderProto
class MultiEncoderTest(unittest.TestCase):
  """Unit tests for MultiEncoder class"""


  def testMultiEncoder(self):
    """Testing MultiEncoder..."""
    e = MultiEncoder()

    # should be 7 bits wide
    # use of forced=True is not recommended, but here for readibility, see
    # scalar.py
    e.addEncoder("dow",
                 ScalarEncoder(w=3, resolution=1, minval=1, maxval=8,
                               periodic=True, name="day of week", forced=True))
    # sould be 14 bits wide
    e.addEncoder("myval",
                 ScalarEncoder(w=5, resolution=1, minval=1, maxval=10,
                               periodic=False, name="aux", forced=True))
    self.assertEqual(e.getWidth(), 21)
    self.assertEqual(e.getDescription(), [("day of week", 0), ("aux", 7)])

    d = DictObj(dow=3, myval=10)
    # Concatenation of the two sub-encodings: 7 "dow" bits then 14 "aux" bits
    expected=numpy.array([0,1,1,1,0,0,0] + [0,0,0,0,0,0,0,0,0,1,1,1,1,1],
                         dtype="uint8")
    output = e.encode(d)
    self.assertTrue(numpy.array_equal(expected, output))

    # Check decoding
    decoded = e.decode(output)
    self.assertEqual(len(decoded), 2)
    (ranges, _) = decoded[0]["aux"]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [10, 10]))
    (ranges, _) = decoded[0]["day of week"]
    self.assertTrue(len(ranges) == 1 and numpy.array_equal(ranges[0], [3, 3]))

    # Adding a third (category) sub-encoder must be reflected in the
    # top-down reconstruction.
    e.addEncoder("myCat",
                 SDRCategoryEncoder(n=7, w=3,
                                    categoryList=["run", "pass","kick"],
                                    forced=True))

    d = DictObj(dow=4, myval=6, myCat="pass")
    output = e.encode(d)
    topDownOut = e.topDownCompute(output)
    self.assertAlmostEqual(topDownOut[0].value, 4.5)
    self.assertEqual(topDownOut[1].value, 6.0)
    self.assertEqual(topDownOut[2].value, "pass")
    self.assertEqual(topDownOut[2].scalar, 2)
    self.assertEqual(topDownOut[2].encoding.sum(), 3)


  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    """Cap'n Proto round trip preserves state and encoding behavior."""
    original = MultiEncoder()
    original.addEncoder("dow",
                        ScalarEncoder(w=3, resolution=1, minval=1, maxval=8,
                                      periodic=True, name="day of week",
                                      forced=True))
    original.addEncoder("myval",
                        AdaptiveScalarEncoder(n=50, w=5, resolution=1, minval=1, maxval=10,
                                              periodic=False, name="aux", forced=True))

    originalValue = DictObj(dow=3, myval=10)
    output = original.encode(originalValue)

    proto1 = MultiEncoderProto.new_message()
    original.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = MultiEncoderProto.read(f)

    encoder = MultiEncoder.read(proto2)

    self.assertIsInstance(encoder, MultiEncoder)
    self.assertEqual(encoder.name, original.name)
    self.assertEqual(encoder.width, original.width)
    self.assertTrue(numpy.array_equal(encoder.encode(originalValue), output))
    testObj1 = DictObj(dow=4, myval=9)
    self.assertEqual(original.decode(encoder.encode(testObj1)),
                     encoder.decode(original.encode(testObj1)))

    # Feed in a new value and ensure the encodings match
    testObj2 = DictObj(dow=5, myval=8)
    result1 = original.encode(testObj2)
    result2 = encoder.encode(testObj2)
    self.assertTrue(numpy.array_equal(result1, result2))
# Allow this test module to be run directly as a script.
if __name__ == "__main__":
  unittest.main()
| 4,965 | Python | .py | 110 | 37.990909 | 91 | 0.649751 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,132 | adaptivescalar_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/adaptivescalar_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import tempfile
import unittest
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.adaptive_scalar import AdaptiveScalarEncoder
from nupic.encoders.base import defaultDtype
# pycapnp is an optional dependency: cap'n proto serialization tests are
# skipped (via @unittest.skipUnless) when it is not installed.
try:
  import capnp
except ImportError:
  capnp = None

if capnp:
  from nupic.encoders.adaptive_scalar_capnp import AdaptiveScalarEncoderProto
class AdaptiveScalarTest(unittest.TestCase):
  """Tests for AdaptiveScalarEncoder.

  NOTE(review): the adaptive encoder updates internal state on every encode
  call (see testNonPeriodicEncoderMinMaxNotSpec), so the order of encode calls
  inside each test matters.
  """

  def setUp(self):
    # Shared fixture: non-periodic adaptive encoder over [1, 10].
    # forced: it's strongly recommended to use w>=21, in the example we force
    # skip the check for readibility
    self._l = AdaptiveScalarEncoder(name="scalar", n=14, w=5, minval=1,
                                    maxval=10, periodic=False, forced=True)

  def testMissingValues(self):
    """Encoding the missing-data sentinel must yield an all-zero output."""
    # forced: it's strongly recommended to use w>=21, in the example we force
    # skip the check for readib.
    mv = AdaptiveScalarEncoder(name="mv", n=14, w=3, minval=1, maxval=8,
                               periodic=False, forced=True)
    empty = mv.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
    self.assertEqual(empty.sum(), 0)

  def testNonPeriodicEncoderMinMaxSpec(self):
    """Non-periodic encoder, min and max specified"""
    # minval activates the leftmost w bits; maxval the rightmost w bits.
    self.assertTrue(numpy.array_equal(
        self._l.encode(1),
        numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    dtype=defaultDtype)))
    self.assertTrue(numpy.array_equal(
        self._l.encode(2),
        numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    dtype=defaultDtype)))
    self.assertTrue(numpy.array_equal(
        self._l.encode(10),
        numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                    dtype=defaultDtype)))

  def testTopDownDecode(self):
    """Test the input description generation and topDown decoding"""
    l = self._l
    v = l.minval
    # Sweep the range in quarter-resolution steps; decode and topDownCompute
    # must invert encode to within the encoder's resolution.
    while v < l.maxval:
      output = l.encode(v)
      decoded = l.decode(output)

      (fieldsDict, _) = decoded
      self.assertEqual(len(fieldsDict), 1)
      (ranges, _) = fieldsDict.values()[0]
      self.assertEqual(len(ranges), 1)
      (rangeMin, rangeMax) = ranges[0]
      # A single encoded scalar decodes to a degenerate [v, v] range.
      self.assertEqual(rangeMin, rangeMax)
      self.assertLess(abs(rangeMin - v), l.resolution)

      topDown = l.topDownCompute(output)[0]
      self.assertLessEqual(abs(topDown.value - v), l.resolution)

      # Test bucket support
      bucketIndices = l.getBucketIndices(v)
      topDown = l.getBucketInfo(bucketIndices)[0]
      self.assertLessEqual(abs(topDown.value - v), l.resolution / 2)
      self.assertEqual(topDown.value, l.getBucketValues()[bucketIndices[0]])
      self.assertEqual(topDown.scalar, topDown.value)
      self.assertTrue(numpy.array_equal(topDown.encoding, output))

      # Next value
      v += l.resolution / 4

  def testFillHoles(self):
    """Make sure we can fill in holes"""
    l=self._l
    # Decoding should tolerate small gaps inside an active run: both bitmaps
    # below decode to the single range [10, 10].
    decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]))
    (fieldsDict, _) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertSequenceEqual(ranges[0], [10, 10])

    decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1]))
    (fieldsDict, _) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertSequenceEqual(ranges[0], [10, 10])

  def testNonPeriodicEncoderMinMaxNotSpec(self):
    """Non-periodic encoder, min and max not specified"""
    l = AdaptiveScalarEncoder(name="scalar", n=14, w=5, minval=None,
                              maxval=None, periodic=False, forced=True)

    def _verify(v, encoded, expV=None):
      # Encode v (the encoder learns as a side effect) and check both the
      # bit pattern and the round trip through the bucket API.
      if expV is None:
        expV = v
      self.assertTrue(numpy.array_equal(
          l.encode(v),
          numpy.array(encoded, dtype=defaultDtype)))
      self.assertLessEqual(
          abs(l.getBucketInfo(l.getBucketIndices(v))[0].value - expV),
          l.resolution/2)

    def _verifyNot(v, encoded):
      # The encoding of v must have changed away from `encoded`.
      self.assertFalse(numpy.array_equal(
          l.encode(v), numpy.array(encoded, dtype=defaultDtype)))

    # Each call below may widen the learned range, shifting later encodings.
    _verify(1, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    _verify(2, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(10, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(3, [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0])
    _verify(-9, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    _verify(-8, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    _verify(-7, [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
    _verify(-6, [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
    _verify(-5, [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0])
    _verify(0, [0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    _verify(8, [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0])
    _verify(8, [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0])
    _verify(10, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(11, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(12, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(13, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(14, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(15, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

    # Test switching learning off.
    l = AdaptiveScalarEncoder(name="scalar", n=14, w=5, minval=1, maxval=10,
                              periodic=False, forced=True)
    _verify(1, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    _verify(10, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(20, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(10, [0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0])

    l.setLearning(False)
    # With learning off, out-of-range inputs map to the learned boundary
    # values (expV gives the value actually represented).
    _verify(30, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], expV=20)
    _verify(20, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(-10, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], expV=1)
    _verify(-1, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], expV=1)

    l.setLearning(True)
    # Learning re-enabled: the range adapts again, so prior encodings of
    # interior values are no longer valid.
    _verify(30, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verifyNot(20, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    _verify(-10, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    _verifyNot(-1, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])

  def testSetFieldStats(self):
    """Test setting the min and max using setFieldStats"""
    def _dumpParams(enc):
      # Snapshot of every parameter that setFieldStats should influence.
      return (enc.n, enc.w, enc.minval, enc.maxval, enc.resolution,
              enc._learningEnabled, enc.recordNum,
              enc.radius, enc.rangeInternal, enc.padding, enc.nInternal)
    sfs = AdaptiveScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=10,
                                periodic=False, forced=True)
    reg = AdaptiveScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=100,
                                periodic=False, forced=True)
    self.assertNotEqual(_dumpParams(sfs), _dumpParams(reg),
                        ("Params should not be equal, since the two encoders "
                         "were instantiated with different values."))
    # set the min and the max using sFS to 1,100 respectively.
    sfs.setFieldStats("this", {"this":{"min":1, "max":100}})

    # Now the parameters for both should be the same
    self.assertEqual(_dumpParams(sfs), _dumpParams(reg),
                     ("Params should now be equal, but they are not. sFS "
                      "should be equivalent to initialization."))

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    """Round-trip the encoder through its capnp proto and compare state."""
    originalValue = self._l.encode(1)

    proto1 = AdaptiveScalarEncoderProto.new_message()
    self._l.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = AdaptiveScalarEncoderProto.read(f)

    encoder = AdaptiveScalarEncoder.read(proto2)

    self.assertIsInstance(encoder, AdaptiveScalarEncoder)
    self.assertEqual(encoder.recordNum, self._l.recordNum)
    self.assertDictEqual(encoder.slidingWindow.__dict__,
                         self._l.slidingWindow.__dict__)
    self.assertEqual(encoder.w, self._l.w)
    self.assertEqual(encoder.minval, self._l.minval)
    self.assertEqual(encoder.maxval, self._l.maxval)
    self.assertEqual(encoder.periodic, self._l.periodic)
    self.assertEqual(encoder.n, self._l.n)
    self.assertEqual(encoder.radius, self._l.radius)
    self.assertEqual(encoder.resolution, self._l.resolution)
    self.assertEqual(encoder.name, self._l.name)
    self.assertEqual(encoder.verbosity, self._l.verbosity)
    self.assertEqual(encoder.clipInput, self._l.clipInput)
    self.assertTrue(numpy.array_equal(encoder.encode(1), originalValue))
    self.assertEqual(self._l.decode(encoder.encode(1)),
                     encoder.decode(self._l.encode(1)))

    # Feed in a new value and ensure the encodings match
    result1 = self._l.encode(7)
    result2 = encoder.encode(7)
    self.assertTrue(numpy.array_equal(result1, result2))
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 10,008 | Python | .py | 206 | 41.975728 | 79 | 0.607773 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,133 | scalarspace_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/scalarspace_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for scalar space encoder"""
import unittest2 as unittest
from nupic.encoders.scalar_space import ScalarSpaceEncoder, DeltaEncoder
class ScalarSpaceEncoderTest(unittest.TestCase):
  """Unit tests for the ScalarSpaceEncoder factory class."""

  def testScalarSpaceEncoder(self):
    """scalar space encoder"""
    # use of forced=True is not recommended, but used in the example for
    # readibility, see scalar.py
    commonArgs = (1, 1, 2, False, 2, 1, 1, None, 0, False)
    # Requesting "delta" space must produce a DeltaEncoder instance.
    encoder = ScalarSpaceEncoder(*(commonArgs + ("delta",)), forced=True)
    self.assertTrue(isinstance(encoder, DeltaEncoder))
    # Requesting "absolute" space must NOT produce a DeltaEncoder.
    encoder = ScalarSpaceEncoder(*(commonArgs + ("absolute",)), forced=True)
    self.assertFalse(isinstance(encoder, DeltaEncoder))
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 1,768 | Python | .py | 36 | 45.361111 | 99 | 0.68158 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,134 | coordinate_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/coordinate_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import tempfile
import unittest
from mock import patch
from nupic.encoders.base import defaultDtype
from nupic.encoders.coordinate import CoordinateEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.coordinate_capnp import CoordinateEncoderProto
# Disable warnings about accessing protected members
# pylint: disable=W0212
class CoordinateEncoderTest(unittest.TestCase):
  """Unit tests for CoordinateEncoder class"""

  def setUp(self):
    # Shared fixture: 33-bit output with 3 active bits.
    self.encoder = CoordinateEncoder(name="coordinate", n=33, w=3)

  def testInvalidW(self):
    """Constructor must reject even, zero, and negative w values."""
    # Even
    args = {"name": "coordinate",
            "n": 45,
            "w": 4}
    self.assertRaises(ValueError, CoordinateEncoder, **args)

    # 0
    args = {"name": "coordinate",
            "n": 45,
            "w": 0}
    self.assertRaises(ValueError, CoordinateEncoder, **args)

    # Negative
    args = {"name": "coordinate",
            "n": 45,
            "w": -2}
    self.assertRaises(ValueError, CoordinateEncoder, **args)

  def testInvalidN(self):
    """Constructor must reject an output size n that is too small."""
    # Too small
    args = {"name": "coordinate",
            "n": 11,
            "w": 3}
    self.assertRaises(ValueError, CoordinateEncoder, **args)

  def testHashCoordinate(self):
    # Hashes are deterministic (pinned values) and differ across coordinates.
    h1 = self.encoder._hashCoordinate(np.array([0]))
    self.assertEqual(h1, 7415141576215061722)
    h2 = self.encoder._hashCoordinate(np.array([0, 1]))
    self.assertEqual(h2, 6909411824118942936)

  def testOrderForCoordinate(self):
    # Orders must lie in [0, 1) and differ across distinct coordinates.
    h1 = self.encoder._orderForCoordinate(np.array([2, 5, 10]))
    h2 = self.encoder._orderForCoordinate(np.array([2, 5, 11]))
    h3 = self.encoder._orderForCoordinate(np.array([2497477, -923478]))

    self.assertTrue(0 <= h1 and h1 < 1)
    self.assertTrue(0 <= h2 and h2 < 1)
    self.assertTrue(0 <= h3 and h3 < 1)
    self.assertTrue(h1 != h2)
    self.assertTrue(h2 != h3)

  def testBitForCoordinate(self):
    # Each coordinate maps to a bit index in [0, n), distinct per coordinate.
    n = 1000
    b1 = self.encoder._bitForCoordinate(np.array([2, 5, 10]), n)
    b2 = self.encoder._bitForCoordinate(np.array([2, 5, 11]), n)
    b3 = self.encoder._bitForCoordinate(np.array([2497477, -923478]), n)

    self.assertTrue(0 <= b1 and b1 < n)
    self.assertTrue(0 <= b2 and b2 < n)
    self.assertTrue(0 <= b3 and b3 < n)
    self.assertTrue(b1 != b2)
    self.assertTrue(b2 != b3)

    # Small n
    n = 2
    b4 = self.encoder._bitForCoordinate(np.array([5, 10]), n)

    self.assertTrue(0 <= b4 < n)

  @patch.object(CoordinateEncoder, "_orderForCoordinate")
  def testTopWCoordinates(self, mockOrderForCoordinate):
    # Mock orderForCoordinate so the order is simply proportional to the
    # coordinate value; then the two largest coordinates must win.
    mockFn = lambda coordinate: np.sum(coordinate) / 5.0
    mockOrderForCoordinate.side_effect = mockFn

    coordinates = np.array([[1], [2], [3], [4], [5]])
    top = self.encoder._topWCoordinates(coordinates, 2).tolist()

    self.assertEqual(len(top), 2)
    self.assertIn([5], top)
    self.assertIn([4], top)

  def testNeighbors1D(self):
    # A radius-5 neighborhood in 1D contains 2*5+1 = 11 points.
    coordinate = np.array([100])
    radius = 5
    neighbors = self.encoder._neighbors(coordinate, radius).tolist()

    self.assertEqual(len(neighbors), 11)
    self.assertIn([95], neighbors)
    self.assertIn([100], neighbors)
    self.assertIn([105], neighbors)

  def testNeighbors2D(self):
    # A radius-5 neighborhood in 2D contains 11*11 = 121 points.
    coordinate = np.array([100, 200])
    radius = 5
    neighbors = self.encoder._neighbors(coordinate, radius).tolist()

    self.assertEqual(len(neighbors), 121)
    self.assertIn([95, 195], neighbors)
    self.assertIn([95, 205], neighbors)
    self.assertIn([100, 200], neighbors)
    self.assertIn([105, 195], neighbors)
    self.assertIn([105, 205], neighbors)

  def testNeighbors0Radius(self):
    # Radius 0 yields only the coordinate itself.
    coordinate = np.array([100, 200, 300])
    radius = 0
    neighbors = self.encoder._neighbors(coordinate, radius).tolist()

    self.assertEqual(len(neighbors), 1)
    self.assertIn([100, 200, 300], neighbors)

  def testEncodeIntoArray(self):
    n = 33
    w = 3
    encoder = CoordinateEncoder(name="coordinate", n=n, w=w)
    coordinate = np.array([100, 200])
    radius = 5
    output1 = encode(encoder, coordinate, radius)

    # Exactly w bits must be active in the output.
    self.assertEqual(np.sum(output1), w)

    # Test that we get the same output for the same input
    output2 = encode(encoder, coordinate, radius)
    self.assertTrue(np.array_equal(output2, output1))

    # Test that a float radius raises an assertion error
    with self.assertRaises(AssertionError):
      encoder.encode((coordinate, float(radius)))

  def testEncodeSaturateArea(self):
    n = 1999
    w = 25
    encoder = CoordinateEncoder(name="coordinate", n=n, w=w)

    outputA = encode(encoder, np.array([0, 0]), 2)
    outputB = encode(encoder, np.array([0, 1]), 2)

    # Pinned overlap for adjacent points with these parameters.
    self.assertEqual(overlap(outputA, outputB), 0.8)

  def testEncodeRelativePositions(self):
    # As you get farther from a coordinate, the overlap should decrease
    overlaps = overlapsForRelativeAreas(999, 51, np.array([100, 200]), 10,
                                        dPosition=np.array([2, 2]),
                                        num=5)
    self.assertDecreasingOverlaps(overlaps)

  def testEncodeRelativeRadii(self):
    # As radius increases, the overlap should decrease
    overlaps = overlapsForRelativeAreas(999, 25, np.array([100, 200]), 5,
                                        dRadius=2,
                                        num=5)
    self.assertDecreasingOverlaps(overlaps)

    # As radius decreases, the overlap should decrease
    overlaps = overlapsForRelativeAreas(999, 51, np.array([100, 200]), 20,
                                        dRadius=-2,
                                        num=5)
    self.assertDecreasingOverlaps(overlaps)

  def testEncodeRelativePositionsAndRadii(self):
    # As radius increases and positions change, the overlap should decrease
    overlaps = overlapsForRelativeAreas(999, 25, np.array([100, 200]), 5,
                                        dPosition=np.array([1, 1]),
                                        dRadius=1,
                                        num=5)
    self.assertDecreasingOverlaps(overlaps)

  def testEncodeUnrelatedAreas(self):
    """
    Assert unrelated areas don't share bits
    (outside of chance collisions).
    """
    # Thresholds tighten/loosen with n and w; each (n, w, radius) combination
    # gets its own max-overlap bound.
    avgThreshold = 0.3

    maxThreshold = 0.12
    overlaps = overlapsForUnrelatedAreas(1499, 37, 5)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)

    maxThreshold = 0.12
    overlaps = overlapsForUnrelatedAreas(1499, 37, 10)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)

    maxThreshold = 0.17
    overlaps = overlapsForUnrelatedAreas(999, 25, 10)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)

    maxThreshold = 0.25
    overlaps = overlapsForUnrelatedAreas(499, 13, 10)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)

  def testEncodeAdjacentPositions(self, verbose=False):
    # Encodings of positions one step apart must overlap heavily,
    # across many starting positions.
    repetitions = 100
    n = 999
    w = 25
    radius = 10
    minThreshold = 0.75
    avgThreshold = 0.90
    allOverlaps = np.empty(repetitions)

    for i in range(repetitions):
      overlaps = overlapsForRelativeAreas(n, w,
                                          np.array([i * 10, i * 10]), radius,
                                          dPosition=np.array([0, 1]),
                                          num=1)
      allOverlaps[i] = overlaps[0]

    self.assertGreater(np.min(allOverlaps), minThreshold)
    self.assertGreater(np.average(allOverlaps), avgThreshold)

    if verbose:
      print ("===== Adjacent positions overlap "
             "(n = {0}, w = {1}, radius = {2}) ===").format(n, w, radius)
      print "Max: {0}".format(np.max(allOverlaps))
      print "Min: {0}".format(np.min(allOverlaps))
      print "Average: {0}".format(np.average(allOverlaps))

  def assertDecreasingOverlaps(self, overlaps):
    # Passes iff the sequence never increases from one element to the next.
    self.assertEqual((np.diff(overlaps) > 0).sum(), 0)

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    """Round-trip the encoder through its capnp proto and compare outputs."""
    coordinate = np.array([100, 200])
    radius = 5
    output1 = encode(self.encoder, coordinate, radius)

    proto1 = CoordinateEncoderProto.new_message()
    self.encoder.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = CoordinateEncoderProto.read(f)

    encoder = CoordinateEncoder.read(proto2)

    self.assertIsInstance(encoder, CoordinateEncoder)
    self.assertEqual(encoder.w, self.encoder.w)
    self.assertEqual(encoder.n, self.encoder.n)
    self.assertEqual(encoder.name, self.encoder.name)
    self.assertEqual(encoder.verbosity, self.encoder.verbosity)

    coordinate = np.array([100, 200])
    radius = 5
    output2 = encode(encoder, coordinate, radius)

    self.assertTrue(np.array_equal(output1, output2))
def encode(encoder, coordinate, radius):
  """Encode (coordinate, radius) with *encoder* and return the output bits."""
  width = encoder.getWidth()
  sdr = np.zeros(width, dtype=defaultDtype)
  encoder.encodeIntoArray((coordinate, radius), sdr)
  return sdr
def overlap(sdr1, sdr2):
  """Return the fraction of sdr1's active bits that are also active in sdr2."""
  assert sdr1.size == sdr2.size
  sharedBits = (sdr1 & sdr2).sum()
  return float(sharedBits) / sdr1.sum()
def overlapsForRelativeAreas(n, w, initPosition, initRadius, dPosition=None,
dRadius=0, num=100, verbose=False):
"""
Return overlaps between an encoding and other encodings relative to it
:param n: the size of the encoder output
:param w: the number of active bits in the encoder output
:param initPosition: the position of the first encoding
:param initRadius: the radius of the first encoding
:param dPosition: the offset to apply to each subsequent position
:param dRadius: the offset to apply to each subsequent radius
:param num: the number of encodings to generate
:param verbose: whether to print verbose output
"""
encoder = CoordinateEncoder(name="coordinate", n=n, w=w)
overlaps = np.empty(num)
outputA = encode(encoder, np.array(initPosition), initRadius)
for i in range(num):
newPosition = initPosition if dPosition is None else (
initPosition + (i + 1) * dPosition)
newRadius = initRadius + (i + 1) * dRadius
outputB = encode(encoder, newPosition, newRadius)
overlaps[i] = overlap(outputA, outputB)
if verbose:
print
print ("===== Relative encoding overlaps (n = {0}, w = {1}, "
"initPosition = {2}, initRadius = {3}, "
"dPosition = {4}, dRadius = {5}) =====").format(
n, w, initPosition, initRadius, dPosition, dRadius)
print "Average: {0}".format(np.average(overlaps))
print "Max: {0}".format(np.max(overlaps))
return overlaps
def overlapsForUnrelatedAreas(n, w, radius, repetitions=100, verbose=False):
  """Return overlaps between one encoding and `repetitions` distant encodings.

  Each comparison point is shifted by ten radii along the second axis, far
  enough that any shared bits are chance collisions.
  """
  origin = np.array([0, 0])
  jump = np.array([0, radius * 10])
  return overlapsForRelativeAreas(n, w, origin, radius, dPosition=jump,
                                  num=repetitions, verbose=verbose)
# Allow running this test module directly.
if __name__ == "__main__":
  unittest.main()
| 12,171 | Python | .py | 283 | 36.208481 | 77 | 0.663078 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,135 | pass_through_encoder_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/pass_through_encoder_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for PassThru Encoder."""
# NOTE(review): CL_VERBOSITY appears unused anywhere in this module —
# confirm before removing.
CL_VERBOSITY = 0
import tempfile
import unittest2 as unittest
import numpy
from nupic.encoders import PassThroughEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.pass_through_capnp import PassThroughEncoderProto
class PassThroughEncoderTest(unittest.TestCase):
  """Unit tests for PassThroughEncoder class."""

  def setUp(self):
    # Shared fixture: 9-bit encoder named "foo".
    self.n = 9
    self.name = "foo"
    self._encoder = PassThroughEncoder

  def testEncodeArray(self):
    """Send bitmap as array"""
    e = self._encoder(self.n, name=self.name)
    bitmap = [0,0,0,1,0,0,0,0,0]
    out = e.encode(bitmap)
    # Pass-through: the output carries exactly the input's active bits.
    self.assertEqual(out.sum(), sum(bitmap))

    x = e.decode(out)
    self.assertIsInstance(x[0], dict)
    self.assertTrue(self.name in x[0])

  def testEncodeBitArray(self):
    """Send bitmap as numpy bit array"""
    e = self._encoder(self.n, name=self.name)
    bitmap = numpy.zeros(self.n, dtype=numpy.uint8)
    bitmap[3] = 1
    bitmap[5] = 1
    out = e.encode(bitmap)

    expectedSum = sum(bitmap)
    realSum = out.sum()
    self.assertEqual(realSum, expectedSum)

  def testClosenessScores(self):
    """Compare two bitmaps for closeness"""
    e = self._encoder(self.n, name=self.name)

    """Identical => 1"""
    bitmap1 = [0,0,0,1,1,1,0,0,0]
    bitmap2 = [0,0,0,1,1,1,0,0,0]
    out1 = e.encode(bitmap1)
    out2 = e.encode(bitmap2)
    c = e.closenessScores(out1, out2)
    self.assertEqual(c[0], 1.0)

    """No overlap => 0"""
    bitmap1 = [0,0,0,1,1,1,0,0,0]
    bitmap2 = [1,1,1,0,0,0,1,1,1]
    out1 = e.encode(bitmap1)
    out2 = e.encode(bitmap2)
    c = e.closenessScores(out1, out2)
    self.assertEqual(c[0], 0.0)

    """Similar => 4 of 5 match"""
    bitmap1 = [1,0,1,0,1,0,1,0,1]
    bitmap2 = [1,0,0,1,1,0,1,0,1]
    out1 = e.encode(bitmap1)
    out2 = e.encode(bitmap2)
    c = e.closenessScores(out1, out2)
    self.assertEqual(c[0], 0.8)

    """Little => 1 of 5 match"""
    bitmap1 = [1,0,0,1,1,0,1,0,1]
    bitmap2 = [0,1,1,1,0,1,0,1,0]
    out1 = e.encode(bitmap1)
    out2 = e.encode(bitmap2)
    c = e.closenessScores(out1, out2)
    self.assertEqual(c[0], 0.2)

    """Extra active bit => off by 1 of 5"""
    bitmap1 = [1,0,1,0,1,0,1,0,1]
    bitmap2 = [1,0,1,1,1,0,1,0,1]
    out1 = e.encode(bitmap1)
    out2 = e.encode(bitmap2)
    c = e.closenessScores(out1, out2)
    self.assertEqual(c[0], 0.8)

    """Missing active bit => off by 1 of 5"""
    bitmap1 = [1,0,1,0,1,0,1,0,1]
    bitmap2 = [1,0,0,0,1,0,1,0,1]
    out1 = e.encode(bitmap1)
    out2 = e.encode(bitmap2)
    c = e.closenessScores(out1, out2)
    self.assertEqual(c[0], 0.8)

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    """Round-trip the encoder through its capnp proto and compare state."""
    original = self._encoder(self.n, name=self.name)
    originalValue = original.encode([1,0,1,0,1,0,1,0,1])

    proto1 = PassThroughEncoderProto.new_message()
    original.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = PassThroughEncoderProto.read(f)

    encoder = PassThroughEncoder.read(proto2)

    self.assertIsInstance(encoder, PassThroughEncoder)
    self.assertEqual(encoder.name, original.name)
    self.assertEqual(encoder.verbosity, original.verbosity)
    self.assertEqual(encoder.w, original.w)
    self.assertEqual(encoder.n, original.n)
    self.assertEqual(encoder.description, original.description)
    self.assertTrue(numpy.array_equal(encoder.encode([1,0,1,0,1,0,1,0,1]),
                                      originalValue))
    self.assertEqual(original.decode(encoder.encode([1,0,1,0,1,0,1,0,1])),
                     encoder.decode(original.encode([1,0,1,0,1,0,1,0,1])))

    # Feed in a new value and ensure the encodings match
    result1 = original.encode([0,1,0,1,0,1,0,1,0])
    result2 = encoder.encode([0,1,0,1,0,1,0,1,0])
    self.assertTrue(numpy.array_equal(result1, result2))
# Allow running this test module directly.
if __name__ == "__main__":
  unittest.main()
| 5,056 | Python | .py | 131 | 34 | 74 | 0.657943 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,136 | geospatial_coordinate_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/geospatial_coordinate_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import tempfile
import unittest
from nupic.encoders.base import defaultDtype
from nupic.encoders.geospatial_coordinate import GeospatialCoordinateEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.geospatial_coordinate_capnp import (
GeospatialCoordinateEncoderProto
)
# Disable warnings about accessing protected members
# pylint: disable=W0212
class GeospatialCoordinateEncoderTest(unittest.TestCase):
  """Unit tests for GeospatialCoordinateEncoder class"""

  def testCoordinateForPosition(self):
    # 2D (longitude, latitude) maps to a pinned integer coordinate.
    scale = 30 # meters
    encoder = GeospatialCoordinateEncoder(scale, 60)
    coordinate = encoder.coordinateForPosition(
      -122.229194, 37.486782
    )
    self.assertEqual(coordinate.tolist(), [-453549, 150239])

  def testCoordinateForPosition3D(self):
    # Adding an altitude yields a 3D coordinate.
    scale = 30 # meters
    encoder = GeospatialCoordinateEncoder(scale, 60)
    coordinate = encoder.coordinateForPosition(
      -122.229194, 37.486782, 1500
    )
    self.assertEqual(coordinate.tolist(), [-90102, -142918, 128710])

  def testCoordinateForPositionOrigin3D(self):
    scale = 1 # meters
    encoder = GeospatialCoordinateEncoder(scale, 60)
    coordinate = encoder.coordinateForPosition(0,0,0)
    # 6378137 m is the WGS84 semi-major axis; see the defining parameters on
    # http://en.wikipedia.org/wiki/Geodetic_datum#Parameters_for_some_geodetic_systems
    self.assertEqual(coordinate.tolist(), [6378137, 0, 0])

  def testCoordinateForPositionOrigin(self):
    # The 2D origin maps to coordinate [0, 0].
    scale = 30 # meters
    encoder = GeospatialCoordinateEncoder(scale, 60)
    coordinate = encoder.coordinateForPosition(0, 0)
    self.assertEqual(coordinate.tolist(), [0, 0])

  def testRadiusForSpeed(self):
    scale = 30 # meters
    timestep = 60 #seconds
    speed = 50 # meters per second
    encoder = GeospatialCoordinateEncoder(scale, timestep)
    radius = encoder.radiusForSpeed(speed)
    self.assertEqual(radius, 75)

  def testRadiusForSpeed0(self):
    # Zero speed must still produce a small positive radius.
    scale = 30 # meters
    timestep = 60 #seconds
    speed = 0 # meters per second
    n = 999
    w = 27
    encoder = GeospatialCoordinateEncoder(scale, timestep, n=n, w=w)
    radius = encoder.radiusForSpeed(speed)
    self.assertEqual(radius, 3)

  def testRadiusForSpeedInt(self):
    """Test that radius will round to the nearest integer"""
    scale = 30 # meters
    timestep = 62 #seconds
    speed = 25 # meters per second
    encoder = GeospatialCoordinateEncoder(scale, timestep)
    radius = encoder.radiusForSpeed(speed)
    self.assertEqual(radius, 38)

  def testEncodeIntoArray(self):
    # Nearby positions must share more bits than farther ones.
    scale = 30 # meters
    timestep = 60 #seconds
    speed = 2.5 # meters per second
    encoder = GeospatialCoordinateEncoder(scale, timestep,
                                          n=999,
                                          w=25)
    encoding1 = encode(encoder, speed, -122.229194, 37.486782)
    encoding2 = encode(encoder, speed, -122.229294, 37.486882)
    encoding3 = encode(encoder, speed, -122.229294, 37.486982)

    overlap1 = overlap(encoding1, encoding2)
    overlap2 = overlap(encoding1, encoding3)

    self.assertTrue(overlap1 > overlap2)

  def testEncodeIntoArrayAltitude(self):
    # With fixed longitude/latitude, closer altitudes must overlap more.
    scale = 30 # meters
    timestep = 60 # seconds
    speed = 2.5 # meters per second
    longitude, latitude = -122.229294, 37.486782
    encoder = GeospatialCoordinateEncoder(scale, timestep,
                                          n=999,
                                          w=25)
    encoding1 = encode(encoder, speed, longitude, latitude, 0)
    encoding2 = encode(encoder, speed, longitude, latitude, 100)
    encoding3 = encode(encoder, speed, longitude, latitude, 1000)

    overlap1 = overlap(encoding1, encoding2)
    overlap2 = overlap(encoding1, encoding3)

    self.assertGreater(overlap1, overlap2)

  def testEncodeIntoArray3D(self):
    # Varying all three of longitude, latitude, and altitude together.
    scale = 30 # meters
    timestep = 60 # seconds
    speed = 2.5 # meters per second
    encoder = GeospatialCoordinateEncoder(scale, timestep,
                                          n=999,
                                          w=25)
    encoding1 = encode(encoder, speed, -122.229194, 37.486782, 0)
    encoding2 = encode(encoder, speed, -122.229294, 37.486882, 100)
    encoding3 = encode(encoder, speed, -122.229294, 37.486982, 1000)

    overlap1 = overlap(encoding1, encoding2)
    overlap2 = overlap(encoding1, encoding3)

    self.assertGreater(overlap1, overlap2)

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    """Round-trip the encoder through its capnp proto and compare outputs."""
    scale = 30 # meters
    timestep = 60 # seconds
    speed = 2.5 # meters per second
    original = GeospatialCoordinateEncoder(scale, timestep, n=999, w=25)
    encode(original, speed, -122.229194, 37.486782, 0)
    encode(original, speed, -122.229294, 37.486882, 100)

    proto1 = GeospatialCoordinateEncoderProto.new_message()
    original.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = GeospatialCoordinateEncoderProto.read(f)

    encoder = GeospatialCoordinateEncoder.read(proto2)

    self.assertIsInstance(encoder, GeospatialCoordinateEncoder)
    self.assertEqual(encoder.w, original.w)
    self.assertEqual(encoder.n, original.n)
    self.assertEqual(encoder.name, original.name)
    self.assertEqual(encoder.verbosity, original.verbosity)

    # Compare a new value with the original and deserialized.
    encoding3 = encode(original, speed, -122.229294, 37.486982, 1000)
    encoding4 = encode(encoder, speed, -122.229294, 37.486982, 1000)
    self.assertTrue(np.array_equal(encoding3, encoding4))
def encode(encoder, speed, longitude, latitude, altitude=None):
  """Encode (speed, longitude, latitude, altitude) and return the bit array."""
  sdr = np.zeros(encoder.getWidth(), dtype=defaultDtype)
  sample = (speed, longitude, latitude, altitude)
  encoder.encodeIntoArray(sample, sdr)
  return sdr
def overlap(sdr1, sdr2):
  """Fraction of sdr1's active bits that are also active in sdr2."""
  assert sdr1.size == sdr2.size
  return (sdr1 & sdr2).sum() / float(sdr1.sum())
# Allow running this test module directly.
if __name__ == "__main__":
  unittest.main()
| 7,063 | Python | .py | 162 | 37.950617 | 86 | 0.701342 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,137 | delta_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/delta_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for delta encoder"""
import numpy as np
import tempfile
import unittest
from nupic.encoders.delta import (DeltaEncoder,
AdaptiveScalarEncoder)
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.delta_capnp import DeltaEncoderProto
class DeltaEncoderTest(unittest.TestCase):
  """Unit tests for DeltaEncoder class"""


  def setUp(self):
    # Delta encoder under test, plus a plain adaptive scalar encoder used as
    # the reference for what each delta should encode to.
    self._dencoder = DeltaEncoder(w=21, n=100, forced=True)
    self._adaptscalar = AdaptiveScalarEncoder(w=21, n=100, forced=True)


  def testDeltaEncoder(self):
    """simple delta reconstruction test"""
    # Feed an increasing sequence, then lock the encoder state and feed two
    # more values; top-down reconstruction must recover the last input (6).
    for i in range(5):
      encarr = self._dencoder.encodeIntoArray(i, np.zeros(100), learn=True)
    self._dencoder.setStateLock(True)
    for i in range(5, 7):
      encarr = self._dencoder.encodeIntoArray(i, np.zeros(100), learn=True)
    # NOTE(review): encodeIntoArray fills its output argument in place; the
    # value bound to encarr here is its return value -- confirm against the
    # DeltaEncoder API that topDownCompute accepts it.
    res = self._dencoder.topDownCompute(encarr)
    self.assertEqual(res[0].value, 6)
    # Repeated top-down computes on the same encoding must be stable.
    self.assertEqual(self._dencoder.topDownCompute(encarr)[0].value,
                     res[0].value)
    self.assertEqual(self._dencoder.topDownCompute(encarr)[0].scalar,
                     res[0].scalar)
    self.assertTrue(np.array_equal(
        self._dencoder.topDownCompute(encarr)[0].encoding,
        res[0].encoding))


  def testEncodingVerification(self):
    """encoding verification test passed"""
    feedIn = [1, 10, 4, 7, 9, 6, 3, 1]
    expectedOut = [0, 9, -6, 3, 2, -3, -3, -2]
    self._dencoder.setStateLock(False)
    #Check that the deltas are being returned correctly.
    # The delta encoder fed the raw sequence must match the adaptive scalar
    # encoder fed the precomputed deltas, element by element.
    for i in range(len(feedIn)):
      aseencode = np.zeros(100)
      self._adaptscalar.encodeIntoArray(expectedOut[i], aseencode, learn=True)
      delencode = np.zeros(100)
      self._dencoder.encodeIntoArray(feedIn[i], delencode, learn=True)
      self.assertTrue(np.array_equal(delencode[0], aseencode[0]))


  def testLockingState(self):
    """Check that locking the state works correctly"""
    feedIn = [1, 10, 9, 7, 9, 6, 3, 1]
    expectedOut = [0, 9, -6, 3, 2, -3, -3, -2]
    for i in range(len(feedIn)):
      if i == 3:
        # Lock the encoder state from the fourth input onwards.
        self._dencoder.setStateLock(True)
      aseencode = np.zeros(100)
      self._adaptscalar.encodeIntoArray(expectedOut[i], aseencode, learn=True)
      delencode = np.zeros(100)
      if i>=3:
        # Once locked, deltas are taken against the last unlocked value
        # (feedIn[2]) rather than the previous input.
        self._dencoder.encodeIntoArray(feedIn[i]-feedIn[2], delencode,
                                       learn=True)
      else:
        self._dencoder.encodeIntoArray(expectedOut[i], delencode, learn=True)
      self.assertTrue(np.array_equal(delencode[0], aseencode[0]))


  def testEncodeInvalidInputType(self):
    # A string is not a valid scalar input; the encoder must raise TypeError
    # with this exact message.
    try:
      self._dencoder.encode("String")
    except TypeError as e:
      self.assertEqual(
          e.message,
          "Expected a scalar input but got input of type <type 'str'>")
    else:
      self.fail("Should have thrown TypeError during attempt to encode string "
                "with scalar encoder.")


  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    # Round-trip the encoder through its capnp proto and verify both the
    # restored attributes and that subsequent encodings stay identical.
    feedIn = [1, 10, 4, 7, 9, 6, 3, 1]
    expectedOut = [0, 9, -6, 3, 2, -3, -3, -2]
    self._dencoder.setStateLock(False)
    outp = []
    #Check that the deltas are being returned correctly.
    for i in range(len(feedIn)-1):
      aseencode = np.zeros(100)
      self._adaptscalar.encodeIntoArray(expectedOut[i], aseencode, learn=True)
      delencode = np.zeros(100)
      self._dencoder.encodeIntoArray(feedIn[i], delencode, learn=True)
      outp.append(delencode)
    proto1 = DeltaEncoderProto.new_message()
    self._dencoder.write(proto1)
    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = DeltaEncoderProto.read(f)
    encoder = DeltaEncoder.read(proto2)
    self.assertIsInstance(encoder, DeltaEncoder)
    self.assertEqual(encoder.width, self._dencoder.width)
    self.assertEqual(encoder.n, self._dencoder.n)
    self.assertEqual(encoder.name, self._dencoder.name)
    self.assertEqual(encoder._prevAbsolute, self._dencoder._prevAbsolute)
    self.assertEqual(encoder._prevDelta, self._dencoder._prevDelta)
    self.assertEqual(encoder._stateLock, self._dencoder._stateLock)
    # Both encoders must produce identical encodings for the next input.
    delencode = np.zeros(100)
    self._dencoder.encodeIntoArray(feedIn[-1], delencode, learn=True)
    delencode2 = np.zeros(100)
    encoder.encodeIntoArray(feedIn[-1], delencode2, learn=True)
    self.assertTrue(np.array_equal(delencode, delencode2))
# Allow running this test file directly.
if __name__ == "__main__":
  unittest.main()
| 5,547 | Python | .py | 127 | 38.094488 | 79 | 0.679161 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,138 | random_distributed_scalar_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/random_distributed_scalar_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from cStringIO import StringIO
import sys
import tempfile
import unittest2 as unittest
import numpy
from nupic.encoders.base import defaultDtype
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.field_meta import FieldMetaType
from nupic.support.unittesthelpers.algorithm_test_helpers import getSeed
from nupic.encoders import RandomDistributedScalarEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.random_distributed_scalar_capnp import (
RandomDistributedScalarEncoderProto
)
# Disable warnings about accessing protected members
# pylint: disable=W0212
def computeOverlap(x, y):
  """Return the overlap of two binary arrays: the count of positions where
  both x[i] and y[i] are 1."""
  common = x & y
  return common.sum()
def validateEncoder(encoder, subsampling):
  """Check overlap statistics for every bucket pair the encoder has created.

  Returns False as soon as one pair fails encoder._overlapOK, True otherwise.
  Only every `subsampling`-th second index is checked, for speed.
  """
  upperBound = encoder.maxIndex + 1
  for first in range(encoder.minIndex, upperBound):
    for second in range(first + 1, upperBound, subsampling):
      if not encoder._overlapOK(first, second):
        return False
  return True
class RandomDistributedScalarEncoderTest(unittest.TestCase):
  """
  Unit tests for RandomDistributedScalarEncoder class.
  """

  def testEncoding(self):
    """
    Test basic encoding functionality. Create encodings without crashing and
    check they contain the correct number of on and off bits. Check some
    encodings for expected overlap. Test that encodings for old values don't
    change once we generate new buckets.
    """
    # Initialize with non-default parameters and encode with a number close to
    # the offset
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             w=23, n=500, offset=0.0)
    e0 = encoder.encode(-0.1)

    self.assertEqual(e0.sum(), 23, "Number of on bits is incorrect")
    self.assertEqual(e0.size, 500, "Width of the vector is incorrect")
    self.assertEqual(encoder.getBucketIndices(0.0)[0], encoder._maxBuckets / 2,
                     "Offset doesn't correspond to middle bucket")
    self.assertEqual(len(encoder.bucketMap), 1, "Number of buckets is not 1")

    # Encode with a number that is resolution away from offset. Now we should
    # have two buckets and this encoding should be one bit away from e0
    e1 = encoder.encode(1.0)
    self.assertEqual(len(encoder.bucketMap), 2, "Number of buckets is not 2")
    self.assertEqual(e1.sum(), 23, "Number of on bits is incorrect")
    self.assertEqual(e1.size, 500, "Width of the vector is incorrect")
    self.assertEqual(computeOverlap(e0, e1), 22, "Overlap is not equal to w-1")

    # Encode with a number that is resolution*w away from offset. Now we should
    # have many buckets and this encoding should have very little overlap with
    # e0
    e25 = encoder.encode(25.0)
    self.assertGreater(len(encoder.bucketMap), 23,
                       "Number of buckets is not 2")
    self.assertEqual(e25.sum(), 23, "Number of on bits is incorrect")
    self.assertEqual(e25.size, 500, "Width of the vector is incorrect")
    self.assertLess(computeOverlap(e0, e25), 4, "Overlap is too high")

    # Test encoding consistency. The encodings for previous numbers
    # shouldn't change even though we have added additional buckets
    self.assertTrue(numpy.array_equal(e0, encoder.encode(-0.1)),
        "Encodings are not consistent - they have changed after new buckets "
        "have been created")
    self.assertTrue(numpy.array_equal(e1, encoder.encode(1.0)),
        "Encodings are not consistent - they have changed after new buckets "
        "have been created")

  def testMissingValues(self):
    """
    Test that missing values and NaN return all zero's.
    """
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)
    empty = encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
    self.assertEqual(empty.sum(), 0)

    empty = encoder.encode(float("nan"))
    self.assertEqual(empty.sum(), 0)

  def testResolution(self):
    """
    Test that numbers within the same resolution return the same encoding.
    Numbers outside the resolution should return different encodings.
    """
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)

    # Since 23.0 is the first encoded number, it will be the offset.
    # Since resolution is 1, 22.9 and 23.4 should have the same bucket index and
    # encoding.
    e23 = encoder.encode(23.0)
    e23p1 = encoder.encode(23.1)
    e22p9 = encoder.encode(22.9)
    e24 = encoder.encode(24.0)
    self.assertEqual(e23.sum(), encoder.w)
    self.assertEqual((e23 == e23p1).sum(), encoder.getWidth(),
                     "Numbers within resolution don't have the same encoding")
    self.assertEqual((e23 == e22p9).sum(), encoder.getWidth(),
                     "Numbers within resolution don't have the same encoding")
    self.assertNotEqual((e23 == e24).sum(), encoder.getWidth(),
                        "Numbers outside resolution have the same encoding")
    # 22.5 rounds into the bucket below 23.0, so it must differ from e23.
    e22p9 = encoder.encode(22.5)
    self.assertNotEqual((e23 == e22p9).sum(), encoder.getWidth(),
                        "Numbers outside resolution have the same encoding")

  def testMapBucketIndexToNonZeroBits(self):
    """
    Test that mapBucketIndexToNonZeroBits works and that max buckets and
    clipping are handled properly.
    """
    encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150)
    # Set a low number of max buckets
    encoder._initializeBucketMap(10, None)
    encoder.encode(0.0)
    encoder.encode(-7.0)
    encoder.encode(7.0)

    self.assertEqual(len(encoder.bucketMap), encoder._maxBuckets,
                     "_maxBuckets exceeded")
    self.assertTrue(
        numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(-1),
                          encoder.bucketMap[0]),
        "mapBucketIndexToNonZeroBits did not handle negative"
        " index")
    self.assertTrue(
        numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(1000),
                          encoder.bucketMap[9]),
        "mapBucketIndexToNonZeroBits did not handle negative index")

    # Values beyond the bucket range must clip to the first/last bucket.
    e23 = encoder.encode(23.0)
    e6 = encoder.encode(6)
    self.assertEqual((e23 == e6).sum(), encoder.getWidth(),
                     "Values not clipped correctly during encoding")

    ep8 = encoder.encode(-8)
    ep7 = encoder.encode(-7)
    self.assertEqual((ep8 == ep7).sum(), encoder.getWidth(),
                     "Values not clipped correctly during encoding")

    self.assertEqual(encoder.getBucketIndices(-8)[0], 0,
                     "getBucketIndices returned negative bucket index")
    self.assertEqual(encoder.getBucketIndices(23)[0], encoder._maxBuckets-1,
                     "getBucketIndices returned bucket index that is too"
                     " large")

  def testParameterChecks(self):
    """
    Test that some bad construction parameters get handled.
    """
    # n must be >= 6*w
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=1.0, n=int(5.9*21))

    # n must be an int
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=1.0, n=5.9*21)

    # w can't be negative
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=1.0, w=-1)

    # resolution can't be negative
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=-2)

  def testOverlapStatistics(self):
    """
    Check that the overlaps for the encodings are within the expected range.
    Here we ask the encoder to create a bunch of representations under somewhat
    stressful conditions, and then verify they are correct. We rely on the fact
    that the _overlapOK and _countOverlapIndices methods are working correctly.
    """
    seed = getSeed()

    # Generate about 600 encodings. Set n relatively low to increase
    # chance of false overlaps
    encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150,
                                             seed=seed)
    encoder.encode(0.0)
    encoder.encode(-300.0)
    encoder.encode(300.0)
    self.assertTrue(validateEncoder(encoder, subsampling=3),
                    "Illegal overlap encountered in encoder")

  def testGetMethods(self):
    """
    Test that the getWidth, getDescription, and getDecoderOutputFieldTypes
    methods work.
    """
    encoder = RandomDistributedScalarEncoder(name="theName", resolution=1.0, n=500)
    self.assertEqual(encoder.getWidth(), 500,
                     "getWidth doesn't return the correct result")

    self.assertEqual(encoder.getDescription(), [("theName", 0)],
                     "getDescription doesn't return the correct result")

    self.assertEqual(encoder.getDecoderOutputFieldTypes(),
                     (FieldMetaType.float, ),
                     "getDecoderOutputFieldTypes doesn't return the correct"
                     " result")

  def testOffset(self):
    """
    Test that offset is working properly
    """
    # With no explicit offset, the first encoded value becomes the offset.
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)
    encoder.encode(23.0)
    self.assertEqual(encoder._offset, 23.0,
                     "Offset not specified and not initialized to first input")

    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             offset=25.0)
    encoder.encode(23.0)
    self.assertEqual(encoder._offset, 25.0,
                     "Offset not initialized to specified constructor"
                     " parameter")

  def testSeed(self):
    """
    Test that initializing twice with the same seed returns identical encodings
    and different when not specified
    """
    encoder1 = RandomDistributedScalarEncoder(name="encoder1", resolution=1.0,
                                              seed=42)
    encoder2 = RandomDistributedScalarEncoder(name="encoder2", resolution=1.0,
                                              seed=42)
    # seed=-1 requests a random seed; two such encoders must differ.
    encoder3 = RandomDistributedScalarEncoder(name="encoder3", resolution=1.0,
                                              seed=-1)
    encoder4 = RandomDistributedScalarEncoder(name="encoder4", resolution=1.0,
                                              seed=-1)

    e1 = encoder1.encode(23.0)
    e2 = encoder2.encode(23.0)
    e3 = encoder3.encode(23.0)
    e4 = encoder4.encode(23.0)

    self.assertEqual((e1 == e2).sum(), encoder1.getWidth(),
                     "Same seed gives rise to different encodings")

    self.assertNotEqual((e1 == e3).sum(), encoder1.getWidth(),
                        "Different seeds gives rise to same encodings")

    self.assertNotEqual((e3 == e4).sum(), encoder1.getWidth(),
                        "seeds of -1 give rise to same encodings")

  def testCountOverlapIndices(self):
    """
    Test that the internal method _countOverlapIndices works as expected.
    """
    # Create a fake set of encodings.
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             w=5, n=5*20)
    midIdx = encoder._maxBuckets/2
    # Hand-build overlapping bucket representations around the middle bucket.
    encoder.bucketMap[midIdx-2] = numpy.array(range(3, 8))
    encoder.bucketMap[midIdx-1] = numpy.array(range(4, 9))
    encoder.bucketMap[midIdx]   = numpy.array(range(5, 10))
    encoder.bucketMap[midIdx+1] = numpy.array(range(6, 11))
    encoder.bucketMap[midIdx+2] = numpy.array(range(7, 12))
    encoder.bucketMap[midIdx+3] = numpy.array(range(8, 13))
    encoder.minIndex = midIdx - 2
    encoder.maxIndex = midIdx + 3

    # Indices must exist
    with self.assertRaises(ValueError):
      encoder._countOverlapIndices(midIdx-3, midIdx-2)
    with self.assertRaises(ValueError):
      encoder._countOverlapIndices(midIdx-2, midIdx-3)

    # Test some overlaps
    self.assertEqual(encoder._countOverlapIndices(midIdx-2, midIdx-2), 5,
                     "_countOverlapIndices didn't work")
    self.assertEqual(encoder._countOverlapIndices(midIdx-1, midIdx-2), 4,
                     "_countOverlapIndices didn't work")
    self.assertEqual(encoder._countOverlapIndices(midIdx+1, midIdx-2), 2,
                     "_countOverlapIndices didn't work")
    self.assertEqual(encoder._countOverlapIndices(midIdx-2, midIdx+3), 0,
                     "_countOverlapIndices didn't work")

  def testOverlapOK(self):
    """
    Test that the internal method _overlapOK works as expected.
    """
    # Create a fake set of encodings.
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             w=5, n=5*20)
    midIdx = encoder._maxBuckets/2
    encoder.bucketMap[midIdx-3] = numpy.array(range(4, 9)) # Not ok with
                                                           # midIdx-1
    encoder.bucketMap[midIdx-2] = numpy.array(range(3, 8))
    encoder.bucketMap[midIdx-1] = numpy.array(range(4, 9))
    encoder.bucketMap[midIdx]   = numpy.array(range(5, 10))
    encoder.bucketMap[midIdx+1] = numpy.array(range(6, 11))
    encoder.bucketMap[midIdx+2] = numpy.array(range(7, 12))
    encoder.bucketMap[midIdx+3] = numpy.array(range(8, 13))
    encoder.minIndex = midIdx - 3
    encoder.maxIndex = midIdx + 3

    self.assertTrue(encoder._overlapOK(midIdx, midIdx-1),
                    "_overlapOK didn't work")
    self.assertTrue(encoder._overlapOK(midIdx-2, midIdx+3),
                    "_overlapOK didn't work")
    self.assertFalse(encoder._overlapOK(midIdx-3, midIdx-1),
                     "_overlapOK didn't work")

    # We'll just use our own numbers
    self.assertTrue(encoder._overlapOK(100, 50, 0),
                    "_overlapOK didn't work for far values")
    self.assertTrue(encoder._overlapOK(100, 50, encoder._maxOverlap),
                    "_overlapOK didn't work for far values")
    self.assertFalse(encoder._overlapOK(100, 50, encoder._maxOverlap+1),
                     "_overlapOK didn't work for far values")
    self.assertTrue(encoder._overlapOK(50, 50, 5),
                    "_overlapOK didn't work for near values")
    self.assertTrue(encoder._overlapOK(48, 50, 3),
                    "_overlapOK didn't work for near values")
    self.assertTrue(encoder._overlapOK(46, 50, 1),
                    "_overlapOK didn't work for near values")
    self.assertTrue(encoder._overlapOK(45, 50, encoder._maxOverlap),
                    "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(48, 50, 4),
                     "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(48, 50, 2),
                     "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(46, 50, 2),
                     "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(50, 50, 6),
                     "_overlapOK didn't work for near values")

  def testCountOverlap(self):
    """
    Test that the internal method _countOverlap works as expected.
    """
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             n=500)

    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([1, 2, 3, 4, 5, 6])
    self.assertEqual(encoder._countOverlap(r1, r2), 6,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([1, 2, 3, 4, 5, 7])
    self.assertEqual(encoder._countOverlap(r1, r2), 5,
                     "_countOverlap result is incorrect")

    # Overlap is set-like: order of the elements must not matter.
    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([6, 5, 4, 3, 2, 1])
    self.assertEqual(encoder._countOverlap(r1, r2), 6,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([1, 2, 8, 4, 5, 6])
    r2 = numpy.array([1, 2, 3, 4, 9, 6])
    self.assertEqual(encoder._countOverlap(r1, r2), 4,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([1, 2, 3])
    self.assertEqual(encoder._countOverlap(r1, r2), 3,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([7, 8, 9, 10, 11, 12])
    r2 = numpy.array([1, 2, 3, 4, 5, 6])
    self.assertEqual(encoder._countOverlap(r1, r2), 0,
                     "_countOverlap result is incorrect")

  def testVerbosity(self):
    """
    Test that nothing is printed out when verbosity=0
    """
    # Capture stdout while exercising the encoder, then restore it.
    _stdout = sys.stdout
    sys.stdout = _stringio = StringIO()
    encoder = RandomDistributedScalarEncoder(name="mv", resolution=1.0,
                                             verbosity=0)
    output = numpy.zeros(encoder.getWidth(), dtype=defaultDtype)
    encoder.encodeIntoArray(23.0, output)
    encoder.getBucketIndices(23.0)
    sys.stdout = _stdout

    self.assertEqual(len(_stringio.getvalue()), 0,
                     "zero verbosity doesn't lead to zero output")

  def testEncodeInvalidInputType(self):
    # Non-numeric input must raise TypeError.
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             verbosity=0)
    with self.assertRaises(TypeError):
      encoder.encode("String")

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    # Round-trip the encoder through its capnp proto and verify that the
    # restored encoder produces identical encodings and decodings.
    original = RandomDistributedScalarEncoder(
        name="encoder", resolution=1.0, w=23, n=500, offset=0.0)

    originalValue = original.encode(1)

    proto1 = RandomDistributedScalarEncoderProto.new_message()
    original.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = RandomDistributedScalarEncoderProto.read(f)

    encoder = RandomDistributedScalarEncoder.read(proto2)

    self.assertIsInstance(encoder, RandomDistributedScalarEncoder)
    self.assertEqual(encoder.resolution, original.resolution)
    self.assertEqual(encoder.w, original.w)
    self.assertEqual(encoder.n, original.n)
    self.assertEqual(encoder.name, original.name)
    self.assertEqual(encoder.verbosity, original.verbosity)
    self.assertEqual(encoder.minIndex, original.minIndex)
    self.assertEqual(encoder.maxIndex, original.maxIndex)
    encodedFromOriginal = original.encode(1)
    encodedFromNew = encoder.encode(1)
    self.assertTrue(numpy.array_equal(encodedFromNew, originalValue))
    self.assertEqual(original.decode(encodedFromNew),
                     encoder.decode(encodedFromOriginal))
    self.assertEqual(original.random.getSeed(), encoder.random.getSeed())

    for key, value in original.bucketMap.items():
      self.assertTrue(numpy.array_equal(value, encoder.bucketMap[key]))
# Allow running this test file directly.
if __name__ == "__main__":
  unittest.main()
| 19,711 | Python | .py | 410 | 40.073171 | 83 | 0.667638 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,139 | sdrcategory_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/sdrcategory_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for SDR Category encoder"""
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
import tempfile
import unittest
from nupic.encoders.sdr_category import SDRCategoryEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.sdr_category_capnp import SDRCategoryEncoderProto
class SDRCategoryEncoderTest(unittest.TestCase):
  """Unit tests for SDRCategory encoder class"""


  def testSDRCategoryEncoder(self):
    """Exercise encode/decode/topDownCompute, unknown categories, autogrow,
    duplicate-category detection and pickle round-trips."""
    # make sure we have > 16 categories so that we have to grow our sdrs
    categories = ["ES", "S1", "S2", "S3", "S4", "S5", "S6", "S7", "S8",
                  "S9","S10", "S11", "S12", "S13", "S14", "S15", "S16",
                  "S17", "S18", "S19", "GB", "US"]

    fieldWidth = 100
    bitsOn = 10

    s = SDRCategoryEncoder(n=fieldWidth, w=bitsOn, categoryList = categories,
                           name="foo", verbosity=0, forced=True)

    # internal check
    self.assertEqual(s.sdrs.shape, (32, fieldWidth))

    # ES
    es = s.encode("ES")
    self.assertEqual(es.sum(), bitsOn)
    self.assertEqual(es.shape, (fieldWidth,))
    self.assertEqual(es.sum(), bitsOn)

    x = s.decode(es)
    self.assertIsInstance(x[0], dict)
    self.assertTrue("foo" in x[0])
    self.assertEqual(x[0]["foo"][1], "ES")

    topDown = s.topDownCompute(es)
    self.assertEqual(topDown.value, "ES")
    self.assertEqual(topDown.scalar, 1)
    self.assertEqual(topDown.encoding.sum(), bitsOn)

    # ----------------------------------------------------------------------
    # Test topdown compute
    for v in categories:
      output = s.encode(v)
      topDown = s.topDownCompute(output)
      self.assertEqual(topDown.value, v)
      self.assertEqual(topDown.scalar, s.getScalars(v)[0])

      bucketIndices = s.getBucketIndices(v)
      topDown = s.getBucketInfo(bucketIndices)[0]
      self.assertEqual(topDown.value, v)
      self.assertEqual(topDown.scalar, s.getScalars(v)[0])
      self.assertTrue(numpy.array_equal(topDown.encoding, output))
      self.assertEqual(topDown.value, s.getBucketValues()[bucketIndices[0]])

    # Unknown category still gets a valid (but distinct) encoding.
    unknown = s.encode("ASDFLKJLK")
    self.assertEqual(unknown.sum(), bitsOn)
    self.assertEqual(unknown.shape, (fieldWidth,))
    self.assertEqual(unknown.sum(), bitsOn)
    x = s.decode(unknown)
    self.assertEqual(x[0]["foo"][1], "<UNKNOWN>")

    topDown = s.topDownCompute(unknown)
    self.assertEqual(topDown.value, "<UNKNOWN>")
    self.assertEqual(topDown.scalar, 0)

    # US
    us = s.encode("US")
    self.assertEqual(us.sum(), bitsOn)
    self.assertEqual(us.shape, (fieldWidth,))
    self.assertEqual(us.sum(), bitsOn)
    x = s.decode(us)
    self.assertEqual(x[0]["foo"][1], "US")

    topDown = s.topDownCompute(us)
    self.assertEqual(topDown.value, "US")
    self.assertEqual(topDown.scalar, len(categories))
    self.assertEqual(topDown.encoding.sum(), bitsOn)

    # empty field
    empty = s.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
    self.assertEqual(empty.sum(), 0)
    self.assertEqual(empty.shape, (fieldWidth,))
    self.assertEqual(empty.sum(), 0)

    # make sure it can still be decoded after a change (single bit flip)
    bit = s.random.getUInt32(s.getWidth()-1)
    us[bit] = 1 - us[bit]
    x = s.decode(us)
    self.assertEqual(x[0]["foo"][1], "US")

    # add two reps together
    newrep = ((us + unknown) > 0).astype(numpy.uint8)
    x = s.decode(newrep)
    name = x[0]["foo"][1]
    if name != "US <UNKNOWN>" and name != "<UNKNOWN> US":
      # Work out which unexpected category was decoded, to aid debugging.
      othercategory = name.replace("US", "")
      othercategory = othercategory.replace("<UNKNOWN>", "")
      othercategory = othercategory.replace(" ", "")
      otherencoded = s.encode(othercategory)
      raise RuntimeError("Decoding failure")

    # serialization
    # TODO: Remove pickle-based serialization tests -- issues #1419 and #1420
    import cPickle as pickle
    t = pickle.loads(pickle.dumps(s))
    self.assertTrue((t.encode("ES") == es).all())
    self.assertTrue((t.encode("GB") == s.encode("GB")).all())

    # Test autogrow
    s = SDRCategoryEncoder(n=fieldWidth, w=bitsOn, categoryList=None,
                           name="bar", forced=True)

    es = s.encode("ES")
    self.assertEqual(es.shape, (fieldWidth,))
    self.assertEqual(es.sum(), bitsOn)
    x = s.decode(es)
    self.assertIsInstance(x[0], dict)
    self.assertTrue("bar" in x[0])
    self.assertEqual(x[0]["bar"][1], "ES")

    us = s.encode("US")
    self.assertEqual(us.shape, (fieldWidth,))
    self.assertEqual(us.sum(), bitsOn)
    x = s.decode(us)
    self.assertEqual(x[0]["bar"][1], "US")

    # Encoding a known category again must be deterministic.
    es2 = s.encode("ES")
    self.assertTrue(numpy.array_equal(es2, es))

    us2 = s.encode("US")
    self.assertTrue(numpy.array_equal(us2, us))

    # make sure it can still be decoded after a change
    bit = s.random.getUInt32(s.getWidth() - 1)
    us[bit] = 1 - us[bit]
    x = s.decode(us)
    self.assertEqual(x[0]["bar"][1], "US")

    # add two reps together
    newrep = ((us + es) > 0).astype(numpy.uint8)
    x = s.decode(newrep)
    name = x[0]["bar"][1]
    self.assertTrue(name == "US ES" or name == "ES US")

    # Catch duplicate categories
    caughtException = False
    newcategories = categories[:]
    self.assertTrue("ES" in newcategories)
    newcategories.append("ES")
    try:
      s = SDRCategoryEncoder(n=fieldWidth, w=bitsOn,
                             categoryList=newcategories, name="foo",
                             forced=True)
    except RuntimeError:
      # Fixed from Python-2-only "except RuntimeError, e" syntax; the bound
      # exception object was unused.
      caughtException = True
    finally:
      if not caughtException:
        raise RuntimeError("Did not catch duplicate category in constructor")

    # serialization for autogrow encoder
    gs = s.encode("GS")
    # TODO: Remove as part of issues #1419 and #1420
    t = pickle.loads(pickle.dumps(s))
    self.assertTrue(numpy.array_equal(t.encode("ES"), es))
    self.assertTrue(numpy.array_equal(t.encode("GS"), gs))

  # -----------------------------------------------------------------------


  def testAutogrow(self):
    """testing auto-grow"""
    fieldWidth = 100
    bitsOn = 10

    s = SDRCategoryEncoder(n=fieldWidth, w=bitsOn, name="foo", verbosity=2,
                           forced=True)

    encoded = numpy.zeros(fieldWidth)
    self.assertEqual(s.topDownCompute(encoded).value, "<UNKNOWN>")

    s.encodeIntoArray("catA", encoded)
    self.assertEqual(encoded.sum(), bitsOn)
    self.assertEqual(s.getScalars("catA"), 1)
    catA = encoded.copy()

    s.encodeIntoArray("catB", encoded)
    self.assertEqual(encoded.sum(), bitsOn)
    self.assertEqual(s.getScalars("catB"), 2)
    catB = encoded.copy()

    self.assertEqual(s.topDownCompute(catA).value, "catA")
    self.assertEqual(s.topDownCompute(catB).value, "catB")

    s.encodeIntoArray(SENTINEL_VALUE_FOR_MISSING_DATA, encoded)
    self.assertEqual(sum(encoded), 0)
    self.assertEqual(s.topDownCompute(encoded).value, "<UNKNOWN>")

    #Test Disabling Learning and autogrow
    # With learning off, a new category must NOT be added (scalar stays 0).
    s.setLearning(False)
    s.encodeIntoArray("catC", encoded)
    self.assertEqual(encoded.sum(), bitsOn)
    self.assertEqual(s.getScalars("catC"), 0)
    self.assertEqual(s.topDownCompute(encoded).value, "<UNKNOWN>")

    s.setLearning(True)
    s.encodeIntoArray("catC", encoded)
    self.assertEqual(encoded.sum(), bitsOn)
    self.assertEqual(s.getScalars("catC"), 3)
    self.assertEqual(s.topDownCompute(encoded).value, "catC")


  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    # Round-trip both a fixed-category encoder and an autogrow encoder
    # through their capnp protos and verify restored state and encodings.
    categories = ["ES", "S1", "S2", "S3", "S4", "S5", "S6", "S7", "S8",
                  "S9","S10", "S11", "S12", "S13", "S14", "S15", "S16",
                  "S17", "S18", "S19", "GB", "US"]

    fieldWidth = 100
    bitsOn = 10

    original = SDRCategoryEncoder(n=fieldWidth, w=bitsOn,
                                  categoryList=categories,
                                  name="foo", verbosity=0, forced=True)

    # internal check
    self.assertEqual(original.sdrs.shape, (32, fieldWidth))

    # ES
    es = original.encode("ES")
    self.assertEqual(es.sum(), bitsOn)
    self.assertEqual(es.shape, (fieldWidth,))
    self.assertEqual(es.sum(), bitsOn)

    decoded = original.decode(es)

    proto1 = SDRCategoryEncoderProto.new_message()
    original.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = SDRCategoryEncoderProto.read(f)

    encoder = SDRCategoryEncoder.read(proto2)

    self.assertIsInstance(encoder, SDRCategoryEncoder)
    self.assertEqual(encoder.n, original.n)
    self.assertEqual(encoder.w, original.w)
    self.assertEqual(encoder.verbosity, original.verbosity)
    self.assertEqual(encoder.description, original.description)
    self.assertEqual(encoder.name, original.name)
    self.assertDictEqual(encoder.categoryToIndex, original.categoryToIndex)
    self.assertTrue(numpy.array_equal(encoder.encode("ES"), es))
    self.assertEqual(original.decode(encoder.encode("ES")),
                     encoder.decode(original.encode("ES")))
    self.assertEqual(decoded, encoder.decode(es))

    # Test autogrow serialization
    autogrow = SDRCategoryEncoder(n=fieldWidth, w=bitsOn, categoryList = None,
                                  name="bar", forced=True)

    es = autogrow.encode("ES")
    us = autogrow.encode("US")
    gs = autogrow.encode("GS")

    proto1 = SDRCategoryEncoderProto.new_message()
    autogrow.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = SDRCategoryEncoderProto.read(f)

    t = SDRCategoryEncoder.read(proto2)
    self.assertTrue(numpy.array_equal(t.encode("ES"), es))
    self.assertTrue(numpy.array_equal(t.encode("US"), us))
    self.assertTrue(numpy.array_equal(t.encode("GS"), gs))
# Allow running this test file directly.
if __name__ == "__main__":
  unittest.main()
| 10,958 | Python | .py | 257 | 36.614786 | 78 | 0.653405 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,140 | sparse_pass_through_encoder_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/sparse_pass_through_encoder_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for BitmapArray Encoder."""
CL_VERBOSITY = 0
import tempfile
import unittest2 as unittest
import numpy
from nupic.encoders import SparsePassThroughEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.sparse_pass_through_capnp import (
SparsePassThroughEncoderProto)
class SparsePassThroughEncoderTest(unittest.TestCase):
  """Unit tests for SparsePassThroughEncoder class."""

  def setUp(self):
    # Common fixture: a 25-bit output space and the field name used by
    # decode() results.
    self.n = 25
    self.name = "foo"
    self._encoder = SparsePassThroughEncoder

  def testEncodeArray(self):
    """Send bitmap as array of indicies"""
    encoder = self._encoder(self.n, name=self.name)
    indices = [2, 7, 15, 18, 23]
    encoding = encoder.encode(indices)
    # Every listed index contributes exactly one active bit.
    self.assertEqual(encoding.sum(), len(indices))
    decoded = encoder.decode(encoding)
    self.assertIsInstance(decoded[0], dict)
    self.assertTrue(self.name in decoded[0])

  def testEncodeArrayInvalidType(self):
    encoder = self._encoder(self.n, 1)
    dense = numpy.zeros(self.n)
    # A value other than 0/1 in a dense input must be rejected.
    dense[0] = 12
    with self.assertRaises(ValueError):
      encoder.encode(dense)

  def testEncodeArrayInvalidW(self):
    """Send bitmap as array of indicies"""
    encoder = self._encoder(self.n, 3, name=self.name)
    # With w=3 fixed, both too few and too many active indices raise.
    with self.assertRaises(ValueError):
      encoder.encode([2])
    with self.assertRaises(ValueError):
      encoder.encode([2, 7, 15, 18, 23])

  def testClosenessScores(self):
    """Compare two bitmaps for closeness"""
    encoder = self._encoder(self.n, name=self.name)
    cases = (
        # (first bitmap, second bitmap, expected closeness score)
        ([2, 7, 15, 18, 23], [2, 7, 15, 18, 23], 1.0),      # identical => 1
        ([2, 7, 15, 18, 23], [3, 9, 14, 19, 24], 0.0),      # no overlap => 0
        ([2, 7, 15, 18, 23], [2, 7, 17, 18, 23], 0.8),      # 4 of 5 match
        ([2, 7, 15, 18, 23], [3, 7, 17, 19, 24], 0.2),      # 1 of 5 match
        ([2, 7, 15, 18, 23], [2, 7, 11, 15, 18, 23], 0.8),  # extra active bit
        ([2, 7, 15, 18, 23], [2, 7, 18, 23], 0.8),          # missing active bit
    )
    for first, second, expectedScore in cases:
      scores = encoder.closenessScores(encoder.encode(first),
                                       encoder.encode(second))
      self.assertEqual(scores[0], expectedScore)

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    original = self._encoder(self.n, name=self.name)
    bitmap = [1, 0, 1, 0, 1, 0, 1, 0, 1]
    originalValue = original.encode(bitmap)
    proto1 = SparsePassThroughEncoderProto.new_message()
    original.write(proto1)
    # Round-trip the proto through a temp file and deserialize it.
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = SparsePassThroughEncoderProto.read(f)
    restored = SparsePassThroughEncoder.read(proto2)
    self.assertIsInstance(restored, SparsePassThroughEncoder)
    # All configuration fields must survive serialization.
    self.assertEqual(restored.name, original.name)
    self.assertEqual(restored.verbosity, original.verbosity)
    self.assertEqual(restored.w, original.w)
    self.assertEqual(restored.n, original.n)
    self.assertEqual(restored.description, original.description)
    self.assertTrue(numpy.array_equal(restored.encode(bitmap), originalValue))
    self.assertEqual(original.decode(restored.encode(bitmap)),
                     restored.decode(original.encode(bitmap)))
    # Feed in a new value and ensure the encodings match
    other = [0, 1, 0, 1, 0, 1, 0, 1, 0]
    self.assertTrue(numpy.array_equal(original.encode(other),
                                      restored.encode(other)))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 5,186 | Python | .py | 135 | 33.748148 | 74 | 0.667797 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,141 | scalar_test.py | numenta_nupic-legacy/tests/unit/nupic/encoders/scalar_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for date encoder"""
import numpy
import itertools
import tempfile
from nupic.encoders.base import defaultDtype
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
import unittest
from nupic.encoders.scalar import ScalarEncoder
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.scalar_capnp import ScalarEncoderProto
class ScalarEncoderTest(unittest.TestCase):
  """Unit tests for ScalarEncoder class"""

  def setUp(self):
    # use of forced is not recommended, but used here for readability, see
    # scalar.py
    self._l = ScalarEncoder(name="scalar", n=14, w=3, minval=1, maxval=8,
                            periodic=True, forced=True)

  def testScalarEncoder(self):
    """Testing ScalarEncoder..."""
    # -------------------------------------------------------------------------
    # test missing values
    mv = ScalarEncoder(name="mv", n=14, w=3, minval=1, maxval=8,
                       periodic=False, forced=True)
    empty = mv.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
    self.assertEqual(empty.sum(), 0)

  def testNaNs(self):
    """test NaNs"""
    # NaN input must encode to an all-zero (empty) representation.
    mv = ScalarEncoder(name="mv", n=14, w=3, minval=1, maxval=8,
                       periodic=False, forced=True)
    empty = mv.encode(float("nan"))
    self.assertEqual(empty.sum(), 0)

  def testBottomUpEncodingPeriodicEncoder(self):
    """Test bottom-up encoding for a Periodic encoder"""
    l = ScalarEncoder(n=14, w=3, minval=1, maxval=8, periodic=True,
                      forced=True)
    self.assertEqual(l.getDescription(), [("[1:8]", 0)])
    l = ScalarEncoder(name="scalar", n=14, w=3, minval=1, maxval=8,
                      periodic=True, forced=True)
    self.assertEqual(l.getDescription(), [("scalar", 0)])
    self.assertTrue(numpy.array_equal(
        l.encode(3),
        numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    dtype=defaultDtype)))
    self.assertTrue(numpy.array_equal(l.encode(3.1), l.encode(3)))
    self.assertTrue(numpy.array_equal(
        l.encode(3.5),
        numpy.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                    dtype=defaultDtype)))
    self.assertTrue(numpy.array_equal(l.encode(3.6), l.encode(3.5)))
    self.assertTrue(numpy.array_equal(l.encode(3.7), l.encode(3.5)))
    self.assertTrue(numpy.array_equal(
        l.encode(4),
        numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
                    dtype=defaultDtype)))
    # Periodic encoders wrap around the edges of the output space.
    self.assertTrue(numpy.array_equal(
        l.encode(1),
        numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                    dtype=defaultDtype)))
    self.assertTrue(numpy.array_equal(
        l.encode(1.5),
        numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    dtype=defaultDtype)))
    self.assertTrue(numpy.array_equal(
        l.encode(7),
        numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                    dtype=defaultDtype)))
    self.assertTrue(numpy.array_equal(
        l.encode(7.5),
        numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
                    dtype=defaultDtype)))
    self.assertEqual(l.resolution, 0.5)
    self.assertEqual(l.radius, 1.5)

  def testCreateResolution(self):
    """Test that we get the same encoder when we construct it using resolution
    instead of n
    """
    l = self._l
    d = l.__dict__
    l = ScalarEncoder(name="scalar", resolution=0.5, w=3, minval=1, maxval=8,
                      periodic=True, forced=True)
    self.assertEqual(l.__dict__, d)
    # Test that we get the same encoder when we construct it using radius
    # instead of n
    l = ScalarEncoder(name="scalar", radius=1.5, w=3, minval=1, maxval=8,
                      periodic=True, forced=True)
    self.assertEqual(l.__dict__, d)

  def testDecodeAndResolution(self):
    """Test the input description generation, top-down compute, and bucket
    support on a periodic encoder
    """
    l = self._l
    # Sweep the input range at quarter-resolution steps and check that
    # encode/decode/topDownCompute round-trip within one resolution.
    v = l.minval
    while v < l.maxval:
      output = l.encode(v)
      decoded = l.decode(output)
      (fieldsDict, fieldNames) = decoded
      self.assertEqual(len(fieldsDict), 1)
      self.assertEqual(len(fieldNames), 1)
      self.assertEqual(fieldNames, fieldsDict.keys())
      (ranges, _) = fieldsDict.values()[0]
      self.assertEqual(len(ranges), 1)
      (rangeMin, rangeMax) = ranges[0]
      self.assertEqual(rangeMin, rangeMax)
      self.assertLess(abs(rangeMin - v), l.resolution)
      topDown = l.topDownCompute(output)[0]
      self.assertTrue(numpy.array_equal(topDown.encoding, output))
      self.assertLessEqual(abs(topDown.value - v), l.resolution / 2)
      # Test bucket support
      bucketIndices = l.getBucketIndices(v)
      topDown = l.getBucketInfo(bucketIndices)[0]
      self.assertLessEqual(abs(topDown.value - v), l.resolution / 2)
      self.assertEqual(topDown.value, l.getBucketValues()[bucketIndices[0]])
      self.assertEqual(topDown.scalar, topDown.value)
      self.assertTrue(numpy.array_equal(topDown.encoding, output))
      # Next value
      v += l.resolution / 4
    # -----------------------------------------------------------------------
    # Test the input description generation on a large number, periodic encoder
    l = ScalarEncoder(name='scalar', radius=1.5, w=3, minval=1, maxval=8,
                      periodic=True, forced=True)
    # Test with a "hole"
    decoded = l.decode(numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
    (fieldsDict, fieldNames) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [7.5, 7.5]))
    # Test with something wider than w, and with a hole, and wrapped
    decoded = l.decode(numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
    (fieldsDict, fieldNames) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 2)
    self.assertTrue(numpy.array_equal(ranges[0], [7.5, 8]))
    self.assertTrue(numpy.array_equal(ranges[1], [1, 1]))
    # Test with something wider than w, no hole
    decoded = l.decode(numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
    (fieldsDict, fieldNames) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [1.5, 2.5]))
    # Test with 2 ranges
    decoded = l.decode(numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]))
    (fieldsDict, fieldNames) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 2)
    self.assertTrue(numpy.array_equal(ranges[0], [1.5, 1.5]))
    self.assertTrue(numpy.array_equal(ranges[1], [5.5, 6.0]))
    # Test with 2 ranges, 1 of which is narrower than w
    decoded = l.decode(numpy.array([0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]))
    (fieldsDict, fieldNames) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    # BUG FIX: was `self.assertTrue(len(ranges), 2)`, which always passes
    # because 2 was silently taken as assertTrue's `msg` argument.
    self.assertEqual(len(ranges), 2)
    self.assertTrue(numpy.array_equal(ranges[0], [1.5, 1.5]))
    self.assertTrue(numpy.array_equal(ranges[1], [5.5, 6.0]))

  def testCloseness(self):
    """Test closenessScores for a periodic encoder"""
    encoder = ScalarEncoder(w=7, minval=0, maxval=7, radius=1, periodic=True,
                            name="day of week", forced=True)
    scores = encoder.closenessScores((2, 4, 7), (4, 2, 1), fractional=False)
    for actual, score in itertools.izip((2, 2, 1), scores):
      self.assertEqual(actual, score)

  def testNonPeriodicBottomUp(self):
    """Test Non-periodic encoder bottom-up"""
    l = ScalarEncoder(name="scalar", n=14, w=5, minval=1, maxval=10,
                      periodic=False, forced=True)
    self.assertTrue(numpy.array_equal(
        l.encode(1),
        numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    dtype=defaultDtype)))
    self.assertTrue(numpy.array_equal(
        l.encode(2),
        numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    dtype=defaultDtype)))
    self.assertTrue(numpy.array_equal(
        l.encode(10),
        numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                    dtype=defaultDtype)))
    # Test that we get the same encoder when we construct it using resolution
    # instead of n
    d = l.__dict__
    l = ScalarEncoder(name="scalar", resolution=1, w=5, minval=1, maxval=10,
                      periodic=False, forced=True)
    self.assertEqual(l.__dict__, d)
    # Test that we get the same encoder when we construct it using radius
    # instead of n
    l = ScalarEncoder(name="scalar", radius=5, w=5, minval=1, maxval=10,
                      periodic=False, forced=True)
    self.assertEqual(l.__dict__, d)
    # -------------------------------------------------------------------------
    # Test the input description generation and topDown decoding of a
    # non-periodic encoder
    v = l.minval
    while v < l.maxval:
      output = l.encode(v)
      decoded = l.decode(output)
      (fieldsDict, _) = decoded
      self.assertEqual(len(fieldsDict), 1)
      (ranges, _) = fieldsDict.values()[0]
      self.assertEqual(len(ranges), 1)
      (rangeMin, rangeMax) = ranges[0]
      self.assertEqual(rangeMin, rangeMax)
      self.assertLess(abs(rangeMin - v), l.resolution)
      topDown = l.topDownCompute(output)[0]
      self.assertTrue(numpy.array_equal(topDown.encoding, output))
      self.assertLessEqual(abs(topDown.value - v), l.resolution)
      # Test bucket support
      bucketIndices = l.getBucketIndices(v)
      topDown = l.getBucketInfo(bucketIndices)[0]
      self.assertLessEqual(abs(topDown.value - v), l.resolution / 2)
      self.assertEqual(topDown.scalar, topDown.value)
      self.assertTrue(numpy.array_equal(topDown.encoding, output))
      # Next value
      v += l.resolution / 4
    # Make sure we can fill in holes
    decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]))
    (fieldsDict, _) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [10, 10]))
    decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1]))
    (fieldsDict, _) = decoded
    self.assertEqual(len(fieldsDict), 1)
    (ranges, _) = fieldsDict.values()[0]
    self.assertEqual(len(ranges), 1)
    self.assertTrue(numpy.array_equal(ranges[0], [10, 10]))
    # Test min and max
    l = ScalarEncoder(name="scalar", n=14, w=3, minval=1, maxval=10,
                      periodic=False, forced=True)
    decoded = l.topDownCompute(
        numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]))[0]
    self.assertEqual(decoded.value, 10)
    decoded = l.topDownCompute(
        numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))[0]
    self.assertEqual(decoded.value, 1)
    # Make sure only the last and first encoding encodes to max and min, and
    # there is no value greater than max or min
    l = ScalarEncoder(name="scalar", n=140, w=3, minval=1, maxval=141,
                      periodic=False, forced=True)
    # NOTE(review): with n=140 and w=3 there are 138 sliding windows
    # (i = 0..137), so range(137) never exercises the final window and the
    # `i == 137` escape below is dead -- confirm and consider range(138).
    for i in range(137):
      iterlist = [0 for _ in range(140)]
      for j in range(i, i + 3):
        iterlist[j] = 1
      npar = numpy.array(iterlist)
      decoded = l.topDownCompute(npar)[0]
      self.assertLessEqual(decoded.value, 141)
      self.assertGreaterEqual(decoded.value, 1)
      self.assertTrue(decoded.value < 141 or i == 137)
      self.assertTrue(decoded.value > 1 or i == 0)
    # -------------------------------------------------------------------------
    # Test the input description generation and top-down compute on a small
    # number non-periodic encoder
    l = ScalarEncoder(name="scalar", n=15, w=3, minval=.001, maxval=.002,
                      periodic=False, forced=True)
    v = l.minval
    while v < l.maxval:
      output = l.encode(v)
      decoded = l.decode(output)
      (fieldsDict, _) = decoded
      self.assertEqual(len(fieldsDict), 1)
      (ranges, _) = fieldsDict.values()[0]
      self.assertEqual(len(ranges), 1)
      (rangeMin, rangeMax) = ranges[0]
      self.assertEqual(rangeMin, rangeMax)
      self.assertLess(abs(rangeMin - v), l.resolution)
      topDown = l.topDownCompute(output)[0].value
      self.assertLessEqual(abs(topDown - v), l.resolution / 2)
      v += l.resolution / 4
    # -------------------------------------------------------------------------
    # Test the input description generation on a large number, non-periodic
    # encoder
    l = ScalarEncoder(name="scalar", n=15, w=3, minval=1, maxval=1000000000,
                      periodic=False, forced=True)
    v = l.minval
    while v < l.maxval:
      output = l.encode(v)
      decoded = l.decode(output)
      (fieldsDict, _) = decoded
      self.assertEqual(len(fieldsDict), 1)
      (ranges, _) = fieldsDict.values()[0]
      self.assertEqual(len(ranges), 1)
      (rangeMin, rangeMax) = ranges[0]
      self.assertEqual(rangeMin, rangeMax)
      self.assertLess(abs(rangeMin - v), l.resolution)
      topDown = l.topDownCompute(output)[0].value
      self.assertLessEqual(abs(topDown - v), l.resolution / 2)
      v += l.resolution / 4

  def testEncodeInvalidInputType(self):
    # Non-numeric input must raise, not silently encode.
    encoder = ScalarEncoder(name="enc", n=14, w=3, minval=1, maxval=8,
                            periodic=False, forced=True)
    with self.assertRaises(TypeError):
      encoder.encode("String")

  def testGetBucketInfoIntResolution(self):
    """Ensures that passing resolution as an int doesn't truncate values."""
    encoder = ScalarEncoder(w=3, resolution=1, minval=1, maxval=8,
                            periodic=True, forced=True)
    self.assertEqual(4.5,
                     encoder.topDownCompute(encoder.encode(4.5))[0].scalar)

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testReadWrite(self):
    """Test ScalarEncoder Cap'n Proto serialization implementation."""
    originalValue = self._l.encode(1)
    proto1 = ScalarEncoderProto.new_message()
    self._l.write(proto1)
    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = ScalarEncoderProto.read(f)
    encoder = ScalarEncoder.read(proto2)
    self.assertIsInstance(encoder, ScalarEncoder)
    self.assertEqual(encoder.w, self._l.w)
    self.assertEqual(encoder.minval, self._l.minval)
    self.assertEqual(encoder.maxval, self._l.maxval)
    self.assertEqual(encoder.periodic, self._l.periodic)
    self.assertEqual(encoder.n, self._l.n)
    self.assertEqual(encoder.radius, self._l.radius)
    self.assertEqual(encoder.resolution, self._l.resolution)
    self.assertEqual(encoder.name, self._l.name)
    self.assertEqual(encoder.verbosity, self._l.verbosity)
    self.assertEqual(encoder.clipInput, self._l.clipInput)
    self.assertTrue(numpy.array_equal(encoder.encode(1), originalValue))
    self.assertEqual(self._l.decode(encoder.encode(1)),
                     encoder.decode(self._l.encode(1)))
    # Feed in a new value and ensure the encodings match
    result1 = self._l.encode(7)
    result2 = encoder.encode(7)
    self.assertTrue(numpy.array_equal(result1, result2))

  def testSettingNWithMaxvalMinvalNone(self):
    """Setting n when maxval/minval = None creates instance."""
    encoder = ScalarEncoder(3, None, None, name="scalar",
                            n=14, radius=0, resolution=0, forced=True)
    self.assertIsInstance(encoder, ScalarEncoder)

  def testSettingScalarAndResolution(self):
    """Setting both scalar and resolution not allowed."""
    with self.assertRaises(ValueError):
      ScalarEncoder(3, None, None, name="scalar", n=0, radius=None,
                    resolution=0.5, forced=True)

  def testSettingRadiusWithMaxvalMinvalNone(self):
    """If radius when maxval/minval = None creates instance."""
    encoder = ScalarEncoder(3, None, None, name="scalar",
                            n=0, radius=1.5, resolution=0, forced=True)
    self.assertIsInstance(encoder, ScalarEncoder)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 17,309 | Python | .py | 374 | 39.390374 | 79 | 0.627714 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,142 | examples_test.py | numenta_nupic-legacy/tests/unit/nupic/docs/examples_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for all quick-start examples in the NuPIC docs."""
import os
import sys
import unittest2 as unittest
import numpy as np
from numpy.testing import assert_approx_equal
import random
# Cap on how many predictions each example generates (keeps the test fast).
MAX_PREDICTIONS = 100
# Seed both RNGs so the examples produce deterministic, comparable output.
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
def _getPredictionsGenerator(examplesDir, exampleName):
  """
  Return the ``runHotgym`` predictions generator of a quick-start example.

  .. note::
    The examples are not part of the nupic package, so the example's
    directory is prepended to ``sys.path`` before it is imported.

  :param examplesDir:
    (str) path to the example parent directory.
  :param exampleName:
    (str) name of the example. E.g: "opf", "network", "algo".
  :return predictionsGenerator:
    (function) predictions generator functions.
  """
  exampleDir = os.path.join(examplesDir, exampleName)
  sys.path.insert(0, exampleDir)
  moduleName = "complete-{}-example".format(exampleName)
  exampleModule = __import__(moduleName, fromlist=["runHotgym"])
  return exampleModule.runHotgym
class ExamplesTest(unittest.TestCase):
  """Unit tests for all quick-start examples."""

  # One result list per example flavor, filled once in setUpClass().
  examples = ["opf", "network", "algo"]
  oneStepPredictions = {example: [] for example in examples}
  oneStepConfidences = {example: [] for example in examples}
  fiveStepPredictions = {example: [] for example in examples}
  fiveStepConfidences = {example: [] for example in examples}

  docsTestsPath = os.path.dirname(os.path.abspath(__file__))
  examplesDir = os.path.join(docsTestsPath, os.path.pardir,
                             os.path.pardir, os.path.pardir,
                             os.path.pardir, "docs", "examples")

  @classmethod
  def setUpClass(cls):
    """Get the predictions and prediction confidences for all examples."""
    for example in cls.examples:
      predictionGenerator = _getPredictionsGenerator(cls.examplesDir, example)
      for prediction in predictionGenerator(MAX_PREDICTIONS):
        cls.oneStepPredictions[example].append(prediction[0])
        cls.oneStepConfidences[example].append(prediction[1])
        cls.fiveStepPredictions[example].append(prediction[2])
        cls.fiveStepConfidences[example].append(prediction[3])

  def _assertSeriesApproxEqual(self, left, right, errMsg):
    """Assert two result series are element-wise approximately equal."""
    for resultPair in zip(left, right):
      assert_approx_equal(*resultPair, err_msg=errMsg)

  def testExamplesDirExists(self):
    """Make sure the examples directory is in the correct location"""
    failMsg = "Path to examples does not exist: %s" % ExamplesTest.examplesDir
    self.assertTrue(os.path.exists(ExamplesTest.examplesDir), failMsg)

  def testNumberOfOneStepPredictions(self):
    """Make sure all examples output the same number of oneStepPredictions."""
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(len(ExamplesTest.oneStepPredictions["opf"]),
                     len(ExamplesTest.oneStepPredictions["algo"]))
    self.assertEqual(len(ExamplesTest.oneStepPredictions["opf"]),
                     len(ExamplesTest.oneStepPredictions["network"]))

  @unittest.expectedFailure
  def testOneStepPredictionsOpfVsAlgo(self):
    """Make sure one-step predictions are the same for OPF and Algo API."""
    self._assertSeriesApproxEqual(self.oneStepPredictions["opf"],
                                  self.oneStepPredictions["algo"],
                                  "one-step 'opf' and 'algo' differ")

  @unittest.expectedFailure
  def testOneStepPredictionsOpfVsNetwork(self):
    """Make sure one-step predictions are the same for OPF and Network API."""
    self._assertSeriesApproxEqual(self.oneStepPredictions["opf"],
                                  self.oneStepPredictions["network"],
                                  "one-step 'opf' and 'network' differ")

  @unittest.expectedFailure
  def testOneStepPredictionsAlgoVsNetwork(self):
    """Make sure one-step predictions are the same for Algo and Network API."""
    self._assertSeriesApproxEqual(self.oneStepPredictions["algo"],
                                  self.oneStepPredictions["network"],
                                  "one-step 'algo' and 'network' differ")

  @unittest.expectedFailure
  def testFiveStepPredictionsOpfVsNetwork(self):
    """Make sure five-step predictions are the same for OPF and Network API."""
    self._assertSeriesApproxEqual(self.fiveStepPredictions["opf"],
                                  self.fiveStepPredictions["network"],
                                  "five-step 'opf' and 'network' differ")

  @unittest.expectedFailure
  def testOneStepConfidencesOpfVsAlgo(self):
    """Make sure one-step confidences are the same for OPF and Algo API."""
    self._assertSeriesApproxEqual(self.oneStepConfidences["opf"],
                                  self.oneStepConfidences["algo"],
                                  "one-step 'opf' and 'algo' differ")

  @unittest.expectedFailure
  def testOneStepConfidencesOpfVsNetwork(self):
    """Make sure one-step confidences are the same for OPF and Network API."""
    self._assertSeriesApproxEqual(self.oneStepConfidences["opf"],
                                  self.oneStepConfidences["network"],
                                  "one-step 'opf' and 'network' differ")

  @unittest.expectedFailure
  def testOneStepConfidencesAlgoVsNetwork(self):
    """Make sure one-step confidences are the same for Algo and Network API."""
    self._assertSeriesApproxEqual(self.oneStepConfidences["algo"],
                                  self.oneStepConfidences["network"],
                                  "one-step 'algo' and 'network' differ")

  @unittest.expectedFailure
  def testFiveStepConfidencesOpfVsNetwork(self):
    """Make sure five-step confidences are the same for OPF and Network API."""
    self._assertSeriesApproxEqual(self.fiveStepConfidences["opf"],
                                  self.fiveStepConfidences["network"],
                                  "five-step 'opf' and 'network' differ")
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 6,927 | Python | .py | 137 | 43.394161 | 79 | 0.684919 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,143 | lgamma_test.py | numenta_nupic-legacy/tests/unit/nupic/math/lgamma_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for Cells4."""
import sys
import unittest2 as unittest
from nupic.math import lgamma
class LGammaTest(unittest.TestCase):
  """Checks nupic.math.lgamma against reference log-Gamma values."""

  @unittest.skipIf(sys.platform.startswith("win32"),
                   "Skipping failed test on Windows.")
  def testLgamma(self):
    """lgamma(v) matches log(Gamma(v)) to within the table's rounding."""
    # Reference pairs (v, log(Gamma(v))) rounded to 8 decimal places.
    items = (
        (0.1, 2.25271265),
        (0.2, 1.52406382),
        (0.3, 1.09579799),
        (0.4, 0.79667782),
        (0.5, 0.57236494),
        (0.6, 0.39823386),
        (0.7, 0.26086725),
        (0.8, 0.15205968),
        (0.9, 0.06637624),
        (1.0, 0.00000000),
        (1.1, -0.04987244),
        (1.2, -0.08537409),
        (1.3, -0.10817481),
        (1.4, -0.11961291),
        (1.5, -0.12078224),
        (1.6, -0.11259177),
        (1.7, -0.09580770),
        (1.8, -0.07108387),
        (1.9, -0.03898428),
        (2.0, 0.00000000),
        (2.1, 0.04543774),
        (2.2, 0.09694747),
        (2.3, 0.15418945),
        (2.4, 0.21685932),
        (2.5, 0.28468287),
        (2.6, 0.35741186),
        (2.7, 0.43482055),
        (2.8, 0.51670279),
        (2.9, 0.60286961),
        (3.0, 0.69314718),
    )
    for v, lg in items:
      # Tolerance of 1e-8 matches the 8-decimal rounding of the table.
      # (A leftover debug `print` statement was removed here.)
      self.assertLessEqual(abs(lgamma(v) - lg), 1.0e-8,
                           "log Gamma(%f) = %f; lgamma(%f) -> %f" % (
                               v, lg, v, lgamma(v)))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  unittest.main()
| 2,331 | Python | .py | 67 | 29.358209 | 72 | 0.566548 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,144 | topology_test.py | numenta_nupic-legacy/tests/unit/nupic/math/topology_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Topology unit tests"""
import unittest
from nupic.math.topology import (coordinatesFromIndex,
indexFromCoordinates,
neighborhood,
wrappingNeighborhood)
class TestTopology(unittest.TestCase):
def testIndexFromCoordinates(self):
self.assertEquals(0, indexFromCoordinates((0,), (100,)))
self.assertEquals(50, indexFromCoordinates((50,), (100,)))
self.assertEquals(99, indexFromCoordinates((99,), (100,)))
self.assertEquals(0, indexFromCoordinates((0, 0), (100, 80)))
self.assertEquals(10, indexFromCoordinates((0, 10), (100, 80)))
self.assertEquals(80, indexFromCoordinates((1, 0), (100, 80)))
self.assertEquals(90, indexFromCoordinates((1, 10), (100, 80)))
self.assertEquals(0, indexFromCoordinates((0, 0, 0), (100, 10, 8)))
self.assertEquals(7, indexFromCoordinates((0, 0, 7), (100, 10, 8)))
self.assertEquals(8, indexFromCoordinates((0, 1, 0), (100, 10, 8)))
self.assertEquals(80, indexFromCoordinates((1, 0, 0), (100, 10, 8)))
self.assertEquals(88, indexFromCoordinates((1, 1, 0), (100, 10, 8)))
self.assertEquals(89, indexFromCoordinates((1, 1, 1), (100, 10, 8)))
def testCoordinatesFromIndex(self):
self.assertEquals([0], coordinatesFromIndex(0, [100]));
self.assertEquals([50], coordinatesFromIndex(50, [100]));
self.assertEquals([99], coordinatesFromIndex(99, [100]));
self.assertEquals([0, 0], coordinatesFromIndex(0, [100, 80]));
self.assertEquals([0, 10], coordinatesFromIndex(10, [100, 80]));
self.assertEquals([1, 0], coordinatesFromIndex(80, [100, 80]));
self.assertEquals([1, 10], coordinatesFromIndex(90, [100, 80]));
self.assertEquals([0, 0, 0], coordinatesFromIndex(0, [100, 10, 8]));
self.assertEquals([0, 0, 7], coordinatesFromIndex(7, [100, 10, 8]));
self.assertEquals([0, 1, 0], coordinatesFromIndex(8, [100, 10, 8]));
self.assertEquals([1, 0, 0], coordinatesFromIndex(80, [100, 10, 8]));
self.assertEquals([1, 1, 0], coordinatesFromIndex(88, [100, 10, 8]));
self.assertEquals([1, 1, 1], coordinatesFromIndex(89, [100, 10, 8]));
# ===========================================================================
# NEIGHBORHOOD
# ===========================================================================
def expectNeighborhoodIndices(self, centerCoords, radius, dimensions, expected):
centerIndex = indexFromCoordinates(centerCoords, dimensions)
numIndices = 0
for index, expectedIndex in zip(neighborhood(centerIndex, radius,
dimensions),
expected):
numIndices += 1
self.assertEquals(index, expectedIndex)
self.assertEquals(numIndices, len(expected))
def expectNeighborhoodCoords(self, centerCoords, radius, dimensions, expected):
centerIndex = indexFromCoordinates(centerCoords, dimensions)
numIndices = 0
for index, expectedIndex in zip(neighborhood(centerIndex, radius,
dimensions),
expected):
numIndices += 1
self.assertEquals(index, indexFromCoordinates(expectedIndex, dimensions))
self.assertEquals(numIndices, len(expected))
def testNeighborhoodOfOrigin1D(self):
self.expectNeighborhoodIndices(
centerCoords = (0,),
dimensions = (100,),
radius = 2,
expected = (0, 1, 2))
def testNeighborhoodOfOrigin2D(self):
self.expectNeighborhoodCoords(
centerCoords = (0, 0),
dimensions = (100, 80),
radius = 2,
expected = ((0, 0), (0, 1), (0, 2),
(1, 0), (1, 1), (1, 2),
(2, 0), (2, 1), (2, 2)))
def testNeighborhoodOfOrigin3D(self):
self.expectNeighborhoodCoords(
centerCoords = (0, 0, 0),
dimensions = (100, 80, 60),
radius = 1,
expected = ((0, 0, 0), (0, 0, 1),
(0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1),
(1, 1, 0), (1, 1, 1)))
def testNeighborhoodInMiddle1D(self):
self.expectNeighborhoodIndices(
centerCoords = (50,),
dimensions = (100,),
radius = 1,
expected = (49, 50, 51))
def testNeighborhoodOfMiddle2D(self):
self.expectNeighborhoodCoords(
centerCoords = (50, 50),
dimensions = (100, 80),
radius = 1,
expected = ((49, 49), (49, 50), (49, 51),
(50, 49), (50, 50), (50, 51),
(51, 49), (51, 50), (51, 51)))
def testNeighborhoodOfEnd2D(self):
self.expectNeighborhoodCoords(
centerCoords = (99, 79),
dimensions = (100, 80),
radius = 2,
expected = ((97, 77), (97, 78), (97, 79),
(98, 77), (98, 78), (98, 79),
(99, 77), (99, 78), (99, 79)))
def testNeighborhoodWiderThanWorld(self):
self.expectNeighborhoodCoords(
centerCoords = (0, 0),
dimensions = (3, 2),
radius = 3,
expected = ((0, 0), (0, 1),
(1, 0), (1, 1),
(2, 0), (2, 1)))
def testNeighborhoodRadiusZero(self):
self.expectNeighborhoodIndices(
centerCoords = (0,),
dimensions = (100,),
radius = 0,
expected = (0,))
self.expectNeighborhoodCoords(
centerCoords = (0, 0),
dimensions = (100, 80),
radius = 0,
expected = ((0, 0),))
self.expectNeighborhoodCoords(
centerCoords = (0, 0, 0),
dimensions = (100, 80, 60),
radius = 0,
expected = ((0, 0, 0),))
def testNeighborhoodDimensionOne(self):
self.expectNeighborhoodCoords(
centerCoords = (5, 0),
dimensions = (10, 1),
radius = 1,
expected = ((4, 0), (5, 0), (6, 0)))
self.expectNeighborhoodCoords(
centerCoords = (5, 0, 0),
dimensions = (10, 1, 1),
radius = 1,
expected = ((4, 0, 0), (5, 0, 0), (6, 0, 0)))
# ===========================================================================
# WRAPPING NEIGHBORHOOD
# ===========================================================================
def expectWrappingNeighborhoodIndices(self, centerCoords, radius, dimensions,
expected):
centerIndex = indexFromCoordinates(centerCoords, dimensions)
numIndices = 0
for index, expectedIndex in zip(wrappingNeighborhood(centerIndex, radius,
dimensions),
expected):
numIndices += 1
self.assertEquals(index, expectedIndex)
self.assertEquals(numIndices, len(expected))
def expectWrappingNeighborhoodCoords(self, centerCoords, radius, dimensions,
expected):
centerIndex = indexFromCoordinates(centerCoords, dimensions)
numIndices = 0
for index, expectedIndex in zip(wrappingNeighborhood(centerIndex, radius,
dimensions),
expected):
numIndices += 1
self.assertEquals(index, indexFromCoordinates(expectedIndex, dimensions))
self.assertEquals(numIndices, len(expected))
def testWrappingNeighborhoodOfOrigin1D(self):
self.expectWrappingNeighborhoodIndices(
centerCoords = (0,),
dimensions = (100,),
radius = 1,
expected = (99, 0, 1))
def testWrappingNeighborhoodOfOrigin2D(self):
self.expectWrappingNeighborhoodCoords(
centerCoords = (0, 0),
dimensions = (100, 80),
radius = 1,
expected = ((99, 79), (99, 0), (99, 1),
(0, 79), (0, 0), (0, 1),
(1, 79), (1, 0), (1, 1)))
def testWrappingNeighborhoodOfOrigin3D(self):
self.expectWrappingNeighborhoodCoords(
centerCoords = (0, 0, 0),
dimensions = (100, 80, 60),
radius = 1,
expected = ((99, 79, 59), (99, 79, 0), (99, 79, 1),
(99, 0, 59), (99, 0, 0), (99, 0, 1),
(99, 1, 59), (99, 1, 0), (99, 1, 1),
(0, 79, 59), (0, 79, 0), (0, 79, 1),
(0, 0, 59), (0, 0, 0), (0, 0, 1),
(0, 1, 59), (0, 1, 0), (0, 1, 1),
(1, 79, 59), (1, 79, 0), (1, 79, 1),
(1, 0, 59), (1, 0, 0), (1, 0, 1),
(1, 1, 59), (1, 1, 0), (1, 1, 1),))
def testWrappingNeighborhoodInMiddle1D(self):
self.expectWrappingNeighborhoodIndices(
centerCoords = (50,),
dimensions = (100,),
radius = 1,
expected = (49, 50, 51))
def testWrappingNeighborhoodOfMiddle2D(self):
self.expectWrappingNeighborhoodCoords(
centerCoords = (50, 50),
dimensions = (100, 80),
radius = 1,
expected = ((49, 49), (49, 50), (49, 51),
(50, 49), (50, 50), (50, 51),
(51, 49), (51, 50), (51, 51)))
def testWrappingNeighborhoodOfEnd2D(self):
self.expectWrappingNeighborhoodCoords(
centerCoords = (99, 79),
dimensions = (100, 80),
radius = 1,
expected = ((98, 78), (98, 79), (98, 0),
(99, 78), (99, 79), (99, 0),
(0, 78), (0, 79), (0, 0)))
def testWrappingNeighborhoodWiderThanWorld(self):
# The order is weird because it starts walking from {-3, -3} and avoids
# walking the same point twice.
self.expectWrappingNeighborhoodCoords(
centerCoords = (0, 0),
dimensions = (3, 2),
radius = 3,
expected = ((0, 1), (0, 0),
(1, 1), (1, 0),
(2, 1), (2, 0)))
def testWrappingNeighborhoodRadiusZero(self):
self.expectWrappingNeighborhoodIndices(
centerCoords = (0,),
dimensions = (100,),
radius = 0,
expected = (0,))
self.expectWrappingNeighborhoodCoords(
centerCoords = (0, 0),
dimensions = (100, 80),
radius = 0,
expected = ((0, 0),))
self.expectWrappingNeighborhoodCoords(
centerCoords = (0, 0, 0),
dimensions = (100, 80, 60),
radius = 0,
expected = ((0, 0, 0),))
def testWrappingNeighborhoodDimensionOne(self):
self.expectWrappingNeighborhoodCoords(
centerCoords = (5, 0),
dimensions = (10, 1),
radius = 1,
expected = ((4, 0), (5, 0), (6, 0)))
self.expectWrappingNeighborhoodCoords(
centerCoords = (5, 0, 0),
dimensions = (10, 1, 1),
radius = 1,
expected = ((4, 0, 0), (5, 0, 0), (6, 0, 0)))
| 11,486 | Python | .py | 266 | 34.466165 | 82 | 0.562994 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,145 | run_opf_benchmarks_test.py | numenta_nupic-legacy/tests/regression/run_opf_benchmarks_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Run OPF benchmarks to ensure that changes don't degrade prediction accuracy.
This is done using a set of standard experiments with thresholds for the
prediction metrics. Limiting the number of permutations can cause the test to
fail if it results in lower accuracy.
"""
import sys
import os
import time
import imp
import json
import shutil
import tempfile
from optparse import OptionParser
from multiprocessing import Process, Queue
from Queue import Empty
from collections import deque
from nupic.database import client_jobs_dao as cjdao
from nupic.swarming.exp_generator import experiment_generator
from nupic.frameworks.opf.opf_utils import InferenceType
from nupic.support.configuration import Configuration
from nupic.support.unittesthelpers.testcasebase import unittest
from nupic.swarming import permutations_runner
from nupic.swarming.utils import generatePersistentJobGUID
class OPFBenchmarkRunner(unittest.TestCase):
  """Runs a suite of OPF swarming benchmarks and compares the resulting
  prediction error against stored expectations (see setupBenchmarks).

  Configuration lives in class attributes, populated via the classmethod
  setters before the benchmarks execute.
  """
  # AWS tests attribute required for tagging via automatic test discovery via
  # nosetests
  engineAWSClusterTest = 1
  # Names of all benchmark datasets this runner knows how to generate.
  allBenchmarks = ["hotgym",
                   "sine", "twovars",
                   "twovars2", "threevars",
                   "fourvars", "categories",
                   "sawtooth", "hotgymsc"]
  # Environment-variable names used to pass swarm limits to workers.
  BRANCHING_PROP = "NTA_CONF_PROP_nupic_hypersearch_max_field_branching"
  PARTICLE_PROP = "NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm"
  # Common experiment parameters for all benchmarks.
  # NOTE(review): benchmark builders use EXP_COMMON.copy(), which is shallow,
  # so the nested "inferenceArgs" dict is shared between experiments.
  EXP_COMMON = {"runBaselines":True,
                "inferenceType":"MultiStep",
                "inferenceArgs":{
                  "predictedField": None,
                  "predictionSteps":[1],
                  }
                }
  # Input data directory and output directory (set by the harness).
  datadir = None
  outdir = None
  # Maps "<dataset>,<searchType>" -> (expected error, fractional margin).
  benchmarkDB = {}
  # Collected results, keyed like benchmarkDB.
  resultDB = {}
  # Per-dataset training-record counts (see setupTrainTestSplits).
  splits={}
  # Maps dataset name -> (experiment dir, experiment description dict).
  descriptions={}
  # Queue of (callable, args) benchmark jobs to launch (see runJobs).
  testQ = []
  # Which search variants to run (set via getTests).
  __doV2noTerm = None
  __doV2Term = None
  __doClusterDef = None
  __doEnsemble = False
  __maxNumWorkers = 2
  __recordsToProcess = -1
  __timeout = 120
  __maxPermutations = -1
  __clusterRunning = False
  # Child processes launched by runJobs, plus a cached live count.
  __procs = []
  __numRunningProcs = 0
  # NOTE(review): these queues are created at import time and are shared by
  # all instances of this class.
  __resultQ = Queue()
  __resultList = []
  runBenchmarks = None
  __failures=0
  __metaOptimize=False
  __trainFraction=None
  __expJobMap=Queue()
  # Maps swarm job IDs to the production-worker job IDs spawned for them.
  swarmJobIDProductionJobIDMap={}
  maxBranchings = None
  maxParticles = None
  iterations = 1
  filesOnly = False
  maxConcurrentJobs = 1
  isSaveResults = True
  @classmethod
  def setUpClass(cls):
    """Populate the expected-error table once before any test runs."""
    cls.setupBenchmarks()
@classmethod
def getTests(cls, tests):
found = False
#Ignore case when matching
tests = tests.lower()
if('cluster_default' in tests):
found = True
cls.__doClusterDef = True
if('v2noterm' in tests):
found = True
cls.__doV2noTerm = True
if('v2term' in tests):
found = True
cls.__doV2Term = True
return found
def __updateProcessCounter(self):
""" Function that iterates through the running Processes
and counts the number of processes that are currently alive.
Sets numRunningProcs to this count
"""
newcounter = 0
for job in self.__procs:
if job.is_alive():
newcounter+=1
self.__numRunningProcs = newcounter
return newcounter
def cancelJobs(self):
""" Function that cancels all the jobs in the
process queue.
"""
print "Terminating all Jobs due to reaching timeout"
for proc in self.__procs:
if not proc.is_alive():
proc.terminate()
print "All jobs have been terminated"
  def runJobs(self, maxJobs):
    """Launch the queued Hypersearch benchmark jobs in child processes.

    Runs the (callable, args) entries in self.testQ, keeping at most
    `maxJobs` processes alive at once, then checks every result pulled off
    the shared result queue against its benchmark via assertBenchmarks.

    NOTE(review): when maxJobs <= 1 nothing in testQ is launched — only the
    result-queue check at the bottom runs. Confirm that is intended.
    """
    jobsrunning = self.__numRunningProcs
    if(maxJobs > 1):
      jobsindx = 0
      # Walk the queue, launching while below the concurrency limit and
      # looping until every launched process has exited.
      while(jobsindx<len(self.testQ) or jobsrunning>0):
        if(jobsindx<len(self.testQ) and jobsrunning<maxJobs):
          curJob = self.testQ[jobsindx]
          p = Process(target = curJob[0], args = curJob[1])
          p.start()
          self.__procs.append(p)
          jobsindx+=1
        if jobsrunning >= maxJobs:
          # At capacity: back off before trying to launch more.
          time.sleep(30)
          print ("Maximum number of jobs running, waiting before launching "
                 "new jobs")
        elif jobsindx == len(self.testQ):
          # Everything launched; just wait for stragglers to finish.
          time.sleep(30)
          print "Waiting for all scheduled tests to finish."
        #Update the number of active running processes.
        jobsrunning = self.__updateProcessCounter()
      for proc in self.__procs:
        # Check that no process has died. If one has died, then kill all
        # running jobs and exit.
        if proc.exitcode == 1:
          self.cancelJobs()
          assert False, ("Some jobs have not been able to complete in the "
                         "allotted time.")
    # Check that each test satisfied the benchmark; a 5-second Empty timeout
    # marks the end of the available results.
    try:
      while True:
        result = self.__resultQ.get(True, 5)
        self.assertBenchmarks(result)
    except Empty:
      pass
@classmethod
def setupTrainTestSplits(cls):
cls.splits['hotgym'] = int(round(cls.__trainFraction*87843))
cls.splits['sine'] = int(round(cls.__trainFraction*3019))
cls.splits['twovars'] = int(round(cls.__trainFraction*2003))
cls.splits['twovars2'] = int(round(cls.__trainFraction*2003))
cls.splits['threevars'] = int(round(cls.__trainFraction*2003))
cls.splits['fourvars'] = int(round(cls.__trainFraction*2003))
cls.splits['categories'] = int(round(cls.__trainFraction*2003))
cls.splits['sawtooth'] = int(round(cls.__trainFraction*1531))
# Only the first gym
cls.splits['hotgymsc'] = int(round(cls.__trainFraction*17500))
@classmethod
def setupBenchmarks(cls):
# BenchmarkDB stores error/margin pairs
# Margin is in fraction difference. Thus .1 would mean a max of a
# 10% difference
cls.benchmarkDB['hotgym' + ',' + 'v2NoTerm'] = (15.69, .1)
cls.benchmarkDB['hotgym' + ',' + 'v2Term'] = (15.69, .1)
cls.benchmarkDB['hotgym' + ',' + 'cluster_default'] = (15.69, .1)
cls.benchmarkDB['sine' + ',' + 'v2NoTerm'] = (0.054, .1)
cls.benchmarkDB['sine' + ',' + 'v2Term'] = (0.054, .1)
cls.benchmarkDB['sine' + ',' + 'cluster_default'] = (0.054, .1)
# TODO: Convert these to altMAPE scores...
cls.benchmarkDB['twovars' + ',' + 'v2NoTerm'] = (2.5, .1)
cls.benchmarkDB['twovars' + ',' + 'v2Term'] = (2.5, .1)
cls.benchmarkDB['twovars' + ',' + 'cluster_default'] = (2.5, .1)
# TODO: Convert these to altMAPE scores...
cls.benchmarkDB['twovars2' + ',' + 'v2NoTerm'] = (2.5, .1)
cls.benchmarkDB['twovars2' + ',' + 'v2Term'] = (2.5, .1)
cls.benchmarkDB['twovars2' + ',' + 'cluster_default'] = (2.5, .1)
# TODO: Convert these to altMAPE scores...
cls.benchmarkDB['threevars' + ',' + 'v2NoTerm'] = (2.5, .1)
cls.benchmarkDB['threevars' + ',' + 'v2Term'] = (2.5, .1)
cls.benchmarkDB['threevars' + ',' + 'cluster_default'] = (2.5, .1)
# TODO: Convert these to altMAPE scores...
cls.benchmarkDB['fourvars' + ',' + 'v2NoTerm'] = (2.5, .1)
cls.benchmarkDB['fourvars' + ',' + 'v2Term'] = (2.5, .1)
cls.benchmarkDB['fourvars' + ',' + 'cluster_default'] = (2.5, .1)
# TODO: Convert these to altMAPE scores...
cls.benchmarkDB['categories' + ',' + 'v2NoTerm'] = (1, .1)
cls.benchmarkDB['categories' + ',' + 'v2Term'] = (1, .1)
cls.benchmarkDB['categories' + ',' + 'cluster_default'] = (1, .1)
# TODO: Convert these to altMAPE scores...
cls.benchmarkDB['sawtooth' + ',' + 'v2NoTerm'] = (100, .1)
cls.benchmarkDB['sawtooth' + ',' + 'v2Term'] = (100, .1)
cls.benchmarkDB['sawtooth' + ',' + 'cluster_default'] = (100, .1)
# HotGym using spatial classification
cls.benchmarkDB['hotgymsc' + ',' + 'v2NoTerm'] = (21.1, .1)
cls.benchmarkDB['hotgymsc' + ',' + 'v2Term'] = (21.1, .1)
cls.benchmarkDB['hotgymsc' + ',' + 'cluster_default'] = (21.1, .1)
def generatePrependPath(self, prependDict):
prep = ""
if 'iteration' in prependDict:
prep = os.path.join(prep, str(prependDict["iteration"]))
if (self.BRANCHING_PROP in prependDict and
len(self.maxBranchings.split(",")) > 1):
prep = os.path.join(prep, "maxBranch_%s" %
prependDict[self.BRANCHING_PROP])
if (self.PARTICLE_PROP in prependDict and
len(self.maxParticles.split(",")) > 1):
prep = os.path.join(prep, "maxParticles_%s" %
prependDict[self.PARTICLE_PROP])
return prep
@classmethod
def setMaxNumWorkers(cls, n):
# Safety check to make sure not too many workers run on a local machine
if(n is None):
if(not cls.onCluster()):
cls.__maxNumWorkers = 2
else:
cls.__maxNumWorkers = 20
else:
cls.__maxNumWorkers = n
  @classmethod
  def setNumRecords(cls, n):
    """Set the iterationCount used in the generated experiment descriptions
    (see the benchmark* methods). Defaults to -1 when never called.
    """
    cls.__recordsToProcess = n
  def waitForProductionWorkers(self):
    """Block until every submitted production job completes, then copy each
    job's best error value into the corresponding swarm result record.
    """
    jobsDB = cjdao.ClientJobsDAO.get()
    done=False
    # Poll the jobs database every 10 seconds until all production jobs
    # report 'completed'. NOTE(review): the sleep also runs once after the
    # final check — harmless but adds 10s.
    while(not done):
      done=True
      for jobID in self.swarmJobIDProductionJobIDMap.keys():
        if (jobsDB.jobGetFields(self.swarmJobIDProductionJobIDMap[jobID],
                                ["status",])[0] != 'completed'):
          done=False
      time.sleep(10)
    #When the production workers are done, get and store their results
    for jobRes in self.__resultList:
      swarmjobID = jobRes['jobID']
      prodjobID = self.swarmJobIDProductionJobIDMap[swarmjobID]
      prodResults = json.loads(jobsDB.jobGetFields(prodjobID, ['results'])[0])
      jobRes['prodMetric'] = str(prodResults['bestValue'])
  def submitProductionJob(self, modelID, dataSet):
    """Insert a production-model job for one swarm model's checkpoint.

    Returns the new job ID, or -1 when the model has no checkpoint to run.
    """
    jobsDB = cjdao.ClientJobsDAO.get()
    # NOTE(review): outputDir is computed but never used below.
    outputDir=self.descriptions[dataSet][0]
    streamDef = self.descriptions[dataSet][1]["streamDef"]
    #streamDef["streams"][0]["first_record"]=self.splits[dataSet]
    # Run the production model over the remainder of the stream.
    streamDef["streams"][0]["last_record"]=sys.maxint
    cmdLine = "$PRODUCTIONMODEL"
    productionJobParams = dict(
      inputStreamDef = streamDef,
      #outputDir=outputDir,
      modelSpec = dict(
        checkpointID=jobsDB.modelsGetFields(
          modelID, ("modelCheckpointId",))[0]),
      )
    productionJobParams['persistentJobGUID'] = generatePersistentJobGUID()
    # No checkpoint means there is nothing for a production worker to load.
    if (productionJobParams["modelSpec"]["checkpointID"] is None):
      return -1
    return jobsDB.jobInsert(
      client="TstPW-PM",
      cmdLine=cmdLine,
      params=json.dumps(productionJobParams),
      minimumWorkers=1,
      maximumWorkers=1,
      jobType = jobsDB.JOB_TYPE_PM)
  def runProductionWorkers(self):
    """Submit a production job for every model of every completed swarm job
    queued in __expJobMap, recording the swarm-job -> production-job mapping.
    """
    jobsDB = cjdao.ClientJobsDAO.get()
    print "Starting Production Worker Jobs"
    print "__expJobMap " + str(self.__expJobMap) + str(id(self.__expJobMap))
    while not self.__expJobMap.empty():
      (dataSet, jobID) = self.__expJobMap.get()
      modelCounterPairs = jobsDB.modelsGetUpdateCounters(jobID)
      modelIDs = tuple(x[0] for x in modelCounterPairs)
      for modelID in modelIDs:
        prodID=self.submitProductionJob(modelID, dataSet)
        # -1 means the model had no checkpoint; skip it.
        # NOTE(review): each iteration overwrites the map entry for jobID,
        # so only the last submitted production job per swarm job is kept.
        if(prodID!=-1):
          self.swarmJobIDProductionJobIDMap[jobID] = prodID
@classmethod
def setTrainFraction(cls, x):
if x == None:
cls.__trainFraction==1.0
elif x > 1.0 or x < 0.0:
raise Exception("Invalid training fraction")
else:
cls.__trainFraction=x
  @classmethod
  def setDoEnsemble(cls):
    """Enable ensemble mode (adds --ensemble to the permutations run)."""
    cls.__doEnsemble = True
  @classmethod
  def setTimeout(cls, n):
    """Set the swarm timeout in minutes (passed as --timeout)."""
    cls.__timeout = n
  @classmethod
  def setMaxPermutations(cls, n):
    """Set the model cap for a swarm (passed as --maxPermutations);
    values <= 0 leave the swarm unlimited.
    """
    cls.__maxPermutations = n
  def setEnsemble(self, ensemble):
    """If `ensemble` is truthy, enable ensemble mode via the environment."""
    if(ensemble):
      os.environ['NTA_CONF_PROP_nupic_hypersearch_ensemble'] = "True"
@classmethod
def setMetaOptimize(cls, paramString):
print paramString
if paramString is None:
cls.__metaOptimize = False
else:
cls.__metaOptimize = True
paramsDict=json.loads(paramString)
if(paramsDict.has_key("inertia")):
os.environ['NTA_CONF_PROP_nupic_hypersearch_inertia'] = \
str(paramsDict['inertia'])
if(paramsDict.has_key('socRate')):
os.environ['NTA_CONF_PROP_nupic_hypersearch_socRate'] = \
str(paramsDict['socRate'])
if(paramsDict.has_key('cogRate')):
os.environ['NTA_CONF_PROP_nupic_hypersearch_cogRate'] = \
str(paramsDict['cogRate'])
if(paramsDict.has_key('minParticlesPerSwarm')):
os.environ['NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm'] = \
str(paramsDict['minParticlesPerSwarm'])
def setUpExportDicts(self):
"""
Setup up a dict of branchings and particles
"""
ret = []
if self.maxBranchings is None:
self.maxBranchings = [None]
else:
self.maxBranchings = self.maxBranchings.split(',')
if self.maxParticles is None:
self.maxParticles = [None]
else:
self.maxParticles = self.maxParticles.split(",")
for branch in self.maxBranchings:
for part in self.maxParticles:
curdict = dict()
if not branch is None:
curdict[self.BRANCHING_PROP] = branch
if not part is None:
curdict[self.PARTICLE_PROP] = part
ret+=[curdict]
return ret
def addExportsToResults(self, results, exports):
if self.BRANCHING_PROP in exports:
results['maxBranching'] = exports[self.BRANCHING_PROP]
if self.PARTICLE_PROP in exports:
results['maxParticles'] = exports[self.PARTICLE_PROP]
  def syncFiles(self):
    """On a cluster, push the output directory to all nodes via the
    syncDataFiles utility; on a local run this is a no-op.
    """
    if(self.onCluster()):
      os.system("syncDataFiles %s" % self.outdir)
    return
  def removeTmpDirs(self):
    """Delete the temporary output directory (on every node when running
    on a cluster).

    NOTE(review): outdir is interpolated unquoted into a shell command;
    this is safe only because the harness controls the path.
    """
    print "Removing temporary directory <%s>" % self.outdir
    if(self.onCluster()):
      os.system("onall rm -r %s" % self.outdir)
    else :
      os.system("rm -r %s" % self.outdir)
  @classmethod
  def onCluster(cls):
    """Return True when the jobs database host is remote, i.e. the run is
    against a cluster rather than a local database.
    """
    return (Configuration.get('nupic.cluster.database.host') != 'localhost')
def createResultList(self):
try:
while 1:
self.__resultList.append(self.__resultQ.get(True, 5))
except Empty:
pass
  def printResults(self):
    """Print a one-line summary per benchmark and, in meta-optimize mode,
    append the results plus the active PSO settings to allResults.csv.
    """
    jobsDB = cjdao.ClientJobsDAO.get()
    # Sentinel used when no production run exists for a benchmark.
    productionError=-1
    for key in sorted(self.resultDB.keys()):
      restup = self.resultDB[key]
      # A partial training split implies production jobs were run; fetch
      # the production worker's best error for this swarm job.
      if(self.__trainFraction<1.0):
        productionError=json.loads(jobsDB.jobGetFields(
            self.swarmJobIDProductionJobIDMap[restup["jobID"]],
            ["results",])[0])['bestValue']
      print ("Test: %10s Expected: %10.4f Swarm Error: %10.4f "
             "ProductionError: %10.4f TotalModelWallTime: %8d "
             "RecordsProcessed: %10d Status: %10s") % \
            (key, self.benchmarkDB[key][0], restup['metric'],
             productionError, restup['totalModelWallTime'],
             restup["totalNumRecordsProcessed"], restup['status'])
      if self.__metaOptimize:
        lineResults=str(key)+", "+str(self.benchmarkDB[key][0])+", "+ \
          str(restup['metric'])+", "+str(restup['totalModelWallTime'])+", "+ \
          str(restup["totalNumRecordsProcessed"])+", "+str(restup['status'])
        lineMeta=Configuration.get("nupic.hypersearch.minParticlesPerSwarm")+\
          ", "+Configuration.get("nupic.hypersearch.inertia")+", "+\
          Configuration.get("nupic.hypersearch.cogRate")+", "+\
          Configuration.get("nupic.hypersearch.socRate")+", "+\
          str(productionError)+", "+str(self.__trainFraction)+"\n"
        print lineMeta
        with open("allResults.csv", "a") as results:
          results.write(lineResults+", "+lineMeta)
  def saveResults(self):
    """Write every collected result as a row of BenchmarkResults.csv under
    the output directory, including the optional branching/particle columns.

    NOTE(review): each row reads result['jobTime'], but getResultsFromJobDB
    never sets that key (the assignment there is commented out) — confirm
    the results dicts are augmented elsewhere before this runs.
    """
    outpath = os.path.join(self.outdir, "BenchmarkResults.csv")
    # `csv` here is a file handle, not the stdlib csv module.
    csv = open(outpath, 'w')
    optionalKeys = ['maxBranching', 'maxParticles']
    # Header row; the trailing comma keeps the optional columns on the
    # same line.
    print >> csv , (
        "JobID, Output Directory, Benchmark, Search, Swarm Error Metric,"
        " Prod. Error Metric, encoders, TotalModelElapsedTime(s), "
        "TotalCpuTime(s), JobWallTime, RecordsProcessed, Completion Status"),
    addstr = ""
    for key in optionalKeys:
      addstr+= ",%s" % key
    print >> csv, addstr
    for result in self.__resultList:
      print >> csv, "%d,%s,%s,%s,%f,%s,%s,%d,%f,%s,%d,%s" % (result['jobID'],
          result['outDir'], result['expName'], result['searchType'], \
          result["metric"], result["prodMetric"], \
          result['encoders'], \
          result["totalModelWallTime"], \
          result['totalModelCpuTime'], str(result['jobTime']), \
          result["totalNumRecordsProcessed"], result['status']),
      addstr = ""
      for key in optionalKeys:
        if key in result:
          addstr+= ",%s" % str(result[key])
        else:
          addstr+= ",None"
      print >> csv, addstr
    csv.close()
def readModelWallTime(self, modelInfo):
startTime = modelInfo.startTime
if(modelInfo.status == cjdao.ClientJobsDAO.STATUS_COMPLETED):
endTime = modelInfo.endTime
return (endTime - startTime).seconds
return 0
  def readNumRecordsProcessed(self, modelInfo):
    """Return the number of records the model processed."""
    return modelInfo.numRecords
  def readModelCpuTime(self, modelInfo):
    """Return the CPU time the model consumed."""
    return modelInfo.cpuTime
  def getResultsFromJobDB(self, jobID, expname, searchtype, basedir):
    """Collect a finished swarm job's results from the jobs database.

    Returns a dict with the best model's metrics, its encoder (swarm) id,
    aggregate wall/CPU time and record counts over all models, plus
    bookkeeping fields used by printResults/saveResults.
    """
    ret = {}
    jobsDB = cjdao.ClientJobsDAO.get()
    jobInfo = jobsDB.jobInfo(jobID)
    res = jobInfo.results
    results = json.loads(res)
    bestModel = results["bestModel"]
    modelIds = jobsDB.jobGetModelIDs(jobID)
    modelInfos = jobsDB.modelsInfo(modelIds)
    totalModelWallTime = 0
    totalNumRecordsProcessed = 0
    totalModelCpuTime = 0.0
    for modelInfo in modelInfos:
      if modelInfo.modelId == bestModel:
        metrics = json.loads(modelInfo.results)[0]
        # NOTE(review): bestmetric is computed but never used.
        bestmetric = json.loads(modelInfo.results)[1].keys()[0]
        for key in metrics.keys():
          # NOTE(review): the second assignment below repeats ret[key] from
          # the branch above; the branch's only net effect is "nupicScore".
          if "nupicScore" in key and "moving" in key:
            ret["nupicScore"] = ret[key] = metrics[key]
          ret[key] = metrics[key]
        ret["encoders"] = (
            json.loads(modelInfo.params)["particleState"]["swarmId"])
      # Aggregate timings/record counts over every model in the swarm.
      totalModelWallTime += self.readModelWallTime(modelInfo)
      totalNumRecordsProcessed += self.readNumRecordsProcessed(modelInfo)
      totalModelCpuTime += self.readModelCpuTime(modelInfo)
    ret['outDir'] = basedir
    ret['jobID'] = jobID
    ret['status'] = jobInfo.workerCompletionReason
    ret['metric'] = results['bestValue']
    #ret['jobTime'] = jobTime
    ret['totalModelCpuTime'] = totalModelCpuTime
    ret['totalModelWallTime'] = totalModelWallTime
    ret['totalNumRecordsProcessed'] = totalNumRecordsProcessed
    ret['expName'] = expname
    ret['searchType'] = searchtype
    ret['prodMetric'] = ""
    return ret
  def benchmarkHotGym(self):
    """Generate the hotgym benchmark experiment (predict `consumption`
    with hourly aggregation) and write its files under <outdir>/hotgym.
    Returns the experiment directory.
    """
    # Form the stream definition
    dataPath = os.path.join(self.datadir, "hotgym", "hotgym.csv")
    streamDef = dict(
      version=1,
      info="hotgym benchmark test",
      streams=[
        dict(source="file://%s" % (dataPath),
             info="hotgym.csv",
             # NOTE: Limiting number of records to work around a bug in the
             # Streams Mgr present as of Dec 7, 2011 that shows up if you have
             # more than 50K records.
             # last_record = 49000,
             columns=["gym", "timestamp", "consumption"],
             last_record=self.splits['hotgym'],)
      ],
      aggregation={
        'hours' : 1,
        'fields' : [
            ('consumption', 'sum'),
            ('gym', 'first'),
        ]
      },
    )
    # Generate the experiment description
    # NOTE(review): EXP_COMMON.copy() is shallow, so the nested
    # "inferenceArgs" dict is shared with every other benchmark built from
    # the same template — confirm mutation order does not matter here.
    expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()
    expDesc["inferenceArgs"]["predictedField"] = "consumption"
    expDesc.update({
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "timestamp",
          "fieldType": "datetime"
        },
        { "fieldName": "consumption",
          "fieldType": "float",
          "minValue": 1.1,
          "maxValue": 44.72,
        },
        { "fieldName": "gym",
          "fieldType": "string",
        },
      ],
      "iterationCount": self.__recordsToProcess,
    })
    # set the experiment name to put the experiment files in different folders
    expdir = os.path.join(self.outdir, "hotgym")
    self.generateModules(expDesc, expdir)
    self.descriptions["hotgym"]=(expdir, expDesc)
    return expdir
def benchmarkSine(self):
""" Try running a basic experiment and permutations
"""
# Form the stream definition
dataPath = os.path.join(self.datadir, "sine", "sine.csv")
streamDef = dict(
version=1,
info="hotgym benchmark test",
streams=[
dict(source="file://%s" % (dataPath),
info="sine.csv",
columns=["Sine","angle"],
last_record=self.splits['sine']),
],
)
# Generate the experiment description
expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()
expDesc["inferenceArgs"]["predictedField"] = "Sine"
expDesc.update({
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "Sine",
"fieldType": "float",
"minValue": -1.0,
"maxValue": 1.0,
},
{ "fieldName": "angle",
"fieldType": "float",
"minValue": 0.0,
"maxValue": 25.0,
},
],
"iterationCount": self.__recordsToProcess,
})
# set the experiment name to put the experiment files in different folders
expdir = os.path.join(self.outdir, "sine")
self.generateModules(expDesc, expdir)
self.descriptions["sine"]=(expdir, expDesc)
return expdir
  def benchmarkTwoVars(self):
    """Generate the two-variable linear benchmark experiment (predict
    `field1` from two linearly related fields) and write its files under
    <outdir>/twovars. Returns the experiment directory.
    """
    # Form the stream definition
    dataPath = os.path.join(self.datadir, "generated", "spatial",
                            "linear_two_fields", "sample2.csv")
    streamDef = dict(
      version=1,
      info="two fields test",
      streams=[
        dict(source="file://%s" % (dataPath),
             info="linear_two_fields",
             columns=["field1","field2"],
             last_record=self.splits['twovars'],),
      ],
    )
    # Generate the experiment description
    # NOTE(review): EXP_COMMON.copy() is shallow; "inferenceArgs" is shared
    # with the other benchmarks built from the same template.
    expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()
    expDesc["inferenceArgs"]["predictedField"] = "field1"
    expDesc.update({
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "field1",
          "fieldType": "int",
          "minValue": -10,
          "maxValue": 110,
        },
        { "fieldName": "field2",
          "fieldType": "int",
          "minValue": -10,
          "maxValue": 110,
        },
      ],
      "iterationCount": self.__recordsToProcess,
    })
    # set the experiment name to put the experiment files in different folders
    expdir = os.path.join(self.outdir, "twovars")
    self.generateModules(expDesc, expdir)
    self.descriptions["twovars"]=(expdir, expDesc)
    return expdir
  def benchmarkThreeVars(self):
    """Generate the three-variable benchmark experiment (predict `field1`
    from two linear fields plus an extra one) and write its files under
    <outdir>/threevars. Returns the experiment directory.
    """
    # Form the stream definition
    dataPath = os.path.join(self.datadir, "generated", "spatial",
                            "linear_two_plus_one_fields", "sample1.csv")
    streamDef = dict(
      version=1,
      info="three fields test",
      streams=[
        dict(source="file://%s" % (dataPath),
             info="linear_two_plus_one_fields",
             columns=["field1","field2","field3"],
             last_record=self.splits['threevars']),
      ],
    )
    # Generate the experiment description
    # NOTE(review): EXP_COMMON.copy() is shallow; "inferenceArgs" is shared
    # with the other benchmarks built from the same template.
    expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()
    expDesc["inferenceArgs"]["predictedField"] = "field1"
    expDesc.update({
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "field1",
          "fieldType": "int",
          "minValue": -10,
          "maxValue": 110,
        },
        { "fieldName": "field2",
          "fieldType": "int",
          "minValue": -10,
          "maxValue": 110,
        },
        { "fieldName": "field3",
          "fieldType": "int",
          "minValue": -10,
          "maxValue": 110,
        }
      ],
      "iterationCount": self.__recordsToProcess,
    })
    # set the experiment name to put the experiment files in different folders
    expdir = os.path.join(self.outdir, "threevars")
    self.generateModules(expDesc, expdir)
    self.descriptions["threevars"]=(expdir, expDesc)
    return expdir
  def benchmarkFourVars(self):
    """Generate the four-variable benchmark experiment (predict `field1`,
    the sum of two fields, with two extra fields) and write its files under
    <outdir>/fourvars. Returns the experiment directory.
    """
    # Form the stream definition
    dataPath = os.path.join(self.datadir, "generated", "spatial",
                            "sum_two_fields_plus_extra_field", "sample1.csv")
    streamDef = dict(
      version=1,
      info="four fields test",
      streams=[
        dict(source="file://%s" % (dataPath),
             # NOTE(review): this stream label names a different generated
             # dataset than dataPath points at — likely copy-paste.
             info="linear_two_plus_one_fields",
             columns=["field1","field2","field3","field4"],
             last_record=self.splits['fourvars']),
      ],
    )
    # Generate the experiment description
    # NOTE(review): EXP_COMMON.copy() is shallow; "inferenceArgs" is shared
    # with the other benchmarks built from the same template.
    expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()
    expDesc["inferenceArgs"]["predictedField"] = "field1"
    expDesc.update({
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "field1",
          "fieldType": "int",
          "minValue": -10,
          "maxValue": 210,
        },
        { "fieldName": "field2",
          "fieldType": "int",
          "minValue": -10,
          "maxValue": 110,
        },
        { "fieldName": "field3",
          "fieldType": "int",
          "minValue": -10,
          "maxValue": 110,
        },
        { "fieldName": "field4",
          "fieldType": "int",
          "minValue": -10,
          "maxValue": 110,
        }
      ],
      "iterationCount": self.__recordsToProcess,
    })
    # set the experiment name to put the experiment files in different folders
    expdir = os.path.join(self.outdir, "fourvars")
    self.generateModules(expDesc, expdir)
    self.descriptions["fourvars"]=(expdir, expDesc)
    return expdir
  def benchmarkCategories(self):
    """Generate the categorical benchmark experiment (predict string
    `field2` from string `field1`) and write its files under
    <outdir>/categories. Returns the experiment directory.
    """
    # Form the stream definition
    dataPath = os.path.join(self.datadir, "generated", "temporal",
                            "categories", "sample1.csv")
    streamDef = dict(
      version=1,
      info="categories test",
      streams=[
        dict(source="file://%s" % (dataPath),
             info="categories",
             columns=["field1","field2"],
             last_record=self.splits['categories']),
      ],
    )
    # Generate the experiment description
    # NOTE(review): EXP_COMMON.copy() is shallow; "inferenceArgs" is shared
    # with the other benchmarks built from the same template.
    expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()
    expDesc["inferenceArgs"]["predictedField"] = "field2"
    expDesc.update({
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "field1",
          "fieldType": "string",
        },
        { "fieldName": "field2",
          "fieldType": "string",
        }
      ],
      "iterationCount": self.__recordsToProcess,
    })
    # set the experiment name to put the experiment files in different folders
    expdir = os.path.join(self.outdir, "categories")
    self.generateModules(expDesc, expdir)
    self.descriptions["categories"]=(expdir, expDesc)
    return expdir
def benchmarkTwoVarsSquare(self):
""" Try running a basic experiment and permutations
"""
# Form the stream definition
dataPath = os.path.join(self.datadir, "generated", "spatial",
"linear_two_fields", "sample3.csv")
streamDef = dict(
version=1,
info="three fields test",
streams=[
dict(source="file://%s" % (dataPath),
info="linear_two_fields",
columns=["field1","field2"],
last_record=self.splits['twovars2']),
],
)
# Generate the experiment description
expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()
expDesc["inferenceArgs"]["predictedField"] = "field1"
expDesc.update({
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "field1",
"fieldType": "int",
"minValue": -10,
"maxValue": 110,
},
{ "fieldName": "field2",
"fieldType": "int",
"minValue": -10,
"maxValue": 10010,
}
],
"iterationCount": self.__recordsToProcess,
})
# set the experiment name to put the experiment files in different folders
expdir = os.path.join(self.outdir, "twovars2")
self.generateModules(expDesc, expdir)
self.descriptions["twovars2"]=(expdir, expDesc)
return expdir
  def benchmarkSawtooth(self):
    """Generate the sawtooth benchmark experiment (predict `value`, with
    delta encoding enabled) and write its files under <outdir>/sawtooth.
    Returns the experiment directory.
    """
    # Form the stream definition
    dataPath = os.path.join(self.datadir, "sawtooth", "sawtooth.csv")
    streamDef = dict(
      version=1,
      info="sawtooth test",
      streams=[
        dict(source="file://%s" % (dataPath),
             info="sawtooth",
             columns=["value"],
             last_record=self.splits['sawtooth'],),
      ],
    )
    # Generate the experiment description
    # NOTE(review): EXP_COMMON.copy() is shallow; "inferenceArgs" is shared
    # with the other benchmarks built from the same template.
    expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()
    expDesc["inferenceArgs"]["predictedField"] = "value"
    expDesc.update({
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "value",
          "fieldType": "int",
          "runDelta":True,
        },
      ],
      "iterationCount": self.__recordsToProcess,
    })
    # set the experiment name to put the experiment files in different folders
    expdir = os.path.join(self.outdir, "sawtooth")
    self.generateModules(expDesc, expdir)
    self.descriptions["sawtooth"]=(expdir, expDesc)
    return expdir
  def benchmarkHotGymSC(self):
    """ The HotGym dataset, only the first gym, solved using spatial
    classification. This model learns the association between the date/time
    stamp and the consumption - the model does not get consumption fed in at
    the bottom.

    Writes the generated files under <outdir>/hotgymsc and returns the
    experiment directory.
    """
    # Form the stream definition
    dataPath = os.path.join(self.datadir, "hotgym", "hotgym.csv")
    streamDef = dict(
      version=1,
      info="hotgym spatial classification benchmark test",
      streams=[
        dict(source="file://%s" % (dataPath),
             info="hotgym.csv",
             # NOTE: Limiting number of records to work around a bug in the
             # Streams Mgr present as of Dec 7, 2011 that shows up if you have
             # more than 50K records.
             # last_record = 49000,
             columns=["gym", "timestamp", "consumption"],
             last_record=self.splits['hotgymsc'],)
      ],
      aggregation={
        'hours' : 1,
        'fields' : [
            ('consumption', 'sum'),
            ('gym', 'first'),
        ]
      },
    )
    # Generate the experiment description
    # NOTE(review): EXP_COMMON.copy() is shallow, so setting
    # predictionSteps=[0] here mutates the "inferenceArgs" dict shared with
    # every other benchmark built from the same template.
    expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()
    expDesc["inferenceArgs"]["predictedField"] = "consumption"
    # predictionSteps [0] selects spatial (same-record) classification.
    expDesc["inferenceArgs"]["predictionSteps"] = [0]
    expDesc.update({
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "timestamp",
          "fieldType": "datetime"
        },
        { "fieldName": "consumption",
          "fieldType": "float",
          "minValue": 0,
          "maxValue": 100,
        },
        { "fieldName": "gym",
          "fieldType": "string",
        },
      ],
      "iterationCount": self.__recordsToProcess,
    })
    # set the experiment name to put the experiment files in different folders
    expdir = os.path.join(self.outdir, "hotgymsc")
    self.generateModules(expDesc, expdir)
    self.descriptions["hotgymsc"]=(expdir, expDesc)
    return expdir
def generateModules(self, expDesc, outdir):
  """ This calls ExpGenerator to generate a base description file and
  permutations file from expDesc.

  Parameters:
  -------------------------------------------------------------------
  expDesc:  Experiment description dict
  outdir:   Which output directory to use
  """
  # Start from a clean slate so stale files from a previous run cannot
  # leak into this experiment's directories.
  shutil.rmtree(outdir, ignore_errors=True)

  outdirv2term = os.path.join(outdir, "v2Term", "base")
  outdirv2noterm = os.path.join(outdir, "v2NoTerm", "base")
  outdirdef = os.path.join(outdir, "cluster_default", "base")

  # Generate the base description and permutations files for each enabled
  # search variant.  BUGFIX: the original called expGenerator for the
  # v2Term variant BEFORE `args` was ever assigned (a NameError); build
  # the argument list, pointing at the v2Term output directory, first.
  if self.__doV2Term:
    args = [
        "--description=%s" % (json.dumps(expDesc)),
        "--version=v2",
        "--outDir=%s" % (outdirv2term)
    ]
    experiment_generator.expGenerator(args)

  if self.__doV2noTerm:
    args = [
        "--description=%s" % (json.dumps(expDesc)),
        "--version=v2",
        "--outDir=%s" % (outdirv2noterm)
    ]
    experiment_generator.expGenerator(args)

  if self.__doClusterDef:
    args = [
        "--description=%s" % (json.dumps(expDesc)),
        "--version=v2",
        "--outDir=%s" % (outdirdef)
    ]
    experiment_generator.expGenerator(args)
def runV2noTerm(self, basedir, expname, searchtype, exportdict):
  """ Run the v2 hypersearch WITHOUT terminators for one experiment and
  queue its results.

  Parameters:
  -------------------------------------------------------------------
  basedir:    experiment directory created by generateModules()
  expname:    benchmark name (e.g. "hotgym")
  searchtype: label stored with the results (e.g. "v2NoTerm")
  exportdict: environment variables to export to the workers
  retval:     dict of results pulled from the jobs database
  """
  # Removed the unused local `useTerms` ("--useTerminators") left over
  # from runV2Term: this variant deliberately omits that flag.
  v2path = os.path.join(basedir, "v2NoTerm", "base", "permutations.py")
  runString = [v2path,
               "--maxWorkers=%d" % self.__maxNumWorkers,
               "--searchMethod=v2",
               "--exports=%s" % json.dumps(exportdict)]
  # Optional limits: model count, wall-clock timeout, and ensemble mode.
  if self.__maxPermutations > 0:
    runString.append("--maxPermutations=%d" % self.__maxPermutations)
  if self.__timeout is not None:
    runString.append("--timeout=%d" % self.__timeout)
  if self.__doEnsemble:
    runString.append("--ensemble")

  pr = permutations_runner.runPermutations(runString)

  # Pull the results from the jobs database, tag them with the exported
  # custom environment variables, and queue them for reporting.
  resultdict = self.getResultsFromJobDB(pr, expname, searchtype, basedir)
  self.addExportsToResults(resultdict, exportdict)
  self.__resultQ.put(resultdict)
  self.__expJobMap.put((expname, pr))
  return resultdict
def runDefault(self, basedir, expname, searchtype, exportdict):
  """ Run a hypersearch with the cluster-default settings for one
  experiment and queue its results.

  Parameters:
  -------------------------------------------------------------------
  basedir:    experiment directory created by generateModules()
  expname:    benchmark name (e.g. "hotgym")
  searchtype: label stored with the results (e.g. "cluster_default")
  exportdict: environment variables to export to the workers
  retval:     dict of results pulled from the jobs database
  """
  # Removed the unused local `searchMethod` ("--searchMethod=v2"): the
  # cluster-default run is driven by --clusterDefault instead.
  path = os.path.join(basedir, "cluster_default", "base", "permutations.py")
  runString = [path,
               "--maxWorkers=%d" % self.__maxNumWorkers,
               "--clusterDefault",
               "--exports=%s" % json.dumps(exportdict)]
  # Optional limits: model count, wall-clock timeout, and ensemble mode.
  if self.__maxPermutations > 0:
    runString.append("--maxPermutations=%d" % self.__maxPermutations)
  if self.__timeout is not None:
    runString.append("--timeout=%d" % self.__timeout)
  if self.__doEnsemble:
    runString.append("--ensemble")

  pr = permutations_runner.runPermutations(runString)

  resultdict = self.getResultsFromJobDB(pr, expname, searchtype, basedir)
  # Save the exported custom environment variables with the results and
  # queue them for asynchronous reporting.
  self.addExportsToResults(resultdict, exportdict)
  self.__resultQ.put(resultdict)
  self.__expJobMap.put((expname, pr))
  return resultdict
def runV2Term(self, basedir, expname, searchtype, exportdict):
  """ Run the v2 hypersearch WITH terminators for one experiment and
  queue its results.  Returns the result dict pulled from the jobs DB.
  """
  permPath = os.path.join(basedir, "v2Term", "base", "permutations.py")
  cmdArgs = [permPath,
             "--maxWorkers=%d" % self.__maxNumWorkers,
             "--searchMethod=v2",
             "--useTerminators",
             "--exports=%s" % json.dumps(exportdict)]
  # Optional limits: model count, wall-clock timeout, and ensemble mode.
  if self.__maxPermutations > 0:
    cmdArgs.append("--maxPermutations=%d" % self.__maxPermutations)
  if self.__timeout is not None:
    cmdArgs.append("--timeout=%d" % self.__timeout)
  if self.__doEnsemble:
    cmdArgs.append("--ensemble")

  pr = permutations_runner.runPermutations(cmdArgs)

  # Pull the results from the jobs database, tag them with the exported
  # custom environment variables, and queue them for reporting.
  resultdict = self.getResultsFromJobDB(pr, expname, searchtype, basedir)
  self.addExportsToResults(resultdict, exportdict)
  self.__resultQ.put(resultdict)
  self.__expJobMap.put((expname, pr))
  return resultdict
def runBenchmarksSerial(self, basedir, expname, exportdict):
  """ Run the enabled searches for one experiment in-process, one after
  another.  Always returns True. """
  # Each entry is (enabled-flag, runner-method, search label); order
  # matches the serial order of the original implementation.
  variants = ((self.__doV2Term, self.runV2Term, "v2Term"),
              (self.__doV2noTerm, self.runV2noTerm, "v2NoTerm"),
              (self.__doClusterDef, self.runDefault, "cluster_default"))
  for enabled, runner, label in variants:
    if enabled:
      runner(basedir, expname, label, exportdict)
  return True
def runBenchmarksParallel(self, basedir, expname, exportdict):
  """ Queue the enabled searches for one experiment as (callable, args)
  work items on self.testQ for later concurrent execution.
  Always returns True. """
  # FIX: list.append() returns None, so the original's assignments of its
  # return value to unused locals (v2termres etc.) were meaningless and
  # have been removed.
  if self.__doV2Term:
    self.testQ.append((self.runV2Term,
                       [basedir, expname, "v2Term", exportdict]))
  if self.__doV2noTerm:
    self.testQ.append((self.runV2noTerm,
                       [basedir, expname, "v2NoTerm", exportdict]))
  if self.__doClusterDef:
    self.testQ.append((self.runDefault,
                       [basedir, expname, "cluster_default", exportdict]))
  return True
def compareBenchmarks(self, expname, searchMethod, result):
  """ Record `result` for (expname, searchMethod) and check its metric
  against the stored benchmark value.

  retval: True if the measured metric is within 2.2x of the benchmark.
  """
  benchmark = self.benchmarkDB[str([expname, searchMethod])]
  # BUGFIX: the original referenced two undefined names -- `results` and
  # `resulttuple` -- where the `result` parameter was clearly intended
  # (assertBenchmarks() reads the metric as result['metric'] too).
  self.resultDB[str([expname, searchMethod])] = result
  # Make sure results are within 2.2x of the desired result.
  # This is only temporary before
  # we establish the actual desired ranges
  return (result['metric'] / benchmark) < 2.20
def assertResults(self):
  """ Fail the test run when any benchmark missed its error criteria. """
  failureCount = self.__failures
  self.assertEqual(failureCount, 0,
                   "Some benchmarks failed to meet error criteria.")
def assertBenchmarks(self, resultdict):
expname = resultdict['expName']
searchMethod = resultdict['searchType']
benchmark = self.benchmarkDB[expname + "," + searchMethod]
self.resultDB[expname + ',' + searchMethod] = resultdict
self.__resultList.append(resultdict)
if (resultdict['metric'] / benchmark[0]) > (1+benchmark[1]):
print "HyperSearch %s on %s benchmark did not match " \
"the expected value. (Expected: %f Observed: %f)" % \
(searchMethod, expname, benchmark[0], resultdict['metric'])
self.__failures+=1
return
def getJobParamsFromJobDB(self, jobID):
  """ Fetch the params blob stored for `jobID` in the client jobs
  database and decode it from JSON into a dict. """
  jobInfo = cjdao.ClientJobsDAO.get().jobInfo(jobID)
  return json.loads(jobInfo.params)
def checkPythonScript(self, scriptAbsPath):
  """ Sanity-check a generated python script: it must exist at an
  absolute path and be loadable as a module.  Returns the loaded module;
  raises if the file is missing or not valid python. """
  assert os.path.isabs(scriptAbsPath)
  assert os.path.isfile(scriptAbsPath), (
      "Expected python script to be present here: <%s>" % scriptAbsPath)
  # Loading the file is the viability test itself -- imp.load_source
  # raises if the script cannot be imported.
  return imp.load_source('test', scriptAbsPath)
def testOPFBenchmarks(self):
  """Run the entire set of OPF benchmark experiments
  """
  # (name, generator-method) pairs; the order matters because the
  # generated experiment directories are appended to a FIFO here and
  # consumed in the very same order when the searches are launched below.
  benchmarkMakers = (
      ("sine", self.benchmarkSine),
      ("hotgym", self.benchmarkHotGym),
      ("twovars", self.benchmarkTwoVars),
      ("twovars2", self.benchmarkTwoVarsSquare),
      ("threevars", self.benchmarkThreeVars),
      ("fourvars", self.benchmarkFourVars),
      ("categories", self.benchmarkCategories),
      ("sawtooth", self.benchmarkSawtooth),
      ("hotgymsc", self.benchmarkHotGymSC),
  )

  # Reject misspelled benchmark names up front.
  for bm in self.listOfBenchmarks:
    if bm not in self.allBenchmarks:
      raise Exception("Unknown benchmark %s" % bm)

  # FIFO queue of the per-test experiment directories.
  fifodirs = deque()
  baseoutdir = self.outdir
  exportDicts = self.setUpExportDicts()

  for iterNum in range(self.iterations):
    for exports in exportDicts:
      # Encode the export variables (and, when running more than once,
      # the iteration number) into the output path so runs don't collide.
      if len(exportDicts) > 1:
        prependDict = exports
      else:
        prependDict = dict()
      if self.iterations > 1:
        prependDict["iteration"] = iterNum
      prepend = self.generatePrependPath(prependDict)
      self.outdir = os.path.join(baseoutdir, prepend)
      for name, maker in benchmarkMakers:
        if name in self.listOfBenchmarks:
          fifodirs.append(maker())

  self.outdir = baseoutdir
  self.syncFiles()
  if self.filesOnly:
    return

  # Serial execution when only one job may run at a time, otherwise the
  # searches are queued and run concurrently.
  if self.maxConcurrentJobs == 1:
    self.runBenchmarks = self.runBenchmarksSerial
  else:
    self.runBenchmarks = self.runBenchmarksParallel

  for iterNum in range(self.iterations):
    for exports in exportDicts:
      for name, _ in benchmarkMakers:
        if name in self.listOfBenchmarks:
          assert self.runBenchmarks(fifodirs.popleft(), name, exports)

  # Poll processes until they all finish.
  self.runJobs(self.maxConcurrentJobs)

  # Disabled removing the temporary directory

  # When a fraction of the data was held out, validate the best models on
  # it with production workers before reporting.
  if self.__trainFraction < 1.0:
    self.runProductionWorkers()
    self.waitForProductionWorkers()
  self.printResults()
  self.assertResults()
def tearDown(self):
if self.isSaveResults:
self.saveResults()
else:
self.removeTmpDirs()
print "Done with all tests"
if __name__ == "__main__":

  # Usage text shown by OptionParser for -h/--help.
  helpString = (
    "Usage: \n\n"
    "runOPFBenchmarks --outdir DIRNAME "
    "// for simple v2 with Terminators benchmarks \n"
    "runOPFBenchmarks --outdir DIRNAME --searches=v2noTerm, v2Term "
    "// to run all searches \n"
    "Specify a DIRNAME if you want to keep the results, otherwise it will "
    "be done in a temp directory \n"
  )

  # ArgParser
  parser = OptionParser(usage=helpString)

  parser.add_option("--outdir", dest="outdir", default="artifacts",
                    type="string", help = "Specify a dirname if you want to"
                    "keep the results [default=%default].")

  parser.add_option( "--searches", dest="searches", default="v2NoTerm",
                    type="string", help="Which searches to run,"
                    "specify as a list "
                    "can be composed of v2noTerm, v2Term, cluster_default"
                    "(ie. --searches=v2noTerm)"
                    "[default: %default].")

  parser.add_option("--maxPermutations", dest="maxPermutations", default= -1,
                    type="int", help="Maximum number of models to search."
                    "-1 for no limit to the number of models. "
                    "[default: %default].")

  parser.add_option("--recsToProcess", dest="recsToProcess", default= -1,
                    type="int", help="Maximum number of records to use as data"
                    " from each experiment. "
                    "-1 for the entire dateset [default: %default].")

  parser.add_option("--maxConcurrentJobs", dest="maxConcurrentJobs",
                    default= 4, type="int",
                    help="Maximum number of tests to run in parallel"
                    "each will be allocated maxWorkers number of workers. "
                    "[default: %default].")

  parser.add_option("--maxWorkers", dest="maxWorkers", default=None, type="int",
                    help="Maximum number of workers to use simultaneously"
                    "[default: %default].")

  parser.add_option("--benchmarks", dest="benchmarks",
                    default="hotgymsc",
                    type="string",
                    help="Which tests to run choose from "
                    "hotgym, sine, "
                    "hotgymsc [default: %default].")

  parser.add_option("--maxBranchings", dest="maxBranchings", default=None,
                    type="string", help="What is the maximum number of fields "
                    "to add per sprint. This dictates how many fields"
                    "are used at each sprint to generate the new swarms."
                    " Can be a comma separated list for the "
                    "different branching limits that you want to test. "
                    "All means no limit on branching, None is config default "
                    "[default: %default].")

  parser.add_option("--maxParticles", dest="maxParticles", default=None,
                    type="string", help="Maximum number of particles per "
                    "swarm to launch. None is config default"
                    "[default: %default].")

  parser.add_option("--iterations", dest="iterations", default=1, type="int",
                    help="Number of times to run each test"
                    "[default: %default].")

  parser.add_option("--generateFilesOnly", dest="filesOnly", default=False,
                    action="store_true", help="Setting this to true will only "
                    "generate the permutations and description files."
                    " No searches will be run. [default: %default].")

  parser.add_option("--useReconstruction", dest="useReconstruction",
                    action="store_true", help="Setting this to true will"
                    " use the old SP-reconstruction method to "
                    "make predictions. Used for side-by-side comparisons")

  parser.add_option("--timeout", dest="timeout", default=25, type="int",
                    help="The timeout for each individual search measured "
                    "in minutes. If a search reaches this timeout all searches"
                    " are cancelled")

  parser.add_option("--metaOptimize", dest="metaOptimize", default=None,
                    type="string", help="Dictionary of default swarm "
                    "parameters you want to modify. Options are inertia, "
                    "cogRate, socRate, minParticlesPerSwarm "
                    "[default: %default].")

  parser.add_option("--trainFraction", dest="trainFraction", default=1.0,
                    type="float", help="Setting this to true will swarm on"
                    " x*100% of the data and run a production worker on "
                    "(1-x)*100% of the data. This is to see if the swarm is "
                    "overfitting [default: %default].")

  parser.add_option("--ensemble", dest="ensemble", default=False,
                    action="store_true", help="Run an ensemble instead of HS"
                    " for this job [default: %default].")

  options, remainingArgs = parser.parse_args()

  # Set up module
  print "\nCURRENT DIRECTORY:", os.getcwd()

  # Fall back to a throw-away temp directory -- and delete the results
  # afterwards -- when the requested output directory does not exist.
  if not os.path.isdir(options.outdir):
    options.outdir = tempfile.mkdtemp()
    print "Provided directory to store Benchmark files is invalid.",
    print "Now storing in <%s> and then deleting" % options.outdir
    OPFBenchmarkRunner.isSaveResults = False

  # All benchmark artifacts live under <outdir>/BenchmarkFiles, which is
  # recreated fresh for every run.
  OPFBenchmarkRunner.outdir = os.path.abspath(os.path.join(options.outdir,
                                              "BenchmarkFiles"))
  if os.path.isdir(OPFBenchmarkRunner.outdir):
    shutil.rmtree(OPFBenchmarkRunner.outdir)
  os.mkdir(OPFBenchmarkRunner.outdir)

  # Push the parsed options into OPFBenchmarkRunner class attributes: the
  # unittest framework instantiates the class itself, so configuration
  # has to happen at class level before unittest.main() runs.
  OPFBenchmarkRunner.setMetaOptimize(options.metaOptimize)
  OPFBenchmarkRunner.setMaxNumWorkers(options.maxWorkers)
  OPFBenchmarkRunner.setTrainFraction(options.trainFraction)
  OPFBenchmarkRunner.setNumRecords(options.recsToProcess)
  OPFBenchmarkRunner.setTimeout(options.timeout)
  OPFBenchmarkRunner.setMaxPermutations(options.maxPermutations)
  if options.ensemble:
    OPFBenchmarkRunner.setDoEnsemble()
  if options.useReconstruction:
    OPFBenchmarkRunner.EXP_COMMON["inferenceType"] = \
        InferenceType.TemporalNextStep
  OPFBenchmarkRunner.setupTrainTestSplits()
  OPFBenchmarkRunner.datadir = os.path.join('extra')
  tests = options.searches
  if not OPFBenchmarkRunner.getTests(tests):
    raise Exception("Incorrect formatting of option \n %s" % helpString)
  OPFBenchmarkRunner.listOfBenchmarks = options.benchmarks.lower().split(',')
  OPFBenchmarkRunner.filesOnly = options.filesOnly
  OPFBenchmarkRunner.maxParticles = options.maxParticles
  OPFBenchmarkRunner.maxBranchings = options.maxBranchings
  OPFBenchmarkRunner.iterations = options.iterations
  OPFBenchmarkRunner.maxConcurrentJobs = options.maxConcurrentJobs

  # Hand any remaining args through to unittest, which runs the tests.
  unittest.main(argv=[sys.argv[0]] + remainingArgs)
| 52,577 | Python | .py | 1,279 | 33.338546 | 80 | 0.634769 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,146 | __init__.py | numenta_nupic-legacy/tests/integration/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,147 | __init__.py | numenta_nupic-legacy/tests/integration/nupic/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,148 | aggregation_test.py | numenta_nupic-legacy/tests/integration/nupic/data/aggregation_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import os
import tempfile
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
from nupic.data.aggregator import Aggregator, generateDataset
from nupic import support as nupic_support
from nupic.support.unittesthelpers.testcasebase import (unittest,
TestCaseBase as HelperTestCaseBase)
def _aggregate(input, options, output, timeFieldName):
""" Aggregate the input stream and write aggregated records to the output
stream
"""
aggregator = Aggregator(aggregationInfo=options,
inputFields=input.getFields(),
timeFieldName=timeFieldName)
while True:
inRecord = input.getNextRecord()
print "Feeding in: ", inRecord
(outRecord, aggBookmark) = aggregator.next(record = inRecord,
curInputBookmark = None)
print "Record out: ", outRecord
if outRecord is not None:
output.appendRecord(outRecord, None)
if inRecord is None and outRecord is None:
break
class DataInputList(object):
  """
  Wrapper that makes a plain python list look like a record-stream input:
  getNextRecord() returns successive items and None once exhausted.
  """

  _list = None

  def __init__(self, list, fields):
    # NOTE: the parameter name `list` (shadowing the builtin) is kept for
    # backward compatibility with existing callers.
    self._list = list
    self._fields = fields
    self._recNo = 0

  def getNextRecord(self):
    """ Return the next record, or None when the list is exhausted. """
    # BUGFIX: the original used a bare `except:` that silently swallowed
    # every possible error; only an unexpected IndexError should map to
    # "no more records".
    try:
      if self._recNo >= len(self._list):
        return None
      ret = self._list[self._recNo]
      self._recNo += 1
    except IndexError:
      ret = None
    return ret

  def getCurPos(self):
    # Position tracking is not implemented for list-backed input.
    return 0

  def getFields(self):
    return self._fields
class DataOutputList(object):
  """
  In-memory output sink: collects every appended record in `_store`.
  """

  metaProvider = None

  def __init__(self, file):
    # The `file` argument is accepted for interface compatibility with
    # the file-backed output classes, but records only live in memory.
    self._store = []

  def appendRecord(self, record, inputRef=None):
    self._store.append(record)

  def close(self):
    # Nothing to release for an in-memory store.
    pass
class DataOutputMyFile(object):
"""
File wrapper for output
"""
_file = None
metaProvider = None
def __init__(self, file):
self._file = file
def appendRecord(self, record, inputRef):
if self._file == None:
print 'No File'
self._file.appendRecord(record)
def close(self):
self._file.close()
#def makeDataset(self, years=0, months=0, days=0, hours=0, minutes=0, seconds=0,
# irregular=False, gaps=False, reset=False, sequenceId=False):
# """Make dataset with certain characteristics
#
# - years, months, days, hours. minutes, seconds : the time interval between
# consecutive records
# - irregular: if True introduce irregular intervals between records
# - gaps: if True introduce gaps (missing records)
# - reset: if reset is True, will generate a reset signal=1 in the 1st and 9th
# records (meaning a second sequence started in the 9th). Otherwise only the
# the 1st record will have a reset=1 meaning all the records belong to the
# same sequence
# - sequenceId: if sequenceId=True, will generate a sequenceId=1 for all the
# records until the 8th record. All the records starting with the 9th will
# have sequenceId=2 (meaning a second sequence started in the 9th).
# Always generates 16 records to a file named test.csv
# If irregular the 6rd and 7th records will be in the same period
# If gaps there will be a gap of 3 periods between the 12th and 13th records
# """
# d = datetime.datetime(1,1,1)
#
# period = datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)
#
# # Verify that all variables are either 0 or 1
# assert all(x in (0, 1) for x in (years, months,days, hours, minutes, seconds))
# # Verify that only one time unit is periodized (easier to manage)
# assert sum((years, months,days, hours, minutes, seconds)) == 1
#
#
# fields = [('reset', 'int', 'R'),
# ('sequenceId', 'int', 'S'),
# ('timestamp', 'datetime', 'T'),
# ('a', 'int', ''),
# ('b', 'int', ''),
# ('c', 'int', ''),
# ('d', 'int', '')]
#
# with File('test.csv', fields) as f:
# for i in range(4):
# for j in range(4):
# index = 4 * i + j
# if irregular and index == 5:
# y = 0
# m = 0
# p = None
# elif gaps and index == 11:
# y = years * 3
# m = months * 3
# y += m / 12
# m = m % 12
# p = period * 3
# else:
# y = years
# m = months
# p = period
#
# if y > 0 or m > 0:
# year = d.year + y + (d.month - 1 + m) / 12
# month = (d.month - 1 + m) % 12 + 1
# d = d.replace(year=year, month=month)
# if p is not None:
# d += p
#
# if index == 0 or (index == 8 and reset):
# resetSignal = 1
# else:
# resetSignal = 0
#
# if index < 8:
# seqId = 1
# elif sequenceId:
# seqId = 2
#
#
# #line = '%d,%d,%s,%d,%d,%d\n' % (resetSignal, seqId, str(d), i, j, i * 100)
# #print line
# record = [resetSignal, seqId, d, i, j, i * 100]
# f.write(record)
#
# return
#def test():
# average = lambda x: sum(x) / float(len(x))
#
# #class TestParser(BaseParser):
# # def __init__(self):
# # def parseTimestamp(s):
# # d,t = s.split()
# # year, month, day = [int(x) for x in d.split('-')]
# # hour, minute, second = [int(x) for x in t.split(':')]
# # return datetime.datetime(year, month, day, hour, minute, second)
# #
# # BaseParser.__init__(self,
# #
# # [('reset', int),
# # ('sequenceId', int),
# # ('timestamp', parseTimestamp),
# # ('a', int),
# # ('b', int),
# # ('c', int),
# # ('d', int)],
# # delimiter=',')
# # def parse(self, line):
# # values = BaseParser.parse(self, line)
# # return values
#
# from nupic.support import title
#
#
# fields = [('timestamp', 'datetime', ''), ('b', 'float', ''), ('c', 'int', '')]
#
# #-------------------------------
# #
# # Regular intervals every minute
# #
# #-------------------------------
# makeDataset(minutes=1)
#
# title('Write entire file to standard output (16 records)')
# with open('test.csv') as f:
# lines = f.readlines()
# assert len(lines) == 19
# for line in lines:
# print line[:-1]
#
# title('Aggregate every 4 minutes (expecting 4 records)')
#
#
# options = dict(
# timeField=File('test.csv').fieldNames.index('timestamp'),
# fields=[
# # custom aggregate function
# ('b', lambda seq: sum(seq) / float(len(seq))),
# # built-in aggregate function
# ('c',sum)],
# minutes=4)
#
#
# with File('test.csv') as f:
#
#
#
# with File('test.bin', fields) as out:
# # writing the file with fields b, c
# _aggregate(f, options, out)
#
#
# for i, r in enumerate(File('test.bin')):
# timestamp, b, c = r
# print "timestamp = %s" % timestamp
# assert b == 1.5 # average
# assert c == 400 * i
#
# title('Aggregate every 2 minutes (expecting 8 records)')
# options['minutes'] = 2
#
# with File('test.csv') as f:
# with File('test.bin', fields) as out:
# _aggregate(f, options, out)
#
# for i, r in enumerate(File('test.bin')):
# print line
# timestamp, b, c = r
# assert b == 0.5 if (i % 2 == 0) else 2.5 # average
# assert c == 200 * (i / 2)
#
# #-------------------------------
# #
# # Regular intervals every month
# #
# #-------------------------------
# makeDataset(months=1)
# title('Write entire file to standard output (16 records)')
# with File('test.csv') as f:
# lines = f.readlines()
# assert len(lines) == 16
# for line in lines:
# print line[:-1]
#
# title('Aggregate every 3 months (expecting 5 records)')
#
# options['months'] = 3
# options['minutes'] = 0
#
# with File('test.csv') as f:
# with File('test.bin', fields) as out:
# _aggregate(f, options, out)
#
# values = []
# for i, r in enumerate(File('test.bin')):
# print line
# timestamp, b, c = r
# values.append((b, c))
#
# assert values[0] == (1.0, 0)
# assert values[3] == (2.0, 600)
# assert values[4] == (1.0, 900)
# assert values[5] == (3.0, 300) # aggregation of last record only
#
# #-------------------------------
# #
# # Irregular intervals every second
# #
# #-------------------------------
# makeDataset(seconds=1, irregular=True)
# title('Write entire file to standard output (16 records)')
# with File('test.csv') as f:
# lines = f.readlines()
# assert len(lines) == 16
# for line in lines:
# print line[:-1]
#
# title('Aggregate every second (expecting 15 records)')
#
#
# options['months'] = 0
# options['seconds'] = 1
#
# with File('test.csv') as f:
# with File('test.bin', fields) as out:
# _aggregate(f, options, out)
#
# values = []
# for i, r in enumerate(File('test.bin')):
# print line
# timestamp, b, c = r
# values.append((b, c))
#
# assert values[0] == (0.0, 0)
# assert values[4] == (0.5, 200) # aggregate of two records
# assert values[5] == (2.0, 100)
# assert values[14] == (3.0, 300)
#
#
# title('Aggregate every 8 seconds (expecting 2 records)')
# options['seconds'] = 8
#
# with File('test.csv') as f:
# with File('test.bin', fields) as out:
# _aggregate(f, options, out)
#
# values = []
# for i, r in enumerate(File('test.bin')):
# print line
# timestamp, b, c = r
# values.append((b, c))
#
# assert values[0][1] == 600
# assert values[1][1] == 1800
#
#
# #-------------------------------
# #
# # Annual intervals with a gap
# #
# #-------------------------------
# makeDataset(years=1, gaps=1)
# title('Write entire file to standard output (16 records)')
# with File('test.csv') as f:
# lines = f.readlines()
# assert len(lines) == 16
# for line in lines:
# print line[:-1]
#
# title('Aggregate two years (expecting 9 records)')
#
# options['years'] = 2
# options['seconds'] = 0
#
# with File('test.csv') as f:
# with File('test.bin', fields) as out:
# _aggregate(f, options, out)
#
# values = []
# for i, r in enumerate(File('test.bin')):
# print line
# timestamp, b, c = r
# values.append((b, c))
#
# assert values[5] == (2.0, 200) # aggregated just a single record due to the gap
# assert values[6] == (3.0, 200) # aggregated just a single record due to the gap
# assert values[7] == (0.5, 600)
#
#
# #------------------------------------
# #
# # Daily intervals with reset signal
# #
# #-----------------------------------
# makeDataset(days=1, reset=True)
# title('Write entire file to standard output (16 records)')
# with File('test.csv') as f:
# lines = f.readlines()
# assert len(lines) == 16
# for line in lines:
# print line[:-1]
#
# title('Aggregate 6 days (expecting 4 records)')
# #
# options['years'] = 0
# options['resetField'] = 'reset'
# options['days'] = 6
#
# with File('test.csv') as f:
# with File('test.bin', fields) as out:
# _aggregate(f, options, out)
#
# values = []
# for i, r in enumerate(File('test.bin')):
# print line
# timestamp, b, c = r
# values.append((b, c))
#
# assert values[0][1] == 200 # records 0 through 5
# assert values[1] == (2.5, 200) # records 6 & 7
# assert values[2][1] == 1400 # records 8 through 13
# assert values[3] == (2.5, 600) # records 14 & 15
#
# #------------------------------------
# #
# # Daily intervals with sequence id
# #
# #-----------------------------------
# makeDataset(days=1, sequenceId=True)
# title('Write entire file to standard output (16 records)')
# with open('test.csv') as f:
# lines = f.readlines()
# assert len(lines) == 16
# for line in lines:
# print line[:-1]
#
# title('Aggregate 6 days (expecting 4 records)')
# #
# options['years'] = 0
# options['resetField'] = None
# options['sequenceIdField'] = 'sequenceId'
# options['days'] = 6
#
# with open('test.csv') as f:
# with File('test.bin', fields) as out:
# _aggregate(f, options, out)
#
# values = []
# for i, r in enumerate(File('test.bin')):
# print line
# timestamp, b, c = r
# values.append((b, c))
#
# assert values[0][1] == 200 # records 0 through 5
# assert values[1] == (2.5, 200) # records 6 & 7
# assert values[2][1] == 1400 # records 8 through 13
# assert values[3] == (2.5, 600) # records 14 & 15
#
#
# #-------------------------------
# #
# # Hourly intervals with a gap
# #
# #-------------------------------
# makeDataset(hours=1, gaps=1)
# title('Write entire file to standard output (16 records)')
# with open('test.csv') as f:
# lines = f.readlines()
# assert len(lines) == 16
# for line in lines:
# print line[:-1]
#
# title('Aggregate tow hours (expecting 9 records)')
#
# options['hours'] = 2
# options['days'] = 0
#
# with open('test.csv') as f:
# with File('test.bin', fields) as out:
# _aggregate(f, options, out)
#
# values = []
# for i, r in enumerate(File('test.bin')):
# print line
# timestamp, b, c = r
# values.append((b, c))
#
# assert values[5] == (2.0, 200) # aggregated just a single record due to the gap
# assert values[6] == (3.0, 200) # aggregated just a single record due to the gap
# assert values[7] == (0.5, 600)
class AggregationTests(HelperTestCaseBase):
  """Integration tests for the record aggregator (``_aggregate`` /
  ``generateDataset``), exercised against the bundled gym dataset and a few
  small synthetic datasets written through FileRecordStream.
  """
  def setUp(self):
    """ Method called to prepare the test fixture. This is called immediately
    before calling the test method; any exception raised by this method will be
    considered an error rather than a test failure. The default implementation
    does nothing.
    NOTE: this is called once for every sub-test and a new AggregationTests
    instance is constructed for every sub-test.
    """
    # Insert newline before each sub-test's output
    print
    return
  def test_GymAggregateWithOldData(self):
    """Aggregation must tolerate out-of-order ("old") records.

    The first three gym records are re-appended at the end of the input;
    afterwards exactly one backwards time step is expected between consecutive
    aggregated output records.
    """
    filename = resource_filename(
      "nupic.datafiles", "extra/gym/gym.csv"
    )
    input = []
    gymFields = None
    with FileRecordStream(filename) as f:
      gymFields = f.getFields()
      for i in range(10):
        input.append(f.getNextRecord())
    #Append the records from the beginning to the end of the dataset
    input.extend(input[0:3])
    for h in (1,3):
      aggregationOptions = dict(
        fields=[
          ('timestamp', lambda x: x[0],),
          ('attendeeCount', sum),
          ('consumption', sum)],
        hours=h
      )
      # NamedTemporaryFile is used only as a unique-name generator; the file
      # itself is closed (deleted) immediately.
      handle = \
        tempfile.NamedTemporaryFile(prefix='test',
                                    suffix='.bin')
      outputFile = handle.name
      handle.close()
      dataInput = DataInputList(input, gymFields)
      dataOutput = DataOutputList(None)
      _aggregate(input=dataInput, options=aggregationOptions,
                 timeFieldName='timestamp', output=dataOutput)
      dataOutput.close()
      outputRecords = dataOutput._store
      timeFieldIdx = [f[0] for f in gymFields].index('timestamp')
      # Timestamp deltas between consecutive aggregated records; a negative
      # delta marks the single spot where the re-appended old records landed.
      diffs = []
      for i in range(1,len(outputRecords)):
        diffs.append(outputRecords[i][timeFieldIdx] - \
                     outputRecords[i-1][timeFieldIdx])
      positiveTimeFlow = map((lambda x: x < datetime.timedelta(seconds=0)),
                             diffs)
      #Make sure that old records are in the aggregated output and at the same
      #time make sure that they are in consecutive order after being inserted
      self.assertEquals(sum(positiveTimeFlow), 1)
    return
  def test_GymAggregate(self):
    """Smoke-test in-memory aggregation of 10 gym records at 1- and 3-hour
    intervals, writing the aggregated stream through a FileRecordStream and
    echoing the results to stdout (no value assertions).
    """
    filename = resource_filename(
      "nupic.datafiles", "extra/gym/gym.csv"
    )
    input = []
    gymFields = None
    with FileRecordStream(filename) as f:
      gymFields = f.getFields()
      for i in range(10):
        input.append(f.getNextRecord())
    for h in (1,3):
      aggregationOptions = dict(
        fields=[
          ('timestamp', lambda x: x[0],),
          ('attendeeCount', sum),
          ('consumption', sum)],
        hours=h
      )
      handle = \
        tempfile.NamedTemporaryFile(prefix='test',
                                    suffix='.bin')
      outputFile = handle.name
      handle.close()
      dataInput = DataInputList(input, gymFields)
      dataOutput = DataOutputMyFile(FileRecordStream(outputFile, write=True,
                                                     fields=gymFields))
      _aggregate(input=dataInput, options=aggregationOptions,
                 timeFieldName='timestamp', output=dataOutput)
      dataOutput.close()
      for r in FileRecordStream(outputFile):
        print r
      print '-' * 30
    return
  def test_GenerateDataset(self):
    """End-to-end test of generateDataset(): aggregate the gym dataset at
    5-hour intervals into a temp CSV and verify the returned path matches the
    requested output file and that the file was actually created.
    """
    dataset = 'extra/gym/gym.csv'
    print "Using input dataset: ", dataset
    filename = resource_filename("nupic.datafiles", dataset)
    with FileRecordStream(filename) as f:
      gymFields = f.getFieldNames()
    aggregationOptions = dict(
      timeField=gymFields.index('timestamp'),
      fields=[('attendeeCount', sum),
              ('consumption', sum),
              ('timestamp', lambda x: x[0])],
      hours=5
    )
    # Output must live next to the input dataset; NamedTemporaryFile only
    # supplies a unique name here.
    handle = \
      tempfile.NamedTemporaryFile(prefix='agg_gym_hours_5',
                                  suffix='.csv',
                                  dir=os.path.dirname(
                                    resource_filename("nupic.datafiles", dataset)
                                  )
                                  )
    outputFile = handle.name
    handle.close()
    print "Expected outputFile path: ", outputFile
    print "Files in the destination folder before the test:"
    print os.listdir(os.path.abspath(os.path.dirname(
      resource_filename("nupic.datafiles", dataset)))
    )
    if os.path.isfile(outputFile):
      print "Removing existing outputFile: ", outputFile
      os.remove(outputFile)
    self.assertFalse(os.path.exists(outputFile),
                     msg="Shouldn't exist, but does: " + str(outputFile))
    result = generateDataset(aggregationOptions, dataset, outputFile)
    print "generateDataset() returned: ", result
    f1 = os.path.abspath(os.path.normpath(result))
    print "normalized generateDataset() result path: ", f1
    f2 = os.path.normpath(outputFile)
    print "normalized outputFile path: ", f2
    self.assertEqual(f1, f2)
    print "Checking for presence of outputFile: ", outputFile
    self.assertTrue(
      os.path.isfile(outputFile),
      msg="Missing outputFile: %r; normalized generateDataset() result: %r" % (
        outputFile, f1))
    print "Files in the destination folder after the test:"
    print os.listdir(os.path.abspath(os.path.dirname(
      resource_filename("nupic.datafiles", dataset)
    )))
    print result
    print '-' * 30
    return
  def test_GapsInIrregularData(self):
    """Aggregate irregularly spaced records (with multi-day gaps) at 24-hour
    intervals and compare the full aggregated output against a golden string.
    """
    # Cleanup previous files if exist
    import glob
    for f in glob.glob('gap.*'):
      print 'Removing', f
      os.remove(f)
    #class TestParser(BaseParser):
    #  def __init__(self):
    #    def parseTimestamp(s):
    #      d,t = s.split()
    #      year, month, day = [int(x) for x in d.split('-')]
    #      hour, minute, second = [int(x) for x in t.split(':')]
    #      return datetime.datetime(year, month, day, hour, minute, second)
    #
    #    BaseParser.__init__(self,
    #                        [('dateTime', parseTimestamp),
    #                         ('sequenceId', int),
    #                         ('cardtype', int),
    #                         ('fraud', bool),
    #                         ('amount', float)],
    #                        delimiter=',')
    #  def parse(self, line):
    #    values = BaseParser.parse(self, line)
    #    return values
    #dateTime,cardnum,cardtype,fraud,amount
    # NOTE: the data lines are intentionally unindented -- they are parsed by
    # splitting each line on ',' below.
    data = """\
2009-04-03 19:05:06,129.3
2009-04-04 15:19:12,46.6
2009-04-07 02:54:04,30.32
2009-04-07 06:27:12,84.52
2009-04-07 06:42:21,21.1
2009-04-09 01:01:14,29.24
2009-04-09 06:47:42,99.76
2009-04-11 18:06:11,29.66
2009-04-11 18:12:53,148.32
2009-04-11 19:15:08,61.03
2009-04-15 19:25:40,53.14
2009-05-04 21:07:02,816.75
2009-05-04 21:08:27,686.07
2009-05-06 20:40:04,489.08
2009-05-06 20:40:42,586.9
2009-05-06 20:41:15,554.3
2009-05-06 20:41:51,652.11"""
    fields = [('timestamp', 'datetime', 'T'), ('amount', 'float', '')]
    with FileRecordStream(resource_filename('nupic.datafiles', 'gap.csv'), write=True, fields=fields) as f:
      lines = data.split('\n')
      for line in lines:
        t, a = line.split(',')
        components = t.split()
        yyyy, mm, dd = [int(x) for x in components[0].split('-')]
        h, m, s = [int(x) for x in components[1].split(':')]
        t = datetime.datetime(yyyy, mm, dd, h, m, s)
        a = float(a)
        f.appendRecord([t, a])
    aggregationOptions = dict(
      timeField='timestamp',
      fields=[('timestamp', lambda x: x[0]),
              ('amount', sum)],
      hours=24
    )
    handle = \
      tempfile.NamedTemporaryFile(prefix='agg_gap_hours_24',
                                  suffix='.csv',
                                  dir='.')
    outputFile = handle.name
    handle.close()
    if os.path.isfile(outputFile):
      os.remove(outputFile)
    self.assertFalse(os.path.exists(outputFile),
                     msg="shouldn't exist, but does: " + str(outputFile))
    result = generateDataset(aggregationOptions, 'gap.csv', outputFile)
    self.assertEqual(
      os.path.normpath(os.path.abspath(outputFile)), os.path.normpath(result),
      msg="result = '%s'; outputFile = '%s'" % (result, outputFile))
    self.assertTrue(os.path.isfile(outputFile),
                    msg="outputFile missing or is not file: %r" % (outputFile))
    print outputFile
    print '-' * 30
    # Render the aggregated output as text and compare to the golden result
    # (expected lines are unindented so they match the rendered string).
    s = ''
    for r in FileRecordStream(outputFile):
      s += ', '.join([str(x) for x in r]) + '\n'
    expected = """\
2009-04-03 19:05:06, 175.9
2009-04-06 19:05:06, 135.94
2009-04-08 19:05:06, 129.0
2009-04-10 19:05:06, 177.98
2009-04-11 19:05:06, 61.03
2009-04-15 19:05:06, 53.14
2009-05-04 19:05:06, 1502.82
2009-05-06 19:05:06, 2282.39
"""
    self.assertEqual(s, expected)
    return
  def test_AutoSpecialFields(self):
    """When only a regular field is aggregated, the special fields (timestamp
    'T', reset 'R', sequence id 'S') must be carried through automatically.
    """
    # Cleanup old files
    #for f in glob.glob('*.*'):
    #  if 'auto_specials' in f:
    #    os.remove(f)
    fields = [('dummy', 'string', ''),
              ('timestamp', 'datetime', 'T'),
              ('reset', 'int', 'R'),
              ('sid', 'int', 'S'),
              ]
    records = (
      ['dummy-1', datetime.datetime(2000, 3, 1), 1, 1],
      ['dummy-2', datetime.datetime(2000, 3, 2), 0, 1],
      ['dummy-3', datetime.datetime(2000, 3, 3), 0, 1],
      ['dummy-4', datetime.datetime(2000, 3, 4), 1, 2],
      ['dummy-5', datetime.datetime(2000, 3, 5), 0, 2],
      )
    with FileRecordStream(resource_filename('nupic.datafiles', 'auto_specials.csv'), write=True, fields=fields) \
        as o:
      for r in records:
        o.appendRecord(r)
    # Aggregate just the dummy field, all the specials should be added
    ai = dict(
      fields=[('dummy', lambda x: x[0])],
      weeks=3
    )
    handle = \
      tempfile.NamedTemporaryFile(prefix='auto_specials',
                                  suffix='.csv',
                                  dir='.')
    tempFile = handle.name
    handle.close()
    outputFile = generateDataset(ai, 'auto_specials.csv', tempFile)
    result = []
    with FileRecordStream(outputFile) as f:
      print f.getFields()
      for r in f:
        result.append(r)
    self.assertEqual(result[0][2], 1) # reset
    self.assertEqual(result[0][3], 1) # seq id
    self.assertEqual(result[0][0], 'dummy-1')
    self.assertEqual(result[1][2], 1) # reset
    self.assertEqual(result[1][3], 2) # seq id
    self.assertEqual(result[1][0], 'dummy-4')
    return
  def test_WeightedMean(self):
    """Verify the 'wmean:<weightField>' aggregation function: dummy1 is
    averaged weighted by dummy2 over 2-day buckets; a bucket whose weights sum
    to zero must yield None for the weighted mean.
    """
    # Cleanup old files
    #for f in glob.glob('*.*'):
    #  if 'auto_specials' in f:
    #    os.remove(f)
    fields = [('dummy1', 'int', ''),
              ('dummy2', 'int', ''),
              ('timestamp', 'datetime', 'T'),
              ]
    records = (
      [10, 1, datetime.datetime(2000, 3, 1)],
      [5, 2, datetime.datetime(2000, 3, 2)],
      [1, 100, datetime.datetime(2000, 3, 3)],
      [2, 4, datetime.datetime(2000, 3, 4)],
      [4, 1, datetime.datetime(2000, 3, 5)],
      [4, 0, datetime.datetime(2000, 3, 6)],
      [5, 0, datetime.datetime(2000, 3, 7)],
      [6, 0, datetime.datetime(2000, 3, 8)],
      )
    with FileRecordStream(resource_filename('nupic.datafiles', 'weighted_mean.csv'), write=True, fields=fields) \
        as o:
      for r in records:
        o.appendRecord(r)
    # Aggregate dummy1 as a dummy2-weighted mean and dummy2 as a plain mean
    ai = dict(
      fields=[('dummy1', 'wmean:dummy2', None),
              ('dummy2', 'mean', None)],
      days=2
    )
    handle = \
      tempfile.NamedTemporaryFile(prefix='weighted_mean',
                                  suffix='.csv',
                                  dir='.')
    tempFile = handle.name
    handle.close()
    outputFile = generateDataset(ai, 'weighted_mean.csv', tempFile)
    result = []
    with FileRecordStream(outputFile) as f:
      print f.getFields()
      for r in f:
        result.append(r)
    self.assertEqual(result[0][0], 6.0)
    self.assertEqual(result[0][1], 1.0)
    self.assertEqual(result[1][0], 1.0)
    self.assertEqual(result[1][1], 52.0)
    self.assertEqual(result[2][0], 4.0)
    self.assertEqual(result[2][1], 0.0)
    # Last bucket has zero total weight -> weighted mean is undefined (None)
    self.assertEqual(result[3][0], None)
    self.assertEqual(result[3][1], 0.0)
    return
# Script entry point: initialize NuPIC logging, then hand control to
# unittest's TestProgram for discovery/CLI handling.
if __name__=='__main__':
  nupic_support.initLogging()
  # Add verbosity to unittest output (so it prints a header for each test)
  #sys.argv.append("--verbose")
  # Run the test
  unittest.TestProgram()
| 26,573 | Python | .py | 810 | 28.904938 | 113 | 0.589616 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,149 | multiclass_knn_test.py | numenta_nupic-legacy/tests/integration/nupic/regions/multiclass_knn_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import tempfile
import unittest
from datetime import datetime
from nupic.data.file_record_stream import FileRecordStream
from nupic.encoders import MultiEncoder, ScalarEncoder
from nupic.engine import Network
def _getTempFileName():
  """Return a unique name for a test CSV file in the current directory.

  A NamedTemporaryFile is used purely as a unique-name generator: closing it
  deletes the placeholder file, and only its (now free) name is returned so
  the caller can create the file itself.
  """
  tmp = tempfile.NamedTemporaryFile(prefix="test", suffix=".csv", dir=".")
  try:
    return tmp.name
  finally:
    tmp.close()
class MulticlassKNNTest(unittest.TestCase):
  """
  A simple end to end test of a RecordSensor->KNNClassifier network,
  where the data records each contain multiple categories.
  """
  def testSimpleMulticlassNetwork(self):
    """Train a KNN classifier on 8 records whose category field holds zero,
    one, or two categories, then replay the same data in inference mode and
    expect the exact per-category probabilities back.
    """
    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.0, 1, 0, ""],
      [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1 2"],
      [datetime(day=3, month=3, year=2010), 1.0, 0, 0, "1 2"],
      [datetime(day=4, month=3, year=2010), 2.0, 0, 0, "0"],
      [datetime(day=5, month=3, year=2010), 3.0, 0, 0, "1 2"],
      [datetime(day=6, month=3, year=2010), 5.0, 0, 0, "1 2"],
      [datetime(day=7, month=3, year=2010), 8.0, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 13.0, 0, 0, "1 2"])
    dataSource = FileRecordStream(streamID=filename, write=True, fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))
    # Create the network and get region instances.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier","py.KNNClassifierRegion",
                  "{'k': 2,'distThreshold': 0,'maxCategoryCount': 3}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput = "dataOut", destInput = "bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput = "categoryOut", destInput = "categoryIn")
    sensor = net.regions["sensor"]
    classifier = net.regions["classifier"]
    # Setup sensor region encoder and data stream.
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
      "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource
    # Get ready to run.
    net.initialize()
    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)
    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
                     "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                     "Inference mode is not turned on.")
    self.assertEqual(classifier.getParameter("categoryCount"), 3,
                     "The classifier should count three total categories.")
    # classififer learns 12 patterns b/c there are 12 categories amongst the
    # records:
    self.assertEqual(classifier.getParameter("patternCount"), 12,
                     "The classifier should've learned 12 samples in total.")
    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    expectedCats = ([0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.5, 0.5, 0.0],
                    [0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.5, 0.5, 0.0],
                    [0.0, 0.5, 0.5])
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      inferredCats = classifier.getOutputData("categoriesOut")
      self.assertSequenceEqual(expectedCats[i], inferredCats.tolist(),
        "Classififer did not infer expected category probabilites for record "
        "number {}.".format(i))
    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
# Allow running this test module directly; test discovery also picks it up.
if __name__ == "__main__":
  unittest.main()
| 5,408 | Python | .py | 118 | 39.70339 | 80 | 0.646622 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,150 | single_step_sdr_classifier_test.py | numenta_nupic-legacy/tests/integration/nupic/regions/single_step_sdr_classifier_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from operator import itemgetter
import os
import tempfile
import unittest
import numpy as np
from datetime import datetime
from nupic.data.file_record_stream import FileRecordStream
from nupic.encoders import MultiEncoder, ScalarEncoder
from nupic.engine import Network
from nupic.frameworks.opf.model_factory import ModelFactory
def _getTempFileName():
  """Generate a unique name for a test .csv file in the working directory.

  The temporary file only exists long enough to reserve a unique name; it is
  closed (and thereby deleted) before the name is handed back.
  """
  handle = tempfile.NamedTemporaryFile(prefix="test", suffix=".csv", dir=".")
  try:
    uniqueName = handle.name
  finally:
    handle.close()
  return uniqueName
class SingleStepSDRClassifierTest(unittest.TestCase):
  """
  A simple end to end test of a RecordSensor->SDR Classifier network,
  where the data records each contain multiple categories.
  """
  def testSimpleMulticlassNetworkPY(self):
    """Train a 0-step SDRClassifier (python implementation) on a two-category
    dataset, then replay the same data in inference mode and expect exact
    classification.
    """
    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.0, 1, 0, "0"],
      [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=3, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=4, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=5, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=6, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=7, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 1.0, 0, 0, "1"])
    dataSource = FileRecordStream(streamID=filename, write=True, fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))
    # Create the network and get region instances.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier", "py.SDRClassifierRegion",
                  "{steps: '0', alpha: 0.001, implementation: 'py'}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="dataOut", destInput="bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="categoryOut", destInput="categoryIn")
    sensor = net.regions["sensor"]
    classifier = net.regions["classifier"]
    # Setup sensor region encoder and data stream.
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
      "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource
    # Get ready to run.
    net.initialize()
    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)
    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)
    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
                     "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                     "Inference mode is not turned on.")
    # make sure we can access all the parameters with getParameter
    self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
    self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
    self.assertEqual(int(classifier.getParameter("steps")), 0)
    self.assertTrue(classifier.getParameter("implementation") == "py")
    self.assertEqual(classifier.getParameter("verbosity"), 0)
    expectedCats = ([0.0], [1.0], [0.0], [1.0], [0.0], [1.0], [0.0], [1.0],)
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      inferredCats = classifier.getOutputData("categoriesOut")
      self.assertSequenceEqual(expectedCats[i], inferredCats.tolist(),
                               "Classififer did not infer expected category "
                               "for record number {}.".format(i))
    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
  def testSimpleMulticlassNetworkCPP(self):
    """Same as testSimpleMulticlassNetworkPY but with the C++ ('cpp')
    implementation of the SDR classifier.
    """
    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.0, 1, 0, "0"],
      [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=3, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=4, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=5, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=6, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=7, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 1.0, 0, 0, "1"])
    dataSource = FileRecordStream(streamID=filename, write=True,
                                  fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))
    # Create the network and get region instances.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier", "py.SDRClassifierRegion",
                  "{steps: '0', alpha: 0.001, implementation: 'cpp'}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="dataOut", destInput="bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="categoryOut", destInput="categoryIn")
    sensor = net.regions["sensor"]
    classifier = net.regions["classifier"]
    # Setup sensor region encoder and data stream.
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
      "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource
    # Get ready to run.
    net.initialize()
    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)
    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)
    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
                     "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                     "Inference mode is not turned on.")
    # make sure we can access all the parameters with getParameter
    self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
    self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
    self.assertEqual(int(classifier.getParameter("steps")), 0)
    self.assertTrue(classifier.getParameter("implementation") == "cpp")
    self.assertEqual(classifier.getParameter("verbosity"), 0)
    expectedCats = ([0.0], [1.0], [0.0], [1.0], [0.0], [1.0], [0.0], [1.0],)
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      inferredCats = classifier.getOutputData("categoriesOut")
      self.assertSequenceEqual(expectedCats[i], inferredCats.tolist(),
                               "Classifier did not infer expected category "
                               "for record number {}.".format(i))
    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
  def testHelloWorldPrediction(self):
    """Train an HTMPrediction model (TM + 1-step SDR classifier, no SP) on a
    repeated character sequence and require perfect next-character prediction
    after 15 repetitions.
    """
    text = 'hello world.'
    categories = list("abcdefghijklmnopqrstuvwxyz 1234567890.")
    colsPerChar = 11
    numColumns = (len(categories) + 1) * colsPerChar
    MODEL_PARAMS = {
      "model": "HTMPrediction",
      "version": 1,
      "predictAheadTime": None,
      "modelParams": {
        "inferenceType": "TemporalMultiStep",
        "sensorParams": {
          "verbosity": 0,
          "encoders": {
            "token": {
              "fieldname": u"token",
              "name": u"token",
              "type": "CategoryEncoder",
              "categoryList": categories,
              "w": colsPerChar,
              "forced": True,
            }
          },
          "sensorAutoReset": None,
        },
        # SP is disabled; spParams below are unused (note columnDimensions is
        # a nonsensical 0.5 -- harmless only because spEnable is False).
        "spEnable": False,
        "spParams": {
          "spVerbosity": 0,
          "globalInhibition": 1,
          "columnCount": 2048,
          "inputWidth": 0,
          "numActiveColumnsPerInhArea": 40,
          "seed": 1956,
          "columnDimensions": 0.5,
          "synPermConnected": 0.1,
          "synPermActiveInc": 0.1,
          "synPermInactiveDec": 0.01,
          "boostStrength": 0.0,
        },
        "tmEnable": True,
        "tmParams": {
          "verbosity": 0,
          "columnCount": numColumns,
          "cellsPerColumn": 16,
          "inputWidth": numColumns,
          "seed": 1960,
          "temporalImp": "tm_cpp",
          "newSynapseCount": 6,
          "maxSynapsesPerSegment": 11,
          "maxSegmentsPerCell": 32,
          "initialPerm": 0.21,
          "permanenceInc": 0.1,
          "permanenceDec": 0.05,
          "globalDecay": 0.0,
          "maxAge": 0,
          "minThreshold": 3,
          "activationThreshold": 5,
          "outputType": "normal",
        },
        "clParams": {
          "implementation": "py",
          "regionName": "SDRClassifierRegion",
          "verbosity": 0,
          "alpha": 0.1,
          "steps": "1",
        },
        "trainSPNetOnlyIfRequested": False,
      },
    }
    model = ModelFactory.create(MODEL_PARAMS)
    model.enableInference({"predictedField": "token"})
    model.enableLearning()
    # train
    prediction = None
    for rpt in xrange(20):
      for token in text:
        # After 15 full passes the model must predict every next character.
        if prediction is not None:
          if rpt > 15:
            self.assertEqual(prediction, token)
        modelInput = {"token": token}
        result = model.run(modelInput)
        # Most likely 1-step-ahead prediction.
        prediction = sorted(result.inferences["multiStepPredictions"][1].items(),
                            key=itemgetter(1), reverse=True)[0][0]
      model.resetSequenceStates()
      prediction = None
  def testSimpleScalarPredictionNetworkPY(self):
    """Train a 0-step SDRClassifier (python implementation) in scalar
    prediction mode and expect exact reconstruction of the input values.
    """
    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.5, 1, 0, "0"],
      [datetime(day=2, month=3, year=2010), 1.5, 0, 0, "1"],
      [datetime(day=3, month=3, year=2010), 0.5, 0, 0, "0"],
      [datetime(day=4, month=3, year=2010), 1.5, 0, 0, "1"],
      [datetime(day=5, month=3, year=2010), 0.5, 0, 0, "0"],
      [datetime(day=6, month=3, year=2010), 1.5, 0, 0, "1"],
      [datetime(day=7, month=3, year=2010), 0.5, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 1.5, 0, 0, "1"])
    dataSource = FileRecordStream(streamID=filename, write=True, fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))
    # Create the network and get region instances.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier", "py.SDRClassifierRegion",
                  "{steps: '0', alpha: 0.001, implementation: 'py'}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="dataOut", destInput="bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="bucketIdxOut", destInput="bucketIdxIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="actValueOut", destInput="actValueIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="categoryOut", destInput="categoryIn")
    sensor = net.regions["sensor"]
    sensor.setParameter('predictedField', 'value')
    classifier = net.regions["classifier"]
    # Setup sensor region encoder and data stream.
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
      "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource
    # Get ready to run.
    net.initialize()
    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)
    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)
    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
                     "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                     "Inference mode is not turned on.")
    # make sure we can access all the parameters with getParameter
    self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
    self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
    self.assertEqual(int(classifier.getParameter("steps")), 0)
    self.assertTrue(classifier.getParameter("implementation") == "py")
    self.assertEqual(classifier.getParameter("verbosity"), 0)
    expectedValues = ([0.5], [1.5], [0.5], [1.5], [0.5], [1.5], [0.5], [1.5],)
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      predictedValue = classifier.getOutputData("categoriesOut")
      # BUGFIX: previously the message string was passed positionally into
      # assertAlmostEqual's third parameter (`places`, not `msg`) and a
      # one-element list was compared to a scalar, so a mismatch raised
      # TypeError instead of a clean assertion failure. Compare scalars and
      # pass the message via msg= instead.
      self.assertAlmostEqual(expectedValues[i][0], predictedValue[0],
                             msg="Classififer did not make correct prediction "
                                 "for record number {}.".format(i))
    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
  @unittest.skip("Currently there is a difference between the CPP and Python "
                 "implementations")
  def testSimpleScalarPredictionNetworkDiff(self):
    """Same scalar-prediction test using the 'diff' implementation, which
    cross-checks the python and C++ classifiers on every serialize interval.
    """
    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.5, 1, 0, "0"],
      [datetime(day=2, month=3, year=2010), 1.5, 0, 0, "1"],
      [datetime(day=3, month=3, year=2010), 0.5, 0, 0, "0"],
      [datetime(day=4, month=3, year=2010), 1.5, 0, 0, "1"],
      [datetime(day=5, month=3, year=2010), 0.5, 0, 0, "0"],
      [datetime(day=6, month=3, year=2010), 1.5, 0, 0, "1"],
      [datetime(day=7, month=3, year=2010), 0.5, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 1.5, 0, 0, "1"])
    dataSource = FileRecordStream(streamID=filename, write=True, fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))
    # Create the network and get region instances.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier", "py.SDRClassifierRegion",
                  "{steps: '0', alpha: 0.001, implementation: 'diff'}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="dataOut", destInput="bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="bucketIdxOut", destInput="bucketIdxIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="actValueOut", destInput="actValueIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="categoryOut", destInput="categoryIn")
    sensor = net.regions["sensor"]
    sensor.setParameter('predictedField', 'value')
    classifier = net.regions["classifier"]
    # Setup sensor region encoder and data stream.
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
      "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource
    # Get ready to run.
    net.initialize()
    # Configure serialization frequency
    classifierRegion = classifier.getSelf()
    classifierRegion._sdrClassifier._callsPerSerialize = 1
    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)
    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)
    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
                     "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                     "Inference mode is not turned on.")
    # make sure we can access all the parameters with getParameter
    self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
    self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
    self.assertEqual(int(classifier.getParameter("steps")), 0)
    self.assertTrue(classifier.getParameter("implementation") == "diff")
    self.assertEqual(classifier.getParameter("verbosity"), 0)
    expectedValues = ([0.5], [1.5], [0.5], [1.5], [0.5], [1.5], [0.5], [1.5],)
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      predictedValue = classifier.getOutputData("categoriesOut")
      # BUGFIX: see testSimpleScalarPredictionNetworkPY -- compare scalars and
      # pass the failure message via msg= rather than into `places`.
      self.assertAlmostEqual(expectedValues[i][0], predictedValue[0],
                             msg="Classififer did not make correct prediction "
                                 "for record number {}.".format(i))
    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
# Allow running this test module directly; test discovery also picks it up.
if __name__ == "__main__":
  unittest.main()
| 19,797 | Python | .py | 428 | 38.607477 | 81 | 0.633585 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,151 | network_testnode_interchangeability.py | numenta_nupic-legacy/tests/integration/nupic/engine/network_testnode_interchangeability.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This test verifies that the C++ test node and py.TestNode
It creates the same two node network with all four combinations
of TestNode and py.TestNode:
1. TestNode, TestNode
2. TestNode, py.TestNode
3. py.TestNode, TestNode
4. py.TestNode, py.TestNode
Then it performs the same tests as the twonode_network demo (except the error
messages tests for the three node network):
- Can add regions to network and set dimensions
- Linking induces dimensions correctly
- Network computation happens in correct order
- Direct (zero-copy) access to outputs
- Linking correctly maps outputs to inputs
"""
import logging
import unittest2 as unittest
from nupic.engine import Network, Dimensions
LOGGER = logging.getLogger(__name__)
class NetworkTestNodeInterchangeabilityTest(unittest.TestCase):
  """Checks that the C++ TestNode and py.TestNode are interchangeable.

  The same two-level network is built with every combination of the two
  implementations; each combination must produce identical results.
  """

  def testNodesPyTestNodeAndTestNode(self):
    self.runNodesTest('py.TestNode', 'TestNode')

  def testNodesTestNodeAndPyTestNode(self):
    self.runNodesTest('TestNode', 'py.TestNode')

  def testNodesTestNodeAndTestNode(self):
    self.runNodesTest('TestNode', 'TestNode')

  def testNodesPyTestNodeAndPyTestNode(self):
    self.runNodesTest('py.TestNode', 'py.TestNode')

  def runNodesTest(self, nodeType1, nodeType2):
    """Build a two-level network (level1 -> level2 via TestFanIn2), run it,
    and verify dimensions, outputs, and downward dimension induction.

    :param nodeType1: region type name for level1 (e.g. 'TestNode')
    :param nodeType2: region type name for level2 (e.g. 'py.TestNode')
    """
    # =====================================================
    # Build and run the network
    # =====================================================
    LOGGER.info('test(level1: %s, level2: %s)', nodeType1, nodeType2)
    net = Network()
    level1 = net.addRegion("level1", nodeType1, "{int32Param: 15}")
    dims = Dimensions([6, 4])
    level1.setDimensions(dims)

    level2 = net.addRegion("level2", nodeType2, "{real64Param: 128.23}")
    net.link("level1", "level2", "TestFanIn2", "")

    # Could call initialize here, but not necessary as net.run()
    # initializes implicitly.
    # net.initialize()

    net.run(1)
    LOGGER.info("Successfully created network and ran for one iteration")

    # =====================================================
    # Check everything
    # =====================================================
    dims = level1.getDimensions()
    self.assertEqual(len(dims), 2)
    self.assertEqual(dims[0], 6)
    self.assertEqual(dims[1], 4)

    # The TestFanIn2 link should have induced level2 dims = level1 dims / 2.
    dims = level2.getDimensions()
    self.assertEqual(len(dims), 2)
    self.assertEqual(dims[0], 3)
    self.assertEqual(dims[1], 2)

    # Check L1 output. Actual output values are determined by the TestNode
    # compute() behavior.
    l1output = level1.getOutputData("bottomUpOut")
    self.assertEqual(len(l1output), 48)       # 24 nodes; 2 values per node
    for i in xrange(24):
      self.assertEqual(l1output[2*i], 0)      # size of input to each node is 0
      self.assertEqual(l1output[2*i+1], i)    # node number

    # check L2 output.
    l2output = level2.getOutputData("bottomUpOut")
    self.assertEqual(len(l2output), 12)       # 6 nodes; 2 values per node
    # Output val = node number + sum(inputs)
    # Can compute from knowing L1 layout
    #
    #   00 01 | 02 03 | 04 05
    #   06 07 | 08 09 | 10 11
    #   ---------------------
    #   12 13 | 14 15 | 16 17
    #   18 19 | 20 21 | 22 23
    outputVals = []
    outputVals.append(0 + (0 + 1 + 6 + 7))
    outputVals.append(1 + (2 + 3 + 8 + 9))
    outputVals.append(2 + (4 + 5 + 10 + 11))
    outputVals.append(3 + (12 + 13 + 18 + 19))
    outputVals.append(4 + (14 + 15 + 20 + 21))
    outputVals.append(5 + (16 + 17 + 22 + 23))
    for i in xrange(6):
      self.assertEqual(l2output[2*i], 8)      # size of input for each node is 8
      self.assertEqual(l2output[2*i+1], outputVals[i])

    # =====================================================
    # Run for one more iteration
    # =====================================================
    LOGGER.info("Running for a second iteration")
    net.run(1)

    # =====================================================
    # Check everything again
    # =====================================================
    # Outputs are all the same except that the first output is
    # incremented by the iteration number
    for i in xrange(24):
      self.assertEqual(l1output[2*i], 1)
      self.assertEqual(l1output[2*i+1], i)

    for i in xrange(6):
      self.assertEqual(l2output[2*i], 9)
      self.assertEqual(l2output[2*i+1], outputVals[i] + 4)

    # =====================================================
    # Demonstrate a few other features
    # =====================================================
    #
    # Linking can induce dimensions downward
    #
    net = Network()
    level1 = net.addRegion("level1", nodeType1, "")
    level2 = net.addRegion("level2", nodeType2, "")
    dims = Dimensions([3, 2])
    level2.setDimensions(dims)
    net.link("level1", "level2", "TestFanIn2", "")
    net.initialize()

    # Level1 should now have dimensions [6, 4]
    self.assertEqual(level1.getDimensions()[0], 6)
    self.assertEqual(level1.getDimensions()[1], 4)
if __name__ == "__main__":
unittest.main()
| 6,133 | Python | .py | 142 | 38.901408 | 80 | 0.612073 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,152 | network_checkpoint_test.py | numenta_nupic-legacy/tests/integration/nupic/engine/network_checkpoint_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import numpy
from nupic.regions.record_sensor import RecordSensor
from nupic.regions.sp_region import SPRegion
from nupic.regions.tm_region import TMRegion
from network_creation_common import createAndRunNetwork
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.proto import NetworkProto_capnp
class NetworkCheckpointTest(unittest.TestCase):
  """Verifies that checkpointing (capnp serialization) a network midway
  through a run does not change any region's outputs."""

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testSensorRegion(self):
    results1 = createAndRunNetwork(RecordSensor, "dataOut")
    results2 = createAndRunNetwork(RecordSensor, "dataOut",
                                   checkpointMidway=True)
    self.compareArrayResults(results1, results2)

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testSPRegion(self):
    results1 = createAndRunNetwork(SPRegion, "bottomUpOut")
    results2 = createAndRunNetwork(SPRegion, "bottomUpOut",
                                   checkpointMidway=True)
    # Reuse the shared helper instead of duplicating the comparison loop.
    self.compareArrayResults(results1, results2)

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testTMRegion(self):
    results1 = createAndRunNetwork(TMRegion, "bottomUpOut",
                                   checkpointMidway=False,
                                   temporalImp="tm_py")
    results2 = createAndRunNetwork(TMRegion, "bottomUpOut",
                                   checkpointMidway=True,
                                   temporalImp="tm_py")
    # Reuse the shared helper instead of duplicating the comparison loop.
    self.compareArrayResults(results1, results2)

  def compareArrayResults(self, results1, results2):
    """Assert both result lists have the same length and that the nonzero
    indices of each pair of arrays match row by row."""
    self.assertEqual(len(results1), len(results2))
    for i in xrange(len(results1)):
      result1 = list(results1[i].nonzero()[0])
      result2 = list(results2[i].nonzero()[0])
      self.assertEqual(result1, result2,
            "Row {0} not equal: {1} vs. {2}".format(i, result1, result2))
if __name__ == "__main__":
unittest.main()
| 3,497 | Python | .py | 76 | 39.486842 | 84 | 0.666765 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,153 | network_creation_common.py | numenta_nupic-legacy/tests/integration/nupic/engine/network_creation_common.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import csv
import json
import os
import tempfile
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
from nupic.engine import Network
from nupic.encoders import MultiEncoder, ScalarEncoder, DateEncoder
from nupic.regions.record_sensor import RecordSensor
from nupic.regions.sp_region import SPRegion
from nupic.regions.tm_region import TMRegion
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.proto import NetworkProto_capnp
# --- Test configuration constants ---------------------------------------

_VERBOSITY = 0  # how chatty the test should be
_SEED = 1956  # the random seed used throughout

# Input CSV shipped inside the nupic.datafiles package.
_INPUT_FILE_PATH = resource_filename(
    "nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)

# Number of rows fed through the network by createAndRunNetwork().
_NUM_RECORDS = 1000

# Config field for SPRegion
SP_PARAMS = {
    "spVerbosity": _VERBOSITY,
    "spatialImp": "cpp",
    "globalInhibition": 1,
    "columnCount": 2048,
    # This must be set before creating the SPRegion
    "inputWidth": 0,
    "numActiveColumnsPerInhArea": 40,
    "seed": 42,
    "potentialPct": 0.8,
    "synPermConnected": 0.1,
    "synPermActiveInc": 0.0001,
    "synPermInactiveDec": 0.0005,
    "boostStrength": 0.0,
}

# Config field for TMRegion
TM_PARAMS = {
    "verbosity": _VERBOSITY,
    "columnCount": 2048,
    "cellsPerColumn": 32,
    # Must match the SPRegion's columnCount (the SP output feeds the TM).
    "inputWidth": 2048,
    "seed": 1960,
    # Overridden per-call in createNetwork() via its temporalImp argument.
    "temporalImp": "cpp",
    "newSynapseCount": 20,
    "maxSynapsesPerSegment": 32,
    "maxSegmentsPerCell": 128,
    "initialPerm": 0.21,
    "permanenceInc": 0.1,
    "permanenceDec": 0.1,
    "predictedSegmentDecrement": .01,
    "globalDecay": 0.0,
    "maxAge": 0,
    "minThreshold": 9,
    "activationThreshold": 12,
    "outputType": "normal",
    "pamLength": 3,
}
def createEncoder():
  """Build and return the MultiEncoder used by the sensor region.

  Encodes the scalar ``consumption`` field and the time-of-day component of
  the ``timestamp`` field.
  """
  encoder = MultiEncoder()
  # Scalar consumption reading, clipped into [0, 100].
  encoder.addEncoder(
      "consumption",
      ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption", clipInput=True))
  # Time of day extracted from the timestamp.
  encoder.addEncoder(
      "timestamp",
      DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay"))
  return encoder
def createNetwork(dataSource, enableTP=False, temporalImp="py"):
  """Create the Network instance.

  The network has a sensor region reading data from `dataSource` and passing
  the encoded representation to an SPRegion. The SPRegion output is passed to
  a TMRegion.

  :param dataSource: a RecordStream instance to get data from
  :param enableTP: when True, add a TMRegion on top of the SPRegion
  :param temporalImp: temporal memory implementation name for the TMRegion
  :returns: a Network instance ready to run
  """
  network = Network()

  # Our input is sensor data from the gym file. The RecordSensor region
  # allows us to specify a file record stream as the input source via the
  # dataSource attribute.
  network.addRegion("sensor", "py.RecordSensor",
                    json.dumps({"verbosity": _VERBOSITY}))
  sensor = network.regions["sensor"].getSelf()

  # The RecordSensor needs to know how to encode the input values
  sensor.encoder = createEncoder()

  # Specify the dataSource as a file record stream instance
  sensor.dataSource = dataSource

  # Create the spatial pooler region. Work on a private copy of the
  # module-level parameter dict so repeated calls don't mutate shared state.
  spParams = copy.deepcopy(SP_PARAMS)
  spParams["inputWidth"] = sensor.encoder.getWidth()
  network.addRegion("spatialPoolerRegion", "py.SPRegion", json.dumps(spParams))

  # Link the SP region to the sensor input
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")

  if enableTP:
    # Add the TMRegion on top of the SPRegion (again on a private copy).
    tmParams = copy.deepcopy(TM_PARAMS)
    tmParams["temporalImp"] = temporalImp
    network.addRegion("temporalPoolerRegion", "py.TMRegion",
                      json.dumps(tmParams))

    network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "")
    network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "",
                 srcOutput="topDownOut", destInput="topDownIn")

  spatialPoolerRegion = network.regions["spatialPoolerRegion"]

  # Make sure learning is enabled
  spatialPoolerRegion.setParameter("learningMode", True)
  # We want temporal anomalies so disable anomalyMode in the SP. This mode is
  # used for computing anomalies in a non-temporal model.
  spatialPoolerRegion.setParameter("anomalyMode", False)

  if enableTP:
    temporalPoolerRegion = network.regions["temporalPoolerRegion"]
    # Enable topDownMode to get the predicted columns output
    temporalPoolerRegion.setParameter("topDownMode", True)
    # Make sure learning is enabled (this is the default)
    temporalPoolerRegion.setParameter("learningMode", True)
    # Enable inference mode so we get predictions
    temporalPoolerRegion.setParameter("inferenceMode", True)
    # Enable anomalyMode to compute the anomaly score. This actually doesn't
    # work now so doesn't matter. We instead compute the anomaly score based
    # on topDownOut (predicted columns) and SP bottomUpOut (active columns).
    temporalPoolerRegion.setParameter("anomalyMode", True)

  return network
def saveAndLoadNetwork(network):
  """Round-trip `network` through capnp serialization and return the copy."""
  # Serialize the network into a capnp message.
  outProto = NetworkProto_capnp.NetworkProto.new_message()
  network.write(outProto)

  with tempfile.TemporaryFile() as f:
    outProto.write(f)
    f.seek(0)

    # Deserialize into a brand-new Network instance.
    inProto = NetworkProto_capnp.NetworkProto.read(f)
    loadedNetwork = Network.read(inProto)

  # The data source is not serialized; carry it over from the original.
  originalSensor = network.regions["sensor"].getSelf()
  restoredSensor = loadedNetwork.regions["sensor"].getSelf()
  restoredSensor.dataSource = originalSensor.dataSource

  # Initialize loaded network
  loadedNetwork.initialize()
  return loadedNetwork
def createAndRunNetwork(testRegionType, testOutputName,
                        checkpointMidway=False,
                        temporalImp=None):
  """Run a network over the hotgym data and collect one region's output.

  :param testRegionType: region class whose output is collected
  :param testOutputName: name of the region output read each iteration
  :param checkpointMidway: when True, serialize and reload the network
      halfway through the run (exercises checkpointing)
  :param temporalImp: when not None, build the network with a TMRegion
      using this temporal memory implementation
  :returns: list of copies of the region's output array, one per record
  """
  dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)

  if temporalImp is None:
    network = createNetwork(dataSource)
  else:
    network = createNetwork(dataSource,
                            enableTP=True,
                            temporalImp=temporalImp)
  network.initialize()

  results = []
  for i in xrange(_NUM_RECORDS):
    # Explicit floor division so the midpoint stays an int under either
    # division semantics.
    if checkpointMidway and i == (_NUM_RECORDS // 2):
      network = saveAndLoadNetwork(network)

    # Run the network for a single iteration
    network.run(1)

    testRegion = network.getRegionsByType(testRegionType)[0]
    output = testRegion.getOutputData(testOutputName).copy()
    results.append(output)

  return results
| 7,721 | Python | .py | 186 | 36.854839 | 82 | 0.719605 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,154 | network_twonode_test.py | numenta_nupic-legacy/tests/integration/nupic/engine/network_twonode_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This test demonstrates building and running
a two node network. Some features demonstrated include:
- Can add regions to network and set dimensions
- Linking induces dimensions correctly
- Network computation happens in correct order
- Direct (zero-copy) access to outputs
- Linking correctly maps outputs to inputs
"""
import logging
import unittest2 as unittest
from nupic.engine import Network, Dimensions
LOGGER = logging.getLogger(__name__)
class NetworkTwoNodeTest(unittest.TestCase):
  """Exercises a two-node network: region creation, dimension induction via
  linking, compute ordering, direct output access, and error reporting when
  initialization cannot succeed."""

  def testTwoNode(self):
    """Build a level1 -> level2 TestNode network, run it twice, and verify
    dimensions and output values at each step."""
    # =====================================================
    # Build and run the network
    # =====================================================
    net = Network()
    level1 = net.addRegion("level1", "TestNode", "{int32Param: 15}")
    dims = Dimensions([6, 4])
    level1.setDimensions(dims)

    level2 = net.addRegion("level2", "TestNode", "{real64Param: 128.23}")
    net.link("level1", "level2", "TestFanIn2", "")

    # Could call initialize here, but not necessary as net.run()
    # initializes implicitly.
    # net.initialize()

    net.run(1)
    LOGGER.info("Successfully created network and ran for one iteration")

    # =====================================================
    # Check everything
    # =====================================================
    dims = level1.getDimensions()
    self.assertEqual(len(dims), 2)
    self.assertEqual(dims[0], 6)
    self.assertEqual(dims[1], 4)

    # The TestFanIn2 link should have induced level2 dims = level1 dims / 2.
    dims = level2.getDimensions()
    self.assertEqual(len(dims), 2)
    self.assertEqual(dims[0], 3)
    self.assertEqual(dims[1], 2)

    # Check L1 output. Actual output values are determined by the TestNode
    # compute() behavior.
    l1output = level1.getOutputData("bottomUpOut")
    self.assertEqual(len(l1output), 48)       # 24 nodes; 2 values per node
    for i in xrange(24):
      self.assertEqual(l1output[2*i], 0)      # size of input to each node is 0
      self.assertEqual(l1output[2*i+1], i)    # node number

    # check L2 output.
    l2output = level2.getOutputData("bottomUpOut")
    self.assertEqual(len(l2output), 12)       # 6 nodes; 2 values per node
    # Output val = node number + sum(inputs)
    # Can compute from knowing L1 layout
    #
    #   00 01 | 02 03 | 04 05
    #   06 07 | 08 09 | 10 11
    #   ---------------------
    #   12 13 | 14 15 | 16 17
    #   18 19 | 20 21 | 22 23
    outputVals = []
    outputVals.append(0 + (0 + 1 + 6 + 7))
    outputVals.append(1 + (2 + 3 + 8 + 9))
    outputVals.append(2 + (4 + 5 + 10 + 11))
    outputVals.append(3 + (12 + 13 + 18 + 19))
    outputVals.append(4 + (14 + 15 + 20 + 21))
    outputVals.append(5 + (16 + 17 + 22 + 23))
    for i in xrange(6):
      self.assertEqual(l2output[2*i], 8)      # size of input for each node is 8
      self.assertEqual(l2output[2*i+1], outputVals[i])

    # =====================================================
    # Run for one more iteration
    # =====================================================
    LOGGER.info("Running for a second iteration")
    net.run(1)

    # =====================================================
    # Check everything again
    # =====================================================
    # Outputs are all the same except that the first output is
    # incremented by the iteration number
    for i in xrange(24):
      self.assertEqual(l1output[2*i], 1)
      self.assertEqual(l1output[2*i+1], i)

    for i in xrange(6):
      self.assertEqual(l2output[2*i], 9)
      self.assertEqual(l2output[2*i+1], outputVals[i] + 4)

  def testLinkingDownwardDimensions(self):
    """Verify that linking can induce dimensions downward and that
    initialization failures produce clear errors."""
    #
    # Linking can induce dimensions downward
    #
    net = Network()
    level1 = net.addRegion("level1", "TestNode", "")
    level2 = net.addRegion("level2", "TestNode", "")
    dims = Dimensions([3, 2])
    level2.setDimensions(dims)
    net.link("level1", "level2", "TestFanIn2", "")
    net.initialize()

    # Level1 should now have dimensions [6, 4]
    self.assertEqual(level1.getDimensions()[0], 6)
    self.assertEqual(level1.getDimensions()[1], 4)

    #
    # We get nice error messages when network can't be initialized
    #
    LOGGER.info("=====")
    LOGGER.info("Creating a 3 level network in which levels 1 and 2 have")
    LOGGER.info("dimensions but network initialization will fail because")
    LOGGER.info("level3 does not have dimensions")
    LOGGER.info("Error message follows:")

    net = Network()
    level1 = net.addRegion("level1", "TestNode", "")
    level2 = net.addRegion("level2", "TestNode", "")
    _level3 = net.addRegion("level3", "TestNode", "")
    dims = Dimensions([6, 4])
    level1.setDimensions(dims)
    net.link("level1", "level2", "TestFanIn2", "")
    self.assertRaises(RuntimeError, net.initialize)
    LOGGER.info("=====")

    LOGGER.info("======")
    LOGGER.info("Creating a link with incompatible dimensions. \
Error message follows")
    net.link("level2", "level3", "TestFanIn2", "")
    self.assertRaises(RuntimeError, net.initialize)
if __name__ == "__main__":
unittest.main()
| 6,093 | Python | .py | 144 | 37.923611 | 80 | 0.613252 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,155 | vector_file_sensor_test.py | numenta_nupic-legacy/tests/integration/nupic/engine/vector_file_sensor_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file tests VectorFileSensor exhaustively using the sessions interface.
Need to add tests for parameters:
loading and appending CSV files
test for recentFile
"""
import os
import pkg_resources
import unittest2 as unittest
from nupic.engine import Array, Dimensions, Network
# Saved test network and sample data files in the formats exercised below
# (.txt, .csv, .lf4, .bf4, .idx — presumably text, CSV, little/big-endian
# float32, and IDX binary; confirm against VectorFileSensor's file-format
# codes). All paths resolve relative to this test package.
g_filename = pkg_resources.resource_filename(__name__, "data/vectorfile.nta")
g_dataFile = pkg_resources.resource_filename(__name__,
                                             "data/vectortestdata.txt")
g_dataFile2 = pkg_resources.resource_filename(__name__,
                                              "data/vectortestdata2.txt")
g_dataFileCSV = pkg_resources.resource_filename(__name__,
                                                "data/vectortestdata.csv")
g_dataFileCSV2 = pkg_resources.resource_filename(__name__,
                                                 "data/vectortestdata2.csv")
g_dataFileCSV3 = pkg_resources.resource_filename(__name__,
                                                 "data/vectortestdata3.csv")
g_dataFileLF4 = pkg_resources.resource_filename(__name__,
                                                "data/vectortestdata.lf4")
g_dataFileBF4 = pkg_resources.resource_filename(__name__,
                                                "data/vectortestdata.bf4")
g_dataFileIDX = pkg_resources.resource_filename(__name__,
                                                "data/vectortestdata.idx")
class VectorFileSensorTest(unittest.TestCase):
"""Class for testing the VectorFileSensor plugin by loading a known network
with a single VectorFileSensor node and a known data file."""
def setUp(self):
self.filename = g_filename
self.nodeName = "TestSensor"
self.sensorName = "VectorFileSensor"
self.dataFile = g_dataFile
self.dataFile2 = g_dataFile2
self.dataFile3a = g_dataFileCSV
self.dataFile3b = g_dataFileCSV2
self.dataFile3c = g_dataFileCSV3
self.dataFile4 = g_dataFileLF4
self.dataFile5 = g_dataFileBF4
self.dataFile6 = g_dataFileIDX
self.numTests = 333
self.testsPassed = 0
self.testFailures = []
self.sensor = None
  def testAll(self):
    """Run all the tests in our suite, catching any exceptions that might be
    thrown.

    The sub-tests share state (self.sensor is created by _testNetLoad) so
    they must run in this order.
    """
    print 'VectorFileSensorTest parameters:'
    print 'PYTHONPATH: %s' % os.environ.get('PYTHONPATH', 'NOT SET')
    print 'filename: %s' % self.filename

    # Must run before any file is loaded.
    self._testRunWithoutFile()
    self._testNetLoad()
    self._testFakeLoadFile()
    self._testRepeatCount()
    self._testUnknownCommand()

    # Test maxOutput and activeOutputCount
    self._testOutputCounts(0)
    self._testLoadFile(self.dataFile, '0', '0')
    self._testOutputCounts(5)

    # Test a sequence of loads, runs, appends, etc. The second argument is
    # the file-format code, the third the expected iteration count.
    self._testLoadFile(self.dataFile, '0', '0')
    self._testRun()
    self._testLoadFile(self.dataFile2, '', '0')
    self._testRun()
    self._testLoadFile(self.dataFile2, '2', '0')
    self._testRun()
    self._testLoadFile(self.dataFile3a, '3', '0')
    self._testRun()
    self._testLoadFile(self.dataFile4, '4', '0')
    self._testRun()
    self._testLoadFile(self.dataFile5, '5', '0')
    self._testRun()
    self._testLoadFile(self.dataFile6, '6', '0')
    self._testRun()
    self._testPosition()
    self._testAppendFile(self.dataFile2, '2', '1', 10)
    self._testAppendFile(self.dataFile, '0', '1', 15)
    self._testRun()
    self._testScaling(self.dataFile3b, '3')

    # Test optional categoryOut and resetOut
    self.sensor.setParameter('hasCategoryOut', 1)
    self.sensor.setParameter('hasResetOut', 1)
    self._testLoadFile(self.dataFile3c, '3', '0')
    self._testOptionalOutputs()
    self.sensor.setParameter('hasCategoryOut', 0)
    self.sensor.setParameter('hasResetOut', 0)
  def _testNetLoad(self):
    """Test loading a network with this sensor in it.

    Builds a one-region network, saves it, reloads it from disk, and checks
    the freshly loaded sensor has no vectors yet. Stores the loaded sensor
    region in self.sensor for the subsequent sub-tests.
    """
    n = Network()
    r = n.addRegion(self.nodeName, self.sensorName, '{ activeOutputCount: 11}')
    r.dimensions = Dimensions([1])
    n.save(self.filename)

    # Round-trip: reload the network from the file just written.
    n = Network(self.filename)
    n.initialize()
    self.testsPassed += 1

    # Check that vectorCount parameter is zero
    r = n.regions[self.nodeName]
    res = r.getParameter('vectorCount')
    self.assertEqual(
        res, 0, "getting vectorCount:\n Expected '0', got back '%d'\n" % res)

    # Shared by the remaining sub-tests.
    self.sensor = r
def _testFakeLoadFile(self):
"""Test reading in a fake file."""
# Loading a fake file should throw an exception
with self.assertRaises(RuntimeError):
self.sensor.executeCommand(['loadFile', 'ExistenceIsAnIllusion.txt', '0'])
def _testRunWithoutFile(self):
"""Test running the network without a file loaded. This should be run
before any file has been loaded in!"""
with self.assertRaises(AttributeError):
self.sensor.compute()
  def _testRepeatCount(self):
    """Test setting and getting repeat count using parameters.

    Verifies the default value (1) via the 'dump' command, sets it to 42,
    re-checks both the parameter and the dump output, then restores 1.
    """
    # Check default repeat count
    n = Network(self.filename)
    sensor = n.regions[self.nodeName]
    res = sensor.executeCommand(['dump'])
    expected = self.sensorName + \
        ' isLabeled = 0 repeatCount = 1 vectorCount = 0 iterations = 0\n'
    self.assertEqual(
        res, expected,
        "repeat count test:\n expected '%s'\n got '%s'\n" %
        (expected, res))

    # Set to 42, check it and return it back to 1
    sensor.setParameter('repeatCount', 42)
    res = sensor.getParameter('repeatCount')
    self.assertEqual(
        res, 42, "set repeatCount to 42:\n got back '%d'\n" % res)
    res = sensor.executeCommand(['dump'])
    expected = (self.sensorName +
                ' isLabeled = 0 repeatCount = 42 vectorCount = 0 '
                'iterations = 0\n')
    self.assertEqual(
        res, expected,
        "set to 42 test:\n expected '%s'\n got '%s'\n" %
        (expected, res))
    # Restore the default so later sub-tests see repeatCount = 1.
    sensor.setParameter('repeatCount', 1)
  def _testLoadFile(self, dataFile, fileFormat= '', iterations=''):
    """Test reading our sample vector file. The sample file
    has 5 vectors of the correct length, plus one with incorrect length.
    The sensor should ignore the last line.

    :param dataFile: path of the vector file to load
    :param fileFormat: file-format code for loadFile; '' omits the argument
    :param iterations: expected iteration count (as a string) in the dump
    """
    # Now load a real file
    if fileFormat != '':
      res = self.sensor.executeCommand(['loadFile', dataFile, fileFormat])
    else:
      res = self.sensor.executeCommand(['loadFile', dataFile])
    # Success is either an empty result or a "read in file" banner.
    self.assertTrue(res == '' or
                    res.startswith('VectorFileSensor read in file'),
                    'loading a real file: %s' % str(res))

    # Check recent file
    res = self.sensor.getParameter('recentFile')
    self.assertEqual(res, dataFile, 'recent file, got: %s' % (res))

    # Check summary of file contents
    res = self.sensor.executeCommand(['dump'])
    expected = (self.sensorName +
                ' isLabeled = 0 repeatCount = 1 vectorCount = 5 iterations = ' +
                iterations + '\n')
    self.assertEqual(res, expected,
                     'file summary:\n expected "%s"\n got "%s"\n' %
                     (expected, res))
  def _testAppendFile(self, dataFile, fileFormat= '', iterations='',
                      numVecs=''):
    """Test appending our sample vector file. The sample file
    has 5 vectors of the correct length, plus one with incorrect length.
    The sensor should ignore the last line.

    :param dataFile: path of the vector file to append
    :param fileFormat: file-format code for appendFile; '' omits the argument
    :param iterations: expected iteration count (as a string) in the dump
    :param numVecs: expected total vector count after appending
    """
    # Now load a real file
    if fileFormat != '':
      res = self.sensor.executeCommand(['appendFile', dataFile, fileFormat])
    else:
      res = self.sensor.executeCommand(['appendFile', dataFile])
    # Success is either an empty result or a "read in file" banner.
    self.assertTrue(res == '' or
                    res.startswith('VectorFileSensor read in file'),
                    'loading a real file: %s' % str(res))

    # Check recent file
    res = self.sensor.getParameter('recentFile')
    self.assertEqual(res, dataFile, 'recent file, got: %s' % res)

    # Check summary of file contents
    res = self.sensor.executeCommand(['dump'])
    expected = self.sensorName + ' isLabeled = 0 repeatCount = 1' + \
        ' vectorCount = '+str(numVecs)+' iterations = ' + iterations + '\n'
    self.assertEqual(res, expected,
                     'file summary:\n expected "%s"\n got "%s"\n' %
                     (expected, res))

    # Check vectorCount parameter
    res = self.sensor.getParameter('vectorCount')
    self.assertEqual(res, numVecs,
                     'getting position:\n Expected ' + str(numVecs) +
                     ', got back "%s"\n' % res)
def _testRun(self):
"""This is the basic workhorse test routine. It runs the net several times
to ensure the sensor is outputting the correct values. The routine tests
looping, tests each vector, and tests repeat count. """
# Set repeat count to 3
self.sensor.setParameter('repeatCount', 3)
self.sensor.setParameter('position', 0)
# Run the sensor several times to ensure it is outputting the correct
# values.
for _epoch in [1, 2]: # test looping
for vec in [0, 1, 2, 3, 4]: # test each vector
for _rc in [1, 2, 3]: # test repeatCount
# Run and get outputs
self.sensor.compute()
outputs = self.sensor.getOutputData('dataOut')
# Check outputs
#sum = reduce(lambda x,y:int(x)+int(y),outputs)
self.assertEqual(outputs[vec], vec+1, 'output = %s' % str(outputs))
self.assertEqual(sum(outputs), vec+1, 'output = %s' % str(outputs))
# Set repeat count back to 1
self.sensor.setParameter('repeatCount', 1)
  def _testOutputCounts(self, vectorCount):
    """Test maxOutputVectorCount with different repeat counts.

    :param vectorCount: number of vectors currently loaded in the sensor
    """
    # maxOutputVectorCount should equal vectorCount at repeatCount = 1 ...
    res = self.sensor.getParameter('maxOutputVectorCount')
    self.assertEqual(res, vectorCount,
                     "getting maxOutputVectorCount:\n Expected '" +
                     str(vectorCount) + "', got back '%d'\n" % (res))

    # ... and scale by repeatCount when that is raised.
    self.sensor.setParameter('repeatCount', 3)
    res = self.sensor.getParameter('maxOutputVectorCount')
    self.assertEqual(res, 3 * vectorCount,
                     'getting maxOutputVectorCount:\n Expected ' +
                     str(3*vectorCount)+', got back "%d"\n' % res)
    self.sensor.setParameter('repeatCount', 1)

    # Test activeOutputCount (fixed at 11 by _testNetLoad's region spec).
    res = self.sensor.getParameter('activeOutputCount')
    self.assertEqual(
        res, 11,
        'getting activeOutputCount :\n Expected 11, got back "%d"\n' % res)
def _testPosition(self):
"""Test setting and getting position parameter. Run compute once to verify
it went to the right position."""
self.sensor.setParameter('position', 2)
self.sensor.compute()
outputs = self.sensor.getOutputData('dataOut')
self.assertEqual(outputs[2], 3, 'output = %s' % str(outputs))
self.assertEqual(sum(outputs), 3, 'output = %s' % str(outputs))
# Now it should have incremented the position
res = self.sensor.getParameter('position')
self.assertEqual(res, 3,
'getting position:\n Expected "3", got back "%d"\n' %
res)
def _testScaling(self, dataFile, fileFormat= ''):
"""Specific tests for setScaleVector, setOffsetVector, and scalingMode"""
# Retrieve scalingMode after a netLoad. Should be 'none'
res = self.sensor.getParameter('scalingMode')
self.assertEqual(res, 'none',
'Getting scalingMode:\n Expected "none", got back "%s"\n' %
res)
# Retrieve scaling and offset after netLoad - should be 1 and zero
# respectively.
a = Array('Real32', 11)
self.sensor.getParameterArray('scaleVector', a)
self.assertEqual(str(a), '[ 1 1 1 1 1 1 1 1 1 1 1 ]',
'Error getting ones scaleVector:\n Got back "%s"\n' %
str(res))
self.sensor.getParameterArray('offsetVector', a)
self.assertEqual(str(a), '[ 0 0 0 0 0 0 0 0 0 0 0 ]',
'Error getting zero offsetVector:\n Got back "%s"\n' %
str(res))
# load data file, set scaling and offset to standardForm and check
self.sensor.executeCommand(['loadFile', dataFile, fileFormat])
self.sensor.setParameter('scalingMode', 'standardForm')
self.sensor.getParameterArray('scaleVector', a)
s = ('[ 2.23607 1.11803 0.745356 0.559017 0.447214 2.23607 1.11803 '
'0.745356 0.559017 0.447214 2.23607 ]')
self.assertEqual(
str(a), s,
'Error getting standardForm scaleVector:\n Got back "%s"\n' % res)
o = '[ -0.2 -0.4 -0.6 -0.8 -1 -0.2 -0.4 -0.6 -0.8 -1 -0.2 ]'
self.sensor.getParameterArray('offsetVector', a)
self.assertEqual(
str(a), o,
'Error getting standardForm offsetVector:\n Got back "%s"\n' % res)
# set to custom value and check
scaleVector = Array('Real32', 11)
for i, x in enumerate((1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1)):
scaleVector[i] = x
self.sensor.setParameterArray('scaleVector', scaleVector)
self.sensor.getParameterArray('scaleVector', a)
self.assertEqual(str(a), str(scaleVector),
'Error getting modified scaleVector:\n Got back "%s"\n' %
str(res))
offsetVector = Array('Real32', 11)
for i, x in enumerate((1, 2, 3, 4, 1, 1, 1, 1, 1, 2, 1)):
offsetVector[i] = x
self.sensor.setParameterArray('offsetVector', offsetVector)
self.sensor.getParameterArray('offsetVector', a)
self.assertEqual(str(a), str(offsetVector),
'Error getting modified offsetVector:\n Got back "%s"\n' %
str(res))
# scalingMode should now be custom
mode = self.sensor.getParameter('scalingMode')
self.assertEqual(
mode, 'custom',
'Getting scalingMode:\n Expected "custom", got back "%s"\n' % res)
# At this point we test loading a data file using loadFile. The scaling
# params should still be active and applied to the new vectors.
res = self.sensor.executeCommand(['loadFile', dataFile, fileFormat])
self.sensor.getParameterArray('offsetVector', a)
self.assertEqual(
str(a), str(offsetVector),
'Error getting modified offsetVector after loadFile:\n Got back '
'"%s"\n' % res)
self.sensor.getParameterArray('scaleVector', a)
self.assertEqual(str(a), str(scaleVector),
'Error getting modified scaleVector after loadFile:\n '
'Got back "%s"\n' % res)
# Set scaling mode back to none and retrieve scaling and offset - should
# be 1 and zero respectively.
self.sensor.setParameter('scalingMode', 'none')
self.sensor.getParameterArray('scaleVector', a)
noScaling = Array('Real32', 11)
for i in range(11):
noScaling[i] = 1
self.assertEqual(str(a), str(noScaling),
'Error getting ones scaleVector:\n Got back "%s"\n' % res)
noOffset = Array('Real32', 11)
for i in range(11):
noOffset[i] = 0
self.sensor.getParameterArray('offsetVector', a)
self.assertEqual(str(a), str(noOffset),
'Error getting zero offsetVector:\n Got back "%s"\n' % res)
def _testUnknownCommand(self):
"""Test that exception is thrown when unknown execute command sent."""
with self.assertRaises(RuntimeError):
self.sensor.executeCommand(['nonExistentCommand'])
def _testOptionalOutputs(self):
"""This is the basic workhorse test routine. It runs the net several times
to ensure the sensor is outputting the correct values. The routine tests
looping, tests each vector, and tests repeat count. """
# Set repeat count to 3
self.sensor.setParameter('repeatCount', 3)
self.sensor.setParameter('position', 0)
# Run the sensor several times to ensure it is outputting the correct
# values.
categories = []
resetOuts = []
for _epoch in [1, 2]: # test looping
for vec in [0, 1, 2, 3, 4]: # test each vector
for _rc in [1, 2, 3]: # test repeatCount
# Run and get outputs
self.sensor.compute()
outputs = self.sensor.getOutputData('dataOut')
a = self.sensor.getOutputData('categoryOut')
categories.append(a[0])
a = self.sensor.getOutputData('resetOut')
resetOuts.append(a[0])
# Check outputs
self.assertEqual(outputs[vec], vec+1, 'output = %s' % str(outputs))
self.assertEqual(sum(outputs), vec+1, 'output = %s' % str(outputs))
self.assertEqual(categories, 2 * ([6] * 12 + [8] * 3))
self.assertEqual(resetOuts,
2 * [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
# Set repeat count back to 1
self.sensor.setParameter('repeatCount', 1)
# Support running this test file directly (outside the test runner).
if __name__=='__main__':
  unittest.main()
| 17,732 | Python | .py | 380 | 38.876316 | 80 | 0.635742 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,156 | temporal_memory_compatibility_test.py | numenta_nupic-legacy/tests/integration/nupic/engine/temporal_memory_compatibility_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import unittest
import numpy
from nupic.regions.tm_region import TMRegion
from network_creation_common import createAndRunNetwork
class TemporalMemoryCompatibilityTest(unittest.TestCase):

  def testTMPyCpp(self):
    """
    Test compatibility between C++ and Python TM implementation.
    """
    # Run the identical network once per TM backend and compare outputs.
    cppResults = createAndRunNetwork(TMRegion,
                                     "bottomUpOut",
                                     checkpointMidway=False,
                                     temporalImp="tm_cpp")
    pyResults = createAndRunNetwork(TMRegion,
                                    "bottomUpOut",
                                    checkpointMidway=False,
                                    temporalImp="tm_py")
    self.compareArrayResults(cppResults, pyResults)

  def compareArrayResults(self, results1, results2):
    """Assert that the two result lists have identical active-bit indices,
    row by row."""
    self.assertEqual(len(results1), len(results2))
    for i, (row1, row2) in enumerate(zip(results1, results2)):
      result1 = list(row1.nonzero()[0])
      result2 = list(row2.nonzero()[0])
      self.assertEqual(result1, result2,
                       "Row {0} not equal: {1} vs. {2}".format(i, result1,
                                                               result2))
# Support running this test file directly (outside the test runner).
if __name__ == "__main__":
  unittest.main()
| 2,158 | Python | .py | 48 | 37.916667 | 72 | 0.63467 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,157 | extensive_tm_py_test.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/extensive_tm_py_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import nupic.algorithms.temporal_memory
from extensive_tm_test_base import ExtensiveTemporalMemoryTest
class ExtensiveTemporalMemoryTestPY(ExtensiveTemporalMemoryTest, unittest.TestCase):
  """Concrete suite: runs the shared ExtensiveTemporalMemoryTest cases
  against the pure-Python TemporalMemory implementation."""

  def getTMClass(self):
    # Hook consumed by the shared base suite to select the implementation.
    return nupic.algorithms.temporal_memory.TemporalMemory
# Support running this test file directly (outside the test runner).
if __name__ == "__main__":
  unittest.main()
| 1,315 | Python | .py | 28 | 45.428571 | 84 | 0.707813 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,158 | tm_segment_learning.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/tm_segment_learning.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Segment Learning Tests
======================
Multi-attribute sequence tests.
SL1) Train the TM repeatedly using a single sequence plus noise. The sequence
can be relatively short, say 5 patterns. Add random noise each time a pattern is
presented. The noise should be different for each presentation and can be equal
to the number of on bits in the pattern.
Simplified patterns will be used, where each pattern consists of consecutive
bits and no two patterns share columns. The patterns that belong to the sequence
will be in the left half of the input vector. The noise bits will be in the
right half of the input vector.
After several iterations of each sequence, the TM should achieve perfect
inference on the true sequence. There should be resets between each presentation
of the sequence. Check predictions in the sequence part only (it's ok to predict
random bits in the right half of the column space), and test with clean
sequences.
SL2) As above but train with 3 different inter-leaved sequences.
SL3) Vary percentage of bits that are signal vs noise.
SL4) Noise can be a fixed alphabet instead of being randomly generated.
SL5) Have two independent sequences, one in the left half, and one in the
right half. Both should be learned well.
"""
import numpy
import unittest2 as unittest
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
from nupic.support.unittesthelpers import testcasebase
g_testCPPTM = True
class ExperimentTestBaseClass(testcasebase.TestCaseBase):
""" The base class for all of our tests in this module"""
def __init__(self, testMethodName, *args, **kwargs):
# Construct the base-class instance
super(ExperimentTestBaseClass, self).__init__(testMethodName, *args,
**kwargs)
# Module specific instance variables
self._rgen = numpy.random.RandomState(g_options.seed)
def _printOneTrainingVector(self, x):
"""Print a single vector succinctly."""
print ''.join('1' if k != 0 else '.' for k in x)
def _printAllTrainingSequences(self, trainingSequences):
"""Print all vectors"""
for i, trainingSequence in enumerate(trainingSequences):
print "============= Sequence", i, "================="
for pattern in trainingSequence:
self._printOneTrainingVector(pattern)
def _setVerbosity(self, verbosity, tm, tmPy):
"""Set verbosity level on the TM"""
tm.cells4.setVerbosity(verbosity)
tm.verbosity = verbosity
tmPy.verbosity = verbosity
def _createTMs(self, numCols, fixedResources=False,
checkSynapseConsistency = True):
"""Create an instance of the appropriate temporal memory. We isolate
all parameters as constants specified here."""
# Keep these fixed:
minThreshold = 4
activationThreshold = 8
newSynapseCount = 15
initialPerm = 0.3
connectedPerm = 0.5
permanenceInc = 0.1
permanenceDec = 0.05
if fixedResources:
permanenceDec = 0.1
maxSegmentsPerCell = 5
maxSynapsesPerSegment = 15
globalDecay = 0
maxAge = 0
else:
permanenceDec = 0.05
maxSegmentsPerCell = -1
maxSynapsesPerSegment = -1
globalDecay = 0.0001
maxAge = 1
if g_testCPPTM:
if g_options.verbosity > 1:
print "Creating BacktrackingTMCPP instance"
cppTM = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = 4,
initialPerm = initialPerm, connectedPerm = connectedPerm,
minThreshold = minThreshold,
newSynapseCount = newSynapseCount,
permanenceInc = permanenceInc,
permanenceDec = permanenceDec,
activationThreshold = activationThreshold,
globalDecay = globalDecay, maxAge=maxAge, burnIn = 1,
seed=g_options.seed, verbosity=g_options.verbosity,
checkSynapseConsistency = checkSynapseConsistency,
pamLength = 1000,
maxSegmentsPerCell = maxSegmentsPerCell,
maxSynapsesPerSegment = maxSynapsesPerSegment,
)
# Ensure we are copying over learning states for TMDiff
cppTM.retrieveLearningStates = True
else:
cppTM = None
if g_options.verbosity > 1:
print "Creating PY TM instance"
pyTM = BacktrackingTM(numberOfCols = numCols, cellsPerColumn = 4,
initialPerm = initialPerm,
connectedPerm = connectedPerm,
minThreshold = minThreshold,
newSynapseCount = newSynapseCount,
permanenceInc = permanenceInc,
permanenceDec = permanenceDec,
activationThreshold = activationThreshold,
globalDecay = globalDecay, maxAge=maxAge, burnIn = 1,
seed=g_options.seed, verbosity=g_options.verbosity,
pamLength = 1000,
maxSegmentsPerCell = maxSegmentsPerCell,
maxSynapsesPerSegment = maxSynapsesPerSegment,
)
return cppTM, pyTM
def _getSimplePatterns(self, numOnes, numPatterns):
"""Very simple patterns. Each pattern has numOnes consecutive
bits on. There are numPatterns*numOnes bits in the vector. These patterns
are used as elements of sequences when building up a training set."""
numCols = numOnes * numPatterns
p = []
for i in xrange(numPatterns):
x = numpy.zeros(numCols, dtype='float32')
x[i*numOnes:(i+1)*numOnes] = 1
p.append(x)
return p
def _buildSegmentLearningTrainingSet(self, numOnes=10, numRepetitions= 10):
"""A simple sequence of 5 patterns. The left half of the vector contains
the pattern elements, each with numOnes consecutive bits. The right half
contains numOnes random bits. The function returns a pair:
trainingSequences: A list containing numRepetitions instances of the
above sequence
testSequence: A single clean test sequence containing the 5 patterns
but with no noise on the right half
"""
numPatterns = 5
numCols = 2 * numPatterns * numOnes
halfCols = numPatterns * numOnes
numNoiseBits = numOnes
p = self._getSimplePatterns(numOnes, numPatterns)
# Create noisy training sequence
trainingSequences = []
for i in xrange(numRepetitions):
sequence = []
for j in xrange(numPatterns):
# Make left half
v = numpy.zeros(numCols)
v[0:halfCols] = p[j]
# Select numOnes noise bits
noiseIndices = (self._rgen.permutation(halfCols)
+ halfCols)[0:numNoiseBits]
v[noiseIndices] = 1
sequence.append(v)
trainingSequences.append(sequence)
# Create a single clean test sequence
testSequence = []
for j in xrange(numPatterns):
# Make only left half
v = numpy.zeros(numCols, dtype='float32')
v[0:halfCols] = p[j]
testSequence.append(v)
if g_options.verbosity > 1:
print "\nTraining sequences"
self.printAllTrainingSequences(trainingSequences)
print "\nTest sequence"
self.printAllTrainingSequences([testSequence])
return (trainingSequences, [testSequence])
def _buildSL2TrainingSet(self, numOnes=10, numRepetitions= 10):
"""Three simple sequences, composed of the same 5 static patterns. The left
half of the vector contains the pattern elements, each with numOnes
consecutive bits. The right half contains numOnes random bits.
Sequence 1 is: p0, p1, p2, p3, p4
Sequence 2 is: p4, p3, p2, p1, p0
Sequence 3 is: p2, p0, p4, p1, p3
The function returns a pair:
trainingSequences: A list containing numRepetitions instances of the
above sequences
testSequence: Clean test sequences with no noise on the right half
"""
numPatterns = 5
numCols = 2 * numPatterns * numOnes
halfCols = numPatterns * numOnes
numNoiseBits = numOnes
p = self._getSimplePatterns(numOnes, numPatterns)
# Indices of the patterns in the underlying sequences
numSequences = 3
indices = [
[0, 1, 2, 3, 4],
[4, 3, 2, 1, 0],
[2, 0, 4, 1, 3],
]
# Create the noisy training sequence
trainingSequences = []
for i in xrange(numRepetitions*numSequences):
sequence = []
for j in xrange(numPatterns):
# Make left half
v = numpy.zeros(numCols, dtype='float32')
v[0:halfCols] = p[indices[i % numSequences][j]]
# Select numOnes noise bits
noiseIndices = (self._rgen.permutation(halfCols)
+ halfCols)[0:numNoiseBits]
v[noiseIndices] = 1
sequence.append(v)
trainingSequences.append(sequence)
# Create the clean test sequences
testSequences = []
for i in xrange(numSequences):
sequence = []
for j in xrange(numPatterns):
# Make only left half
v = numpy.zeros(numCols, dtype='float32')
v[0:halfCols] = p[indices[i % numSequences][j]]
sequence.append(v)
testSequences.append(sequence)
if g_options.verbosity > 1:
print "\nTraining sequences"
self.printAllTrainingSequences(trainingSequences)
print "\nTest sequences"
self.printAllTrainingSequences(testSequences)
return (trainingSequences, testSequences)
def _testSegmentLearningSequence(self, tms,
trainingSequences,
testSequences,
doResets = True):
"""Train the given TM once on the entire training set. on the Test a single
set of sequences once and check that individual predictions reflect the true
relative frequencies. Return a success code. Success code is 1 for pass, 0
for fail."""
# If no test sequence is specified, use the first training sequence
if testSequences == None:
testSequences = trainingSequences
cppTM, pyTM = tms[0], tms[1]
if cppTM is not None:
assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True
#--------------------------------------------------------------------------
# Learn
if g_options.verbosity > 0:
print "============= Training ================="
print "TM parameters:"
print "CPP"
if cppTM is not None:
print cppTM.printParameters()
print "\nPY"
print pyTM.printParameters()
for sequenceNum, trainingSequence in enumerate(trainingSequences):
if g_options.verbosity > 1:
print "============= New sequence ================="
if doResets:
if cppTM is not None:
cppTM.reset()
pyTM.reset()
for t, x in enumerate(trainingSequence):
if g_options.verbosity > 1:
print "Time step", t, "sequence number", sequenceNum
print "Input: ", pyTM.printInput(x)
print "NNZ:", x.nonzero()
x = numpy.array(x).astype('float32')
if cppTM is not None:
cppTM.learn(x)
pyTM.learn(x)
if cppTM is not None:
assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity,
relaxSegmentTests = False) == True
if g_options.verbosity > 2:
if cppTM is not None:
print "CPP"
cppTM.printStates(printPrevious = (g_options.verbosity > 4))
print "\nPY"
pyTM.printStates(printPrevious = (g_options.verbosity > 4))
print
if g_options.verbosity > 4:
print "Sequence finished. Complete state after sequence"
if cppTM is not None:
print "CPP"
cppTM.printCells()
print "\nPY"
pyTM.printCells()
print
if g_options.verbosity > 2:
print "Calling trim segments"
if cppTM is not None:
nSegsRemovedCPP, nSynsRemovedCPP = cppTM.trimSegments()
nSegsRemoved, nSynsRemoved = pyTM.trimSegments()
if cppTM is not None:
assert nSegsRemovedCPP == nSegsRemoved
assert nSynsRemovedCPP == nSynsRemoved
if cppTM is not None:
assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True
print "Training completed. Stats:"
info = pyTM.getSegmentInfo()
print " nSegments:", info[0]
print " nSynapses:", info[1]
if g_options.verbosity > 3:
print "Complete state:"
if cppTM is not None:
print "CPP"
cppTM.printCells()
print "\nPY"
pyTM.printCells()
#---------------------------------------------------------------------------
# Infer
if g_options.verbosity > 1:
print "============= Inference ================="
if cppTM is not None:
cppTM.collectStats = True
pyTM.collectStats = True
nPredictions = 0
cppNumCorrect, pyNumCorrect = 0, 0
for sequenceNum, testSequence in enumerate(testSequences):
if g_options.verbosity > 1:
print "============= New sequence ================="
slen = len(testSequence)
if doResets:
if cppTM is not None:
cppTM.reset()
pyTM.reset()
for t, x in enumerate(testSequence):
if g_options.verbosity >= 2:
print "Time step", t, '\nInput:'
pyTM.printInput(x)
if cppTM is not None:
cppTM.infer(x)
pyTM.infer(x)
if cppTM is not None:
assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True
if g_options.verbosity > 2:
if cppTM is not None:
print "CPP"
cppTM.printStates(printPrevious = (g_options.verbosity > 4),
printLearnState = False)
print "\nPY"
pyTM.printStates(printPrevious = (g_options.verbosity > 4),
printLearnState = False)
if cppTM is not None:
cppScores = cppTM.getStats()
pyScores = pyTM.getStats()
if g_options.verbosity >= 2:
if cppTM is not None:
print "CPP"
print cppScores
print "\nPY"
print pyScores
if t < slen-1 and t > pyTM.burnIn:
nPredictions += 1
if cppTM is not None:
if cppScores['curPredictionScore2'] > 0.3:
cppNumCorrect += 1
if pyScores['curPredictionScore2'] > 0.3:
pyNumCorrect += 1
# Check that every inference was correct, excluding the very last inference
if cppTM is not None:
cppScores = cppTM.getStats()
pyScores = pyTM.getStats()
passTest = False
if cppTM is not None:
if cppNumCorrect == nPredictions and pyNumCorrect == nPredictions:
passTest = True
else:
if pyNumCorrect == nPredictions:
passTest = True
if not passTest:
print "CPP correct predictions:", cppNumCorrect
print "PY correct predictions:", pyNumCorrect
print "Total predictions:", nPredictions
return passTest
def _testSL1(self, numOnes = 10, numRepetitions = 6, fixedResources = False,
checkSynapseConsistency = True):
"""Test segment learning"""
if fixedResources:
testName = "TestSL1_FS"
else:
testName = "TestSL1"
print "\nRunning %s..." % testName
trainingSet, testSet = self._buildSegmentLearningTrainingSet(numOnes,
numRepetitions)
numCols = len(trainingSet[0][0])
tms = self._createTMs(numCols = numCols, fixedResources=fixedResources,
checkSynapseConsistency = checkSynapseConsistency)
testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet)
if testResult:
print "%s PASS" % testName
return 1
else:
print "%s FAILED" % testName
return 0
def _testSL2(self, numOnes = 10, numRepetitions = 10, fixedResources = False,
checkSynapseConsistency = True):
"""Test segment learning"""
if fixedResources:
testName = "TestSL2_FS"
else:
testName = "TestSL2"
print "\nRunning %s..." % testName
trainingSet, testSet = self._buildSL2TrainingSet(numOnes, numRepetitions)
numCols = len(trainingSet[0][0])
tms = self._createTMs(numCols = numCols, fixedResources=fixedResources,
checkSynapseConsistency = checkSynapseConsistency)
testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet)
if testResult:
print "%s PASS" % testName
return 1
else:
print "%s FAILED" % testName
return 0
class TMSegmentLearningTests(ExperimentTestBaseClass):
  """Our high level tests"""

  def test_SL1NoFixedResources(self):
    """Test segment learning without fixed resources"""
    # Synapse-consistency checking is expensive, so it is tied to --long.
    self._testSL1(fixedResources=False,
                  checkSynapseConsistency=g_options.long)

  def test_SL1WithFixedResources(self):
    """Test segment learning with fixed resources"""
    # Long-running variant: silently skipped unless --long was given.
    if not g_options.long:
      print "Test %s only enabled with the --long option" % \
          (self._testMethodName)
      return

    self._testSL1(fixedResources=True,
                  checkSynapseConsistency=g_options.long)

  def test_SL2NoFixedResources(self):
    """Test segment learning without fixed resources"""
    # Long-running variant: silently skipped unless --long was given.
    if not g_options.long:
      print "Test %s only enabled with the --long option" % \
          (self._testMethodName)
      return

    self._testSL2(fixedResources=False,
                  checkSynapseConsistency=g_options.long)

  def test_SL2WithFixedResources(self):
    """Test segment learning with fixed resources"""
    # Long-running variant: silently skipped unless --long was given.
    if not g_options.long:
      print "Test %s only enabled with the --long option" % \
          (self._testMethodName)
      return

    self._testSL2(fixedResources=True,
                  checkSynapseConsistency=g_options.long)
if __name__ == "__main__":

  # Process command line arguments
  parser = testcasebase.TestOptionParser()

  # Make the default value of the random seed 35
  parser.remove_option('--seed')
  parser.add_option('--seed', default=35, type='int',
                    help='Seed to use for random number generators '
                    '[default: %default].')

  # g_options is read as a module-level global by the test classes above.
  g_options, _ = parser.parse_args()

  # Run the tests
  unittest.main(verbosity=g_options.verbosity)
| 19,865 | Python | .py | 465 | 33.851613 | 89 | 0.632719 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,159 | extensive_tm_cpp_test.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/extensive_tm_cpp_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import nupic.bindings.algorithms
from extensive_tm_test_base import ExtensiveTemporalMemoryTest
class ExtensiveTemporalMemoryTestCPP(ExtensiveTemporalMemoryTest, unittest.TestCase):
  """Concrete suite: runs the shared ExtensiveTemporalMemoryTest cases
  against the C++ TemporalMemory implementation (nupic.bindings)."""

  def getTMClass(self):
    # Hook consumed by the shared base suite to select the implementation.
    return nupic.bindings.algorithms.TemporalMemory
# Support running this test file directly (outside the test runner).
if __name__ == "__main__":
  unittest.main()
| 1,302 | Python | .py | 28 | 44.964286 | 85 | 0.706393 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,160 | tm_likelihood_test.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/tm_likelihood_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Sequence Likelihood Tests
=========================
LI1) Present three sequences
Seq#1: A-B-C-D-E
Seq#2: A-B-C-D-F
Seq#3: A-B-C-D-G
with the relative frequencies, such as [0.1,0.7,0.2]
Test: after presenting A-B-C-D, prediction scores should reflect the transition
probabilities for E, F and G, i.e. Run the test for several different
probability combinations.
LI2) Given a TM trained with LI1, compute the prediction score across a
list of sequences.
LI3) Given the following sequence and a one cell per column TM:
Seq1: a-b-b-c-d
There should be four segments a-b
"""
import numpy
import unittest2 as unittest
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
from nupic.support.unittesthelpers import testcasebase
# Fixed seed so runs are reproducible and both TM implementations see the
# same pseudo-random data.
SEED = 42
# Diagnostic print level; higher values produce more output.
VERBOSITY = 1
# Enables the longer-running variants of the tests.
LONG = True
# Module-wide RNG used when sampling training sequences.
_RGEN = numpy.random.RandomState(SEED)
def _getSimplePatterns(numOnes, numPatterns):
"""Very simple patterns. Each pattern has numOnes consecutive
bits on. There are numPatterns*numOnes bits in the vector."""
numCols = numOnes * numPatterns
p = []
for i in xrange(numPatterns):
x = numpy.zeros(numCols, dtype='float32')
x[i*numOnes:(i+1)*numOnes] = 1
p.append(x)
return p
def _buildLikelihoodTrainingSet(numOnes=5, relativeFrequencies=None):
  """Two very simple high order sequences for debugging. Each pattern in the
  sequence has a series of 1's in a specific set of columns."""
  patterns = _getSimplePatterns(numOnes, 7)

  # All three sequences share the prefix p0-p1-p2-p3 and differ only in the
  # final element (p4, p5 or p6 respectively).
  prefix = patterns[:4]
  trainingSequences = [prefix + [patterns[last]] for last in (4, 5, 6)]

  return (trainingSequences, relativeFrequencies, patterns)
def _createTMs(numCols, cellsPerColumn=4, checkSynapseConsistency=True):
  """Create TM and BacktrackingTMCPP instances with identical parameters. """

  # Keep these fixed for both TM's: both instances receive the same learning
  # parameters and the same seed so their behavior can be compared directly.
  minThreshold = 4
  activationThreshold = 4
  newSynapseCount = 5
  initialPerm = 0.6
  connectedPerm = 0.5
  permanenceInc = 0.1
  permanenceDec = 0.001
  globalDecay = 0.0

  if VERBOSITY > 1:
    print "Creating BacktrackingTMCPP instance"

  cppTm = BacktrackingTMCPP(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,
                            initialPerm=initialPerm, connectedPerm=connectedPerm,
                            minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                            permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                            activationThreshold=activationThreshold,
                            globalDecay=globalDecay, burnIn=1,
                            seed=SEED, verbosity=VERBOSITY,
                            checkSynapseConsistency=checkSynapseConsistency,
                            pamLength=1000)

  if VERBOSITY > 1:
    print "Creating PY TM instance"

  pyTm = BacktrackingTM(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,
                        initialPerm=initialPerm, connectedPerm=connectedPerm,
                        minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                        permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                        activationThreshold=activationThreshold,
                        globalDecay=globalDecay, burnIn=1,
                        seed=SEED, verbosity=VERBOSITY,
                        pamLength=1000)

  return cppTm, pyTm
def _computeTMMetric(tm=None, sequences=None, useResets=True, verbosity=1):
  """Given a trained TM and a list of sequences, compute the temporal memory
  performance metric on those sequences.

  Parameters:
  ===========
  tm:             A trained temporal memory.
  sequences:      A list of sequences. Each sequence is a list of numpy
                  vectors.
  useResets:      If True, the TM's reset method will be called before the
                  start of each new sequence.
  verbosity:      An integer controlling the level of printouts. The higher
                  the number the more debug printouts.

  Return value:
  ============
  The following pair is returned: (score, numPredictions)

  score:          The average prediction score per pattern.
  numPredictions: The total number of predictions that were made.
  """
  datasetScore = 0
  numPredictions = 0
  # Clear any stats accumulated by earlier runs on this TM.
  tm.resetStats()
  for seqIdx, seq in enumerate(sequences):

    # Feed in a reset
    if useResets:
      tm.reset()

    seq = numpy.array(seq, dtype='uint32')
    if verbosity > 2:
      print "--------------------------------------------------------"
    for i, inputPattern in enumerate(seq):
      if verbosity > 2:
        print "sequence %d, element %d," % (seqIdx, i),
        print "pattern", inputPattern

      # Feed this input to the TM and get the stats
      y = tm.infer(inputPattern)

      if verbosity > 2:
        stats = tm.getStats()
        if stats['curPredictionScore'] > 0:
          print "   patternConfidence=", stats['curPredictionScore2']

      # Print some diagnostics for debugging
      if verbosity > 3:
        print "\n\n"
        predOut = numpy.sum(tm.predictedState['t'], axis=1)
        actOut = numpy.sum(tm.activeState['t'], axis=1)
        outout = numpy.sum(y.reshape(tm.activeState['t'].shape), axis=1)
        print "Prediction non-zeros: ", predOut.nonzero()
        print "Activestate non-zero: ", actOut.nonzero()
        print "input non-zeros:      ", inputPattern.nonzero()
        print "Output non-zeros:     ", outout.nonzero()

  # Print and return final stats; the score is the TM's running average
  # prediction score over the whole dataset.
  stats = tm.getStats()
  datasetScore = stats['predictionScoreAvg2']
  numPredictions = stats['nPredictions']
  print "Final results: datasetScore=", datasetScore,
  print "numPredictions=", numPredictions
  return datasetScore, numPredictions
def _createDataset(numSequences, originalSequences, relativeFrequencies):
  """Build a dataset of numSequences sequences sampled from originalSequences.

  The i'th entry of the returned list is drawn at random from
  originalSequences, with draw probabilities given by relativeFrequencies.
  """
  cumulativeFrequencies = numpy.cumsum(relativeFrequencies)
  # For each slot, draw a uniform random number and map it onto the
  # cumulative distribution to pick which source sequence to present.
  return [originalSequences[numpy.searchsorted(cumulativeFrequencies,
                                               _RGEN.random_sample())]
          for _ in xrange(numSequences)]
class TMLikelihoodTest(testcasebase.TestCaseBase):
def _testSequence(self,
trainingSet,
nSequencePresentations=1,
tm=None,
testSequences=None,
doResets=True,
relativeFrequencies=None):
"""Test a single set of sequences once and check that individual
predictions reflect the true relative frequencies. Return a success code
as well as the trained TM. Success code is 1 for pass, 0 for fail.
The trainingSet is a set of 3 sequences that share the same first 4
elements but differ in the 5th element. After feeding in the first 4 elements,
we want to correctly compute the confidences for the 5th element based on
the frequency with which each sequence was presented during learning.
For example:
trainingSequences[0]: (10% probable)
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat E: (array([20, 21, 22, 23, 24]),)
trainingSequences[1]: (20% probable)
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat F: (array([25, 26, 27, 28, 29]),)
trainingSequences[2]: (70% probable)
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat G: (array([30, 31, 32, 33, 34]),)
allTrainingPatterns:
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat E: (array([20, 21, 22, 23, 24]),)
pat F: (array([25, 26, 27, 28, 29]),)
pat G: (array([30, 31, 32, 33, 34]),)
"""
trainingSequences = trainingSet[0]
trainingFrequencies = trainingSet[1]
allTrainingPatterns = trainingSet[2]
trainingCummulativeFrequencies = numpy.cumsum(trainingFrequencies)
if testSequences == None:
testSequences = trainingSequences
# Learn
if VERBOSITY > 1:
print "============= Learning ================="
for r in xrange(nSequencePresentations):
# Pick a training sequence to present, based on the given training
# frequencies.
whichSequence = numpy.searchsorted(trainingCummulativeFrequencies,
_RGEN.random_sample())
trainingSequence = trainingSequences[whichSequence]
if VERBOSITY > 2:
print "=========Presentation #%d Sequence #%d==============" % \
(r, whichSequence)
if doResets:
tm.reset()
for t, x in enumerate(trainingSequence):
if VERBOSITY > 3:
print "Time step", t
print "Input: ", tm.printInput(x)
tm.learn(x)
if VERBOSITY > 4:
tm.printStates(printPrevious=(VERBOSITY > 4))
print
if VERBOSITY > 4:
print "Sequence finished. Complete state after sequence"
tm.printCells()
print
tm.finishLearning()
if VERBOSITY > 2:
print "Training completed. Complete state:"
tm.printCells()
print
print "TM parameters:"
print tm.printParameters()
# Infer
if VERBOSITY > 1:
print "============= Inference ================="
testSequence = testSequences[0]
slen = len(testSequence)
tm.collectStats = True
tm.resetStats()
if doResets:
tm.reset()
for t, x in enumerate(testSequence):
if VERBOSITY > 2:
print "Time step", t, '\nInput:', tm.printInput(x)
tm.infer(x)
if VERBOSITY > 3:
tm.printStates(printPrevious=(VERBOSITY > 4), printLearnState=False)
print
# We will exit with the confidence score for the last element
if t == slen-2:
tmNonZeros = [pattern.nonzero()[0] for pattern in allTrainingPatterns]
predictionScore2 = tm._checkPrediction(tmNonZeros)[2]
if VERBOSITY > 0:
print "predictionScore:", predictionScore2
# The following test tests that the prediction scores for each pattern
# are within 10% of the its relative frequency. Here we check only
# the Positive Prediction Score
patternConfidenceScores = numpy.array([x[1] for x in predictionScore2])
# Normalize so that the sum is 1.0. This makes us independent of any
# potential scaling differences in the column confidence calculations of
# various TM implementations.
patternConfidenceScores /= patternConfidenceScores.sum()
msg = ('Prediction failed with predictionScore: %s. Expected %s but got %s.'
% (str(predictionScore2), str(relativeFrequencies),
str(patternConfidenceScores[4:])))
self.assertLess(abs(patternConfidenceScores[4]-relativeFrequencies[0]), 0.1,
msg=msg)
self.assertLess(abs(patternConfidenceScores[5]-relativeFrequencies[1]), 0.1,
msg=msg)
self.assertLess(abs(patternConfidenceScores[6]-relativeFrequencies[2]), 0.1,
msg=msg)
def _likelihoodTest1(self, numOnes=5, relativeFrequencies=None,
checkSynapseConsistency=True):
print "Sequence Likelihood test 1 with relativeFrequencies=",
print relativeFrequencies
trainingSet = _buildLikelihoodTrainingSet(numOnes, relativeFrequencies)
cppTm, pyTm = _createTMs(numCols=trainingSet[0][0][0].size,
checkSynapseConsistency=checkSynapseConsistency)
# Test both TM's. Currently the CPP TM has faster confidence estimation
self._testSequence(trainingSet, nSequencePresentations=200, tm=cppTm,
relativeFrequencies=relativeFrequencies)
self._testSequence(trainingSet, nSequencePresentations=500, tm=pyTm,
relativeFrequencies=relativeFrequencies)
def _likelihoodTest2(self, numOnes=5, relativeFrequencies=None,
checkSynapseConsistency=True):
print "Sequence Likelihood test 2 with relativeFrequencies=",
print relativeFrequencies
trainingSet = _buildLikelihoodTrainingSet(numOnes, relativeFrequencies)
cppTm, pyTm = _createTMs(numCols=trainingSet[0][0][0].size,
checkSynapseConsistency=checkSynapseConsistency)
# Test both TM's
for tm in [cppTm, pyTm]:
self._testSequence(trainingSet, nSequencePresentations=500, tm=tm,
relativeFrequencies=relativeFrequencies)
# Create a dataset with the same relative frequencies for testing the
# metric.
testDataSet = _createDataset(500, trainingSet[0], relativeFrequencies)
tm.collectStats = True
score, _ = _computeTMMetric(tm, testDataSet, verbosity=2)
# Create a dataset with very different relative frequencies
# This score should be lower than the one above.
testDataSet = _createDataset(500, trainingSet[0],
relativeFrequencies = [0.1, 0.1, 0.9])
score2, _ = _computeTMMetric(tm, testDataSet, verbosity=2)
self.assertLessEqual(score2, score)
def testLikelihood1Short(self):
self._likelihoodTest1(numOnes=5, relativeFrequencies=[0.1, 0.7, 0.2],
checkSynapseConsistency=LONG)
def testLikelihood1Long(self):
self._likelihoodTest1(numOnes=5, relativeFrequencies=[0.2, 0.5, 0.3])
self._likelihoodTest1(numOnes=5, relativeFrequencies=[0.5, 0.5, 0.0])
self._likelihoodTest1(numOnes=5, relativeFrequencies=[0.1, 0.5, 0.4])
def testLikelihood2Short(self):
self._likelihoodTest2(numOnes=5, relativeFrequencies=[0.1, 0.7, 0.2],
checkSynapseConsistency=LONG)
def testLikelihood2Long(self):
self._likelihoodTest2(numOnes=5, relativeFrequencies=[0.2, 0.5, 0.3])
self._likelihoodTest2(numOnes=5, relativeFrequencies=[0.5, 0.5, 0.0])
self._likelihoodTest2(numOnes=5, relativeFrequencies=[0.1, 0.5, 0.4])
if __name__ == "__main__":
  # Run the full test suite when this module is executed as a script.
  unittest.main()
| 15,672 | Python | .py | 339 | 38.79056 | 87 | 0.658794 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,161 | extensive_tm_test_base.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/extensive_tm_test_base.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import unittest
from abc import ABCMeta
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.support.unittesthelpers.abstract_temporal_memory_test import AbstractTemporalMemoryTest
class ExtensiveTemporalMemoryTest(AbstractTemporalMemoryTest):
  """
  ==============================================================================
  Basic First Order Sequences
  ==============================================================================

  These tests ensure the most basic (first order) sequence learning mechanism is
  working.

  Parameters: Use a "fast learning mode": initPerm should be greater than
  connectedPerm and permanenceDec should be zero. With these settings sequences
  should be learned in one pass:

    minThreshold = newSynapseCount
    initialPermanence = 0.8
    connectedPermanence = 0.7
    permanenceDecrement = 0
    permanenceIncrement = 0.4

  Other Parameters:
    columnDimensions = [100]
    cellsPerColumn = 1
    newSynapseCount = 11
    activationThreshold = 11

  Note: this is not a high order sequence, so one cell per column is fine.

  Input Sequence: We train with M input sequences, each consisting of N random
  patterns. Each pattern consists of a random number of bits on. The number of
  1's in each pattern should be between 21 and 25 columns.

  Each input pattern can optionally have an amount of spatial noise represented
  by X, where X is the probability of switching an on bit with a random bit.

  Training: The TM is trained with P passes of the M sequences. There
  should be a reset between sequences. The total number of iterations during
  training is P*N*M.

  Testing: Run inference through the same set of sequences, with a reset before
  each sequence. For each sequence the system should accurately predict the
  pattern at the next time step up to and including the N-1'st pattern. The
  number of predicted inactive cells at each time step should be reasonably low.

  We can also calculate the number of synapses that should be
  learned. We raise an error if too many or too few were learned.

  B1) Basic sequence learner. M=1, N=100, P=1.

  B2) Same as above, except P=2. Test that permanences go up and that no
  additional synapses are learned. [TODO]

  B3) N=300, M=1, P=1. (See how high we can go with N)

  B4) N=100, M=3, P=1. (See how high we can go with N*M)

  B5) Like B1 but with cellsPerColumn = 4. First order sequences should still
  work just fine.

  B6) Like B4 but with cellsPerColumn = 4. First order sequences should still
  work just fine.

  B7) Like B1 but with slower learning. Set the following parameters
  differently:

    initialPermanence = 0.2
    connectedPermanence = 0.7
    permanenceIncrement = 0.2

  Now we train the TM with the B1 sequence 4 times (P=4). This will increment
  the permanences to be above 0.8 and at that point the inference will be
  correct. This test will ensure the basic match function and segment
  activation rules are working correctly.

  B8) Like B7 but with 4 cells per column. Should still work.

  B9) Like B7 but present the sequence less than 4 times: the inference should
  be incorrect.

  B10) Like B2, except that cells per column = 4. Should still add zero
  additional synapses. [TODO]

  B11) Like B5, but with activationThreshold = 8 and with each pattern
  corrupted by a small amount of spatial noise (X = 0.05).

  B12) Test accessors.

  ===============================================================================
  High Order Sequences
  ===============================================================================

  These tests ensure that high order sequences can be learned in a multiple
  cells per column instantiation.

  Parameters: Same as Basic First Order Tests above, but with varying cells per
  column.

  Input Sequence: We train with M input sequences, each consisting of N random
  patterns. Each pattern consists of a random number of bits on. The number of
  1's in each pattern should be between 21 and 25 columns. The sequences are
  constructed to contain shared subsequences, such as:

  A B C D E F G H I J
  K L M D E F N O P Q

  The position and length of shared subsequences are parameters in the tests.

  Each input pattern can optionally have an amount of spatial noise represented
  by X, where X is the probability of switching an on bit with a random bit.

  Training: Identical to basic first order tests above.

  Testing: Identical to basic first order tests above unless noted.

  We can also calculate the number of segments and synapses that should be
  learned. We raise an error if too many or too few were learned.

  H1) Learn two sequences with a shared subsequence in the middle. Parameters
  should be the same as B1. Since cellsPerColumn == 1, it should make more
  predictions than necessary.

  H2) Same as H1, but with cellsPerColumn == 4, and train multiple times.
  It should make just the right number of predictions.

  H3) Like H2, except the shared subsequence is in the beginning (e.g.
  "ABCDEF" and "ABCGHIJ"). At the point where the shared subsequence ends, all
  possible next patterns should be predicted. As soon as you see the first
  unique pattern, the predictions should collapse to be a perfect prediction.

  H4) Shared patterns. Similar to H2 except that patterns are shared between
  sequences. All sequences are different shufflings of the same set of N
  patterns (there is no shared subsequence).

  H5) Combination of H4) and H2). Shared patterns in different sequences, with
  a shared subsequence.

  H6) Stress test: every other pattern is shared. [TODO]

  H7) Start predicting in the middle of a sequence. [TODO]

  H8) Hub capacity. How many patterns can use that hub? [TODO]

  H9) Sensitivity to small amounts of spatial noise during inference
  (X = 0.05). Parameters the same as B11, and sequences like H2.

  H10) Higher order patterns with alternating elements.

  Create the following 4 sequences:

       A B A B A C
       A B A B D E
       A B F G H I
       A J K L M N

  After training we should verify that the expected transitions are in the
  model. Prediction accuracy should be perfect. In addition, during inference,
  after the first element is presented, the columns should not burst any more.
  Need to verify, for the first sequence, that the high order representation
  when presented with the second A and B is different from the representation
  in the first presentation. [TODO]
  """

  __metaclass__ = ABCMeta

  # Controls how much trace/connection detail feedTM prints (see feedTM).
  VERBOSITY = 1

  def getPatternMachine(self):
    # 100 columns, 21-25 active bits per pattern, drawn from a pool of 300
    # distinct patterns (matches the class docstring's input description).
    return PatternMachine(100, range(21, 26), num=300)

  def getDefaultTMParams(self):
    # "Fast learning mode" defaults (see class docstring): initialPermanence
    # exceeds connectedPermanence and permanenceDecrement is zero, so
    # sequences are learned in a single pass.
    return {
      "columnDimensions": (100,),
      "cellsPerColumn": 1,
      "initialPermanence": 0.8,
      "connectedPermanence": 0.7,
      "minThreshold": 11,
      "maxNewSynapseCount": 11,
      "permanenceIncrement": 0.4,
      "permanenceDecrement": 0,
      "activationThreshold": 11,
      "seed": 42,
    }

  def testB1(self):
    """Basic sequence learner.  M=1, N=100, P=1."""
    self.init()

    numbers = self.sequenceMachine.generateNumbers(1, 100)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()
    self.assertAllInactiveWereUnpredicted()

  def testB3(self):
    """N=300, M=1, P=1. (See how high we can go with N)"""
    self.init()

    numbers = self.sequenceMachine.generateNumbers(1, 300)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()
    self.assertAllInactiveWereUnpredicted()

  def testB4(self):
    """N=100, M=3, P=1. (See how high we can go with N*M)"""
    self.init()

    numbers = self.sequenceMachine.generateNumbers(3, 100)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()

  def testB5(self):
    """Like B1 but with cellsPerColumn = 4.
    First order sequences should still work just fine."""
    self.init({"cellsPerColumn": 4})

    numbers = self.sequenceMachine.generateNumbers(1, 100)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()
    self.assertAllInactiveWereUnpredicted()

  def testB6(self):
    """Like B4 but with cellsPerColumn = 4.
    First order sequences should still work just fine."""
    self.init({"cellsPerColumn": 4})

    numbers = self.sequenceMachine.generateNumbers(3, 100)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()
    self.assertAllInactiveWereUnpredicted()

  def testB7(self):
    """Like B1 but with slower learning.

    Set the following parameters differently:

      initialPermanence = 0.2
      connectedPermanence = 0.7
      permanenceIncrement = 0.2

    Now we train the TM with the B1 sequence 4 times (P=4). This will
    increment the permanences to be above 0.8 and at that point the inference
    will be correct. This test will ensure the basic match function and
    segment activation rules are working correctly.
    """
    self.init({"initialPermanence": 0.2,
               "connectedPermanence": 0.7,
               "permanenceIncrement": 0.2})

    numbers = self.sequenceMachine.generateNumbers(1, 100)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    # Four passes: 0.2 + 4*0.2 pushes permanences past connectedPermanence.
    for _ in xrange(4):
      self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()
    self.assertAllInactiveWereUnpredicted()

  def testB8(self):
    """Like B7 but with 4 cells per column.
    Should still work."""
    self.init({"initialPermanence": 0.2,
               "connectedPermanence": 0.7,
               "permanenceIncrement": 0.2,
               "cellsPerColumn": 4})

    numbers = self.sequenceMachine.generateNumbers(1, 100)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    for _ in xrange(4):
      self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()
    self.assertAllInactiveWereUnpredicted()

  def testB9(self):
    """Like B7 but present the sequence less than 4 times.
    The inference should be incorrect."""
    self.init({"initialPermanence": 0.2,
               "connectedPermanence": 0.7,
               "permanenceIncrement": 0.2})

    numbers = self.sequenceMachine.generateNumbers(1, 100)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    # Only three passes: permanences stay below connectedPermanence, so no
    # segments become connected and nothing should be predicted.
    for _ in xrange(3):
      self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWereUnpredicted()

  def testB11(self):
    """Like B5, but with activationThreshold = 8 and with each pattern
    corrupted by a small amount of spatial noise (X = 0.05)."""
    self.init({"cellsPerColumn": 4,
               "activationThreshold": 8,
               "minThreshold": 8})

    numbers = self.sequenceMachine.generateNumbers(1, 100)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    self.feedTM(sequence)

    # Test against a noisy copy; the lowered activationThreshold should make
    # the TM tolerant of the corrupted bits.
    sequence = self.sequenceMachine.addSpatialNoise(sequence, 0.05)

    self._testTM(sequence)
    unpredictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTraceUnpredictedActiveColumns())
    self.assertTrue(unpredictedActiveColumnsMetric.mean < 1)

  def testH1(self):
    """Learn two sequences with a short shared pattern.

    Parameters should be the same as B1.
    Since cellsPerColumn == 1, it should make more predictions than necessary.
    """
    self.init()

    numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15))
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()

    predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedInactiveColumns())
    self.assertTrue(predictedInactiveColumnsMetric.mean > 0)

    # At the end of both shared sequences, there should be
    # predicted but inactive columns
    self.assertTrue(
      len(self.tm.mmGetTracePredictedInactiveColumns().data[15]) > 0)
    self.assertTrue(
      len(self.tm.mmGetTracePredictedInactiveColumns().data[35]) > 0)

  def testH2(self):
    """Same as H1, but with cellsPerColumn == 4, and train multiple times.
    It should make just the right number of predictions."""
    self.init({"cellsPerColumn": 4})

    numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15))
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    for _ in xrange(10):
      self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()

    # Without some kind of decay, expect predicted inactive columns at the
    # end of the first shared sequence
    predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedInactiveColumns())
    self.assertTrue(predictedInactiveColumnsMetric.sum < 26)

    # At the end of the second shared sequence, there should be no
    # predicted but inactive columns
    self.assertEqual(
      len(self.tm.mmGetTracePredictedInactiveColumns().data[36]), 0)

  def testH3(self):
    """Like H2, except the shared subsequence is in the beginning.

    (e.g. "ABCDEF" and "ABCGHIJ") At the point where the shared subsequence
    ends, all possible next patterns should be predicted. As soon as you see
    the first unique pattern, the predictions should collapse to be a perfect
    prediction."""
    self.init({"cellsPerColumn": 4})

    numbers = self.sequenceMachine.generateNumbers(2, 20, (0, 5))
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()

    predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedInactiveColumns())
    self.assertTrue(predictedInactiveColumnsMetric.sum < 26 * 2)

    # At the end of each shared sequence, there should be
    # predicted but inactive columns
    self.assertTrue(
      len(self.tm.mmGetTracePredictedInactiveColumns().data[5]) > 0)
    self.assertTrue(
      len(self.tm.mmGetTracePredictedInactiveColumns().data[25]) > 0)

  def testH4(self):
    """Shared patterns. Similar to H2 except that patterns are shared between
    sequences. All sequences are different shufflings of the same set of N
    patterns (there is no shared subsequence)."""
    self.init({"cellsPerColumn": 4})

    numbers = []
    for _ in xrange(2):
      numbers += self.sequenceMachine.generateNumbers(1, 20)

    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    for _ in xrange(20):
      self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()

    predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedInactiveColumns())
    self.assertTrue(predictedInactiveColumnsMetric.mean < 3)

  def testH5(self):
    """Combination of H4) and H2).
    Shared patterns in different sequences, with a shared subsequence."""
    self.init({"cellsPerColumn": 4})

    numbers = []
    # A shared 4-element subsequence embedded into both shuffled sequences.
    shared = self.sequenceMachine.generateNumbers(1, 5)[:-1]
    for _ in xrange(2):
      sublist = self.sequenceMachine.generateNumbers(1, 20)
      # Exclude the shared subsequence's own numbers from the filler patterns.
      sublist = [x for x in sublist if x not in xrange(5)]
      numbers += sublist[0:10] + shared + sublist[10:]

    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    for _ in xrange(20):
      self.feedTM(sequence)

    self._testTM(sequence)
    self.assertAllActiveWerePredicted()

    predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedInactiveColumns())
    self.assertTrue(predictedInactiveColumnsMetric.mean < 3)

  def testH9(self):
    """Sensitivity to small amounts of spatial noise during inference
    (X = 0.05). Parameters the same as B11, and sequences like H2."""
    self.init({"cellsPerColumn": 4,
               "activationThreshold": 8,
               "minThreshold": 8})

    numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15))
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    for _ in xrange(10):
      self.feedTM(sequence)

    # Infer on a noisy copy of the training sequence.
    sequence = self.sequenceMachine.addSpatialNoise(sequence, 0.05)

    self._testTM(sequence)
    unpredictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTraceUnpredictedActiveColumns())
    self.assertTrue(unpredictedActiveColumnsMetric.mean < 3)

  def testH10(self):
    """Orphan Decay mechanism reduces predicted inactive cells (extra
    predictions).

    Test feeds in noisy sequences (X = 0.05) to TM with and without orphan
    decay. TM with orphan decay should have many fewer predicted inactive
    columns. Parameters the same as B11, and sequences like H9."""
    # train TM on noisy sequences with orphan decay turned off
    self.init({"cellsPerColumn": 4,
               "activationThreshold": 8,
               "minThreshold": 8})

    numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15))
    sequence = self.sequenceMachine.generateFromNumbers(numbers)

    # Generate the noisy copies once so both runs train on identical input.
    sequenceNoisy = dict()
    for i in xrange(10):
      sequenceNoisy[i] = self.sequenceMachine.addSpatialNoise(sequence, 0.05)
      self.feedTM(sequenceNoisy[i])
      self.tm.mmClearHistory()

    self._testTM(sequence)
    predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedInactiveColumns())
    predictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedActiveColumns())

    predictedInactiveColumnsMeanNoOrphanDecay = predictedInactiveColumnsMetric.mean
    predictedActiveColumnsMeanNoOrphanDecay = predictedActiveColumnsMetric.mean

    # train TM on the same set of noisy sequences with orphan decay turned on
    self.init({"cellsPerColumn": 4,
               "activationThreshold": 8,
               "minThreshold": 8,
               "predictedSegmentDecrement": 0.04})

    for i in xrange(10):
      self.feedTM(sequenceNoisy[i])
      self.tm.mmClearHistory()

    self._testTM(sequence)
    predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedInactiveColumns())
    predictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedActiveColumns())

    predictedInactiveColumnsMeanOrphanDecay = predictedInactiveColumnsMetric.mean
    predictedActiveColumnsMeanOrphanDecay = predictedActiveColumnsMetric.mean

    # Orphan decay should prune the spurious predictions without hurting the
    # correct ones.
    self.assertGreater(predictedInactiveColumnsMeanNoOrphanDecay, 0)
    self.assertGreater(predictedInactiveColumnsMeanNoOrphanDecay, predictedInactiveColumnsMeanOrphanDecay)
    self.assertAlmostEqual(predictedActiveColumnsMeanNoOrphanDecay, predictedActiveColumnsMeanOrphanDecay)

  # ==============================
  # Overrides
  # ==============================

  def setUp(self):
    """Print a banner identifying the test before each run."""
    super(ExtensiveTemporalMemoryTest, self).setUp()

    print ("\n"
           "======================================================\n"
           "Test: {0} \n"
           "{1}\n"
           "======================================================\n"
    ).format(self.id(), self.shortDescription())

  def feedTM(self, sequence, learn=True, num=1):
    """Feed the sequence to the TM, then optionally print traces and
    connections depending on VERBOSITY."""
    super(ExtensiveTemporalMemoryTest, self).feedTM(
      sequence, learn=learn, num=num)

    if self.VERBOSITY >= 2:
      print self.tm.mmPrettyPrintTraces(
        self.tm.mmGetDefaultTraces(verbosity=self.VERBOSITY-1))
      print

    if learn and self.VERBOSITY >= 3:
      print self.tm.mmPrettyPrintConnections()

  # ==============================
  # Helper functions
  # ==============================

  def _testTM(self, sequence):
    """Run inference (learn=False) on the sequence and print the metrics."""
    self.feedTM(sequence, learn=False)

    print self.tm.mmPrettyPrintMetrics(self.tm.mmGetDefaultMetrics())

  def assertAllActiveWerePredicted(self):
    """Assert perfect prediction: no unpredicted active columns, and every
    active pattern (21-25 columns) was fully predicted."""
    unpredictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTraceUnpredictedActiveColumns())
    predictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedActiveColumns())

    self.assertEqual(unpredictedActiveColumnsMetric.sum, 0)

    self.assertEqual(predictedActiveColumnsMetric.min, 21)
    self.assertEqual(predictedActiveColumnsMetric.max, 25)

  def assertAllInactiveWereUnpredicted(self):
    """Assert there were no extra predictions (predicted-but-inactive
    columns)."""
    predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedInactiveColumns())

    self.assertEqual(predictedInactiveColumnsMetric.sum, 0)

  def assertAllActiveWereUnpredicted(self):
    """Assert complete prediction failure: nothing active was predicted."""
    unpredictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTraceUnpredictedActiveColumns())
    predictedActiveColumnsMetric = self.tm.mmGetMetricFromTrace(
      self.tm.mmGetTracePredictedActiveColumns())

    self.assertEqual(predictedActiveColumnsMetric.sum, 0)

    self.assertEqual(unpredictedActiveColumnsMetric.min, 21)
    self.assertEqual(unpredictedActiveColumnsMetric.max, 25)
| 22,199 | Python | .py | 457 | 43.170678 | 106 | 0.722276 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,162 | tm_test.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/tm_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file performs a variety of tests on the reference temporal memory code.
basic_test
==========
Tests creation and serialization of the TM class. Sets parameters and ensures
they are the same after a serialization and de-serialization step. Runs learning
and inference on a small number of random patterns and ensures it doesn't crash.
===============================================================================
Basic First Order Sequences
===============================================================================
These tests ensure the most basic (first order) sequence learning mechanism is
working.
Parameters: Use a "fast learning mode": turn off global decay, temporal pooling
and hilo (make minThreshold really high). initPerm should be greater than
connectedPerm and permanenceDec should be zero. With these settings sequences
should be learned in one pass:
minThreshold = newSynapseCount
globalDecay = 0
temporalPooling = False
initialPerm = 0.8
connectedPerm = 0.7
permanenceDec = 0
permanenceInc = 0.4
Other Parameters:
numCols = 100
cellsPerCol = 1
newSynapseCount=11
activationThreshold = 8
permanenceMax = 1
Note: this is not a high order sequence, so one cell per column is fine.
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 21 and 25 columns. The sequences are
constructed so that consecutive patterns within a sequence don't share any
columns.
Training: The TM is trained with P passes of the M sequences. There
should be a reset between sequences. The total number of iterations during
training is P*N*M.
Testing: Run inference through the same set of sequences, with a reset before
each sequence. For each sequence the system should accurately predict the
pattern at the next time step up to and including the N-1'st pattern. A perfect
prediction consists of getting every column correct in the prediction, with no
extra columns. We report the number of columns that are incorrect and report a
failure if more than 2 columns are incorrectly predicted.
We can also calculate the number of segments and synapses that should be
learned. We raise an error if too many or too few were learned.
B1) Basic sequence learner. M=1, N=100, P=1.
B2) Same as above, except P=2. Test that permanences go up and that no
additional synapses or segments are learned.
B3) N=300, M=1, P=1. (See how high we can go with M)
B4) N=100, M=3, P=1 (See how high we can go with N*M)
B5) Like B1) but only have newSynapseCount columns ON in each pattern (instead of
between 21 and 25), and set activationThreshold to newSynapseCount.
B6) Like B1 but with cellsPerCol = 4. First order sequences should still work
just fine.
B7) Like B1 but with slower learning. Set the following parameters differently:
activationThreshold = newSynapseCount
minThreshold = activationThreshold
initialPerm = 0.2
connectedPerm = 0.7
permanenceInc = 0.2
Now we train the TM with the B1 sequence 4 times (P=4). This will increment
the permanences to be above 0.8 and at that point the inference will be correct.
This test will ensure the basic match function and segment activation rules are
working correctly.
B8) Like B7 but with 4 cells per column. Should still work.
B9) Like B7 but present the sequence less than 4 times: the inference should be
incorrect.
B10) Like B2, except that cells per column = 4. Should still add zero additional
synapses.
===============================================================================
High Order Sequences
===============================================================================
These tests ensure that high order sequences can be learned in a multiple cells
per column instantiation.
Parameters: Same as Basic First Order Tests above, but with varying cells per
column.
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 21 and 25 columns (except for H0). The
sequences are constructed so that consecutive patterns within a sequence don't
share any columns. The sequences are constructed to contain shared subsequences,
such as:
A B C D E F G H I J
K L M D E F N O P Q
The position and length of shared subsequences are parameters in the tests.
Training: Identical to basic first order tests above.
Testing: Identical to basic first order tests above unless noted.
We can also calculate the number of segments and synapses that should be
learned. We raise an error if too many or too few were learned.
H0) Two simple high order sequences, each of length 7, with a shared
subsequence in positions 2-4. Each pattern has a consecutive set of 5 bits on.
No pattern shares any columns with the others. These sequences are easy to
visualize and is very useful for debugging.
H1) Learn two sequences with a short shared pattern. Parameters
should be the same as B1. This test will FAIL since cellsPerCol == 1. No
consecutive patterns share any column.
H2) As above but with cellsPerCol == 4. This test should PASS. No consecutive
patterns share any column.
H2a) Same as above, except P=2. Test that permanences go up and that no
additional synapses or segments are learned.
H3) Same parameters as H.2 except sequences are created such that they share a
single significant sub-sequence. Subsequences should be reasonably long and in
the middle of sequences. No consecutive patterns share any column.
H4) Like H.3, except the shared subsequence is in the beginning. (e.g.
"ABCDEF" and "ABCGHIJ". At the point where the shared subsequence ends, all
possible next patterns should be predicted. As soon as you see the first unique
pattern, the predictions should collapse to be a perfect prediction.
H5) Shared patterns. Similar to H3 except that patterns are shared between
sequences. All sequences are different shufflings of the same set of N
patterns (there is no shared subsequence). Care should be taken such that the
same three patterns never follow one another in two sequences.
H6) Combination of H5) and H3). Shared patterns in different sequences, with a
shared subsequence.
H7) Stress test: every other pattern is shared. [Unimplemented]
H8) Start predicting in the middle of a sequence. [Unimplemented]
H9) Hub capacity. How many patterns can use that hub?
[Implemented, but does not run by default.]
H10) Sensitivity to small amounts of noise during inference. [Unimplemented]
H11) Higher order patterns with alternating elements.
Create the following 4 sequences:
A B A B A C
A B A B D E
A B F G H I
A J K L M N
After training we should verify that the expected transitions are in the
model. Prediction accuracy should be perfect. In addition, during inference,
after the first element is presented, the columns should not burst any more.
Need to verify, for the first sequence, that the high order representation
when presented with the second A and B is different from the representation
in the first presentation.
===============================================================================
Temporal Pooling Tests [UNIMPLEMENTED]
===============================================================================
Parameters: Use a "fast learning mode": With these settings sequences should be
learned in one pass:
minThreshold = newSynapseCount
globalDecay = 0
initialPerm = 0.8
connectedPerm = 0.7
permanenceDec = 0
permanenceInc = 0.4
Other Parameters:
cellsPerCol = 4
newSynapseCount=11
activationThreshold = 11
permanenceMax = 1
doPooling = True
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 17 and 21 columns. The sequences are
constructed so that consecutive patterns within a sequence don't share any
columns.
Note: for pooling tests the density of input patterns should be pretty low
since each pooling step increases the output density. At the same time, we need
enough bits on in the input for the temporal memory to find enough synapses. So,
for the tests, constraints should be something like:
(Input Density) * (Number of pooling steps) < 25 %.
AND
sum(Input) > newSynapseCount*1.5
Training: The TM is trained with P passes of the M sequences. There
should be a reset between sequences. The total number of iterations during
training is P*N*M.
Testing: Run inference through the same set of sequences, with a reset before
each sequence. For each sequence the system should accurately predict the
pattern at the next P time steps, up to and including the N-P'th pattern. A
perfect prediction consists of getting every column correct in the prediction,
with no extra columns. We report the number of columns that are incorrect and
report a failure if more than 2 columns are incorrectly predicted.
P1) Train the TM two times (P=2) on a single long sequence consisting of random
patterns (N=20, M=1). There should be no overlapping columns between successive
patterns. During inference, the TM should be able reliably predict the pattern
two time steps in advance. numCols should be about 350 to meet the above
constraints and also to maintain consistency with test P2.
P2) Increase TM rate to 3 time steps in advance (P=3). At each step during
inference, the TM should be able to reliably predict the pattern coming up at
t+1, t+2, and t+3..
P3) Set segUpdateValidDuration to 2 and set P=3. This should behave almost
identically to P1. It should only predict the next time step correctly and not
two time steps in advance. (Check off by one error in this logic.)
P4) As above, but with multiple sequences.
P5) Same as P3 but with shared subsequences.
Continuous mode tests
=====================
Slow changing inputs.
Orphan Decay Tests
==================
HiLo Tests
==========
A high order sequence memory like the TM can memorize very long sequences. In
many applications though you don't want to memorize. You see a long sequence of
patterns but there are actually lower order repeating sequences embedded within
it. A simplistic example is words in a sentence: the same short word sequences recur across many different sentences. You'd like the TM to learn those sequences.
Tests should capture number of synapses learned and compare against
theoretically optimal numbers to pass/fail.
HL0a) For debugging, similar to H0. We want to learn a 3 pattern long sequence presented
with noise before and after, with no resets. Two steps of noise will be presented.
The noise will be 20 patterns, presented in random order. Every pattern has a
consecutive set of 5 bits on, so the vector will be 115 bits long. No pattern
shares any columns with the others. These sequences are easy to visualize and is
very useful for debugging.
TM parameters should be the same as B7 except that permanenceDec should be 0.05:
activationThreshold = newSynapseCount
minThreshold = activationThreshold
initialPerm = 0.2
connectedPerm = 0.7
permanenceInc = 0.2
permanenceDec = 0.05
So, this means it should learn a sequence after 4 repetitions. It will take
4 orphan decay steps to get an incorrect synapse to go away completely.
HL0b) Like HL0a, but after the 3-sequence is learned, try to learn a 4-sequence that
builds on the 3-sequence. For example, if learning A-B-C we train also on
D-A-B-C. It should learn that ABC is separate from DABC. Note: currently this
test is disabled in the code. It is a bit tricky to test this. When you present DAB,
you should predict the same columns as when you present AB (i.e. in both cases
C should be predicted). However, the representation for C in DABC should be
different than the representation for C in ABC. Furthermore, when you present
AB, the representation for C should be an OR of the representation in DABC and ABC
since you could also be starting in the middle of the DABC sequence. All this is
actually happening in the code, but verified by visual inspection only.
HL1) Noise + sequence + noise + sequence repeatedly without resets until it has
learned that sequence. Train the TM repeatedly with N random sequences that all
share a single subsequence. Each random sequence can be 10 patterns long,
sharing a subsequence that is 5 patterns long. There should be no resets
between presentations. Inference should then be on that 5 long shared subsequence.
Example (3-long shared subsequence):
A B C D E F G H I J
K L M D E F N O P Q
R S T D E F U V W X
Y Z 1 D E F 2 3 4 5
TM parameters should be the same as HL0.
HL2) Like HL1, but after A B C has learned, try to learn D A B C . It should learn
ABC is separate from DABC.
HL3) Like HL2, but test with resets.
HL4) Like HL1 but with minThreshold high. This should FAIL and learn a ton
of synapses.
HiLo but with true high order sequences embedded in noise
Present 25 sequences in random order with no resets but noise between
sequences (1-20 samples). Learn all 25 sequences. Test global decay vs non-zero
permanenceDec .
Pooling + HiLo Tests [UNIMPLEMENTED]
====================
Needs to be defined.
Global Decay Tests [UNIMPLEMENTED]
==================
Simple tests to ensure global decay is actually working.
Sequence Likelihood Tests
=========================
These tests are in the file TMLikelihood.py
Segment Learning Tests [UNIMPLEMENTED]
======================
Multi-attribute sequence tests.
SL1) Train the TM repeatedly using a single (multiple) sequence plus noise. The
sequence can be relatively short, say 20 patterns. No two consecutive patterns
in the sequence should share columns. Add random noise each time a pattern is
presented. The noise should be different for each presentation and can be equal
to the number of on bits in the pattern. After N iterations of the noisy
sequences, the TM should achieve perfect inference on the true sequence.
There should be resets between each presentation of the sequence.
Check predictions in the sequence only. And test with clean sequences.
Vary percentage of bits that are signal vs noise.
Noise can be a fixed alphabet instead of being randomly generated.
SL2) As above, but with no resets.
Shared Column Tests [UNIMPLEMENTED]
===================
Carefully test what happens when consecutive patterns in a sequence share
columns.
Sequence Noise Tests [UNIMPLEMENTED]
====================
Note: I don't think these will work with the current logic. Need to discuss
whether we want to accommodate sequence noise like this.
SN1) Learn sequence with pooling up to T timesteps. Run inference on a sequence
and occasionally drop elements of a sequence. Inference should still work.
SN2) As above, but occasionally add a random pattern into a sequence.
SN3) A combination of the above two.
Capacity Tests [UNIMPLEMENTED]
==============
These are stress tests that verify that the temporal memory can learn a large
number of sequences and can predict a large number of possible next steps. Some
research needs to be done first to understand the capacity of the system as it
relates to the number of columns, cells per column, etc.
Token Prediction Tests: Test how many predictions of individual tokens we can
superimpose and still recover.
Online Learning Tests [UNIMPLEMENTED]
=====================
These tests will verify that the temporal memory continues to work even if
sequence statistics (and the actual sequences) change slowly over time. The TM
should adapt to the changes and learn to recognize newer sequences (and forget
the older sequences?).
"""
import cPickle
import numpy
import pickle
import pprint
import random
import sys
from numpy import *
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
#---------------------------------------------------------------------------------
TEST_CPP_TM = 1   # temporarily disabled until it can be updated
VERBOSITY = 0     # how chatty the unit tests should be
SEED = 33         # the random seed used throughout
TMClass = BacktrackingTM   # TM implementation under test (Python reference)
checkSynapseConsistency = False   # passed through to the TM's self-checks
rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random
#---------------------------------------------------------------------------------
# Helper routines
#--------------------------------------------------------------------------------
def printOneTrainingVector(x):
print ''.join('1' if k != 0 else '.' for k in x)
def printAllTrainingSequences(trainingSequences, upTo = 99999):
for t in xrange(min(len(trainingSequences[0]), upTo)):
print 't=',t,
for i,trainingSequence in enumerate(trainingSequences):
print "\tseq#",i,'\t',
printOneTrainingVector(trainingSequences[i][t])
def generatePattern(numCols = 100,
                    minOnes =21,
                    maxOnes =25,
                    colSet = None,
                    prevPattern =None):
  """Generate a single test pattern with given parameters.

  Parameters:
  --------------------------------------------
  numCols:     Number of columns in each pattern.
  minOnes:     The minimum number of 1's in each pattern.
  maxOnes:     Upper bound on the number of 1's. Note that numpy's
               RandomState.randint excludes the high bound, so the actual
               maximum is maxOnes - 1.
  colSet:      The set of column indices to draw from. Defaults to all of
               [0, numCols). (The previous default of [] was unusable: a
               list has no .difference() method, and mutable defaults are
               shared across calls.)
  prevPattern: Pattern to avoid (null intersection with the result).
               Defaults to an empty pattern, i.e. nothing is excluded.
               (Replaces the old shared numpy.array([]) default.)

  Returns a numpy float32 vector of length numCols with the chosen bits set.
  """
  assert minOnes < maxOnes
  assert maxOnes < numCols

  # None sentinels instead of mutable defaults: safe, and the defaults now
  # actually work when callers omit these arguments.
  if colSet is None:
    colSet = set(range(numCols))
  if prevPattern is None:
    prevPattern = numpy.array([])

  nOnes = rgen.randint(minOnes, maxOnes)

  # Candidate columns are those not active in the previous pattern; this
  # guarantees consecutive patterns are disjoint.
  candidates = list(colSet.difference(set(prevPattern.nonzero()[0])))
  rgen.shuffle(candidates)
  ind = candidates[:nOnes]

  x = numpy.zeros(numCols, dtype='float32')
  x[ind] = 1

  return x
def buildTrainingSet(numSequences = 2,
                     sequenceLength = 100,
                     pctShared = 0.2,
                     seqGenMode = 'shared sequence',
                     subsequenceStartPos = 10,
                     numCols = 100,
                     minOnes=21,
                     maxOnes = 25,
                     disjointConsecutive =True):
  """Build random high order test sequences.

  Parameters:
  --------------------------------------------
  numSequences:        The number of sequences created.
  sequenceLength:      The length of each sequence.
  pctShared:           The percentage of sequenceLength that is shared across
                       every sequence. If sequenceLength is 100 and pctShared
                       is 0.2, then a subsequence consisting of 20 patterns
                       will be in every sequence. Can also be the keyword
                       'one pattern', in which case a single time step is shared.
  seqGenMode:          What kind of sequence to generate. If contains 'shared'
                       generates shared subsequence. If contains 'no shared',
                       does not generate any shared subsequence. If contains
                       'shuffle', will use common patterns shuffle among the
                       different sequences. If contains 'beginning', will
                       place shared subsequence at the beginning.
  subsequenceStartPos: The position where the shared subsequence starts
  numCols:             Number of columns in each pattern.
  minOnes:             The minimum number of 1's in each pattern.
  maxOnes:             The maximum number of 1's in each pattern.
  disjointConsecutive: Whether to generate disjoint consecutive patterns or not.

  Returns a 2-tuple (trainingSequences, hubEnd) where hubEnd is the time step
  just after the shared subsequence ends, or -1 if nothing is shared.
  """
  # Calculate the set of column indexes once to be used in each call to generatePattern()
  colSet = set(range(numCols))

  if 'beginning' in seqGenMode:
    assert 'shared' in seqGenMode and 'no shared' not in seqGenMode

  # A single sequence cannot share anything with itself.
  if 'no shared' in seqGenMode or numSequences == 1:
    pctShared = 0.0

  #--------------------------------------------------------------------------------
  # Build shared subsequence
  if 'no shared' not in seqGenMode and 'one pattern' not in seqGenMode:
    sharedSequenceLength = int(pctShared*sequenceLength)
  elif 'one pattern' in seqGenMode:
    sharedSequenceLength = 1
  else:
    sharedSequenceLength = 0

  assert sharedSequenceLength + subsequenceStartPos < sequenceLength

  sharedSequence = []

  for i in xrange(sharedSequenceLength):
    # Pass the previous pattern so consecutive patterns share no columns.
    if disjointConsecutive and i > 0:
      x = generatePattern(numCols, minOnes, maxOnes, colSet, sharedSequence[i-1])
    else:
      x = generatePattern(numCols, minOnes, maxOnes, colSet)
    sharedSequence.append(x)

  #--------------------------------------------------------------------------------
  # Build random training set, splicing in the shared subsequence
  trainingSequences = []

  # Number of patterns that come after the shared subsequence.
  if 'beginning' not in seqGenMode:
    trailingLength = sequenceLength - sharedSequenceLength - subsequenceStartPos
  else:
    trailingLength = sequenceLength - sharedSequenceLength

  for k,s in enumerate(xrange(numSequences)):

    # TODO: implement no repetitions
    if len(trainingSequences) > 0 and 'shuffle' in seqGenMode:
      # 'shuffle' mode: permute the non-shared time steps of the previous
      # sequence while keeping the shared subsequence fixed in place.
      r = range(subsequenceStartPos) \
          + range(subsequenceStartPos + sharedSequenceLength, sequenceLength)

      rgen.shuffle(r)

      r = r[:subsequenceStartPos] \
          + range(subsequenceStartPos, subsequenceStartPos + sharedSequenceLength) \
          + r[subsequenceStartPos:]

      sequence = [trainingSequences[k-1][j] for j in r]

    else:
      # Fresh random sequence: prefix, optional shared subsequence, suffix.
      sequence = []

      if 'beginning' not in seqGenMode:
        for i in xrange(subsequenceStartPos):
          if disjointConsecutive and i > 0:
            x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-1])
          else:
            x = generatePattern(numCols, minOnes, maxOnes, colSet)
          sequence.append(x)

      if 'shared' in seqGenMode and 'no shared' not in seqGenMode:
        sequence.extend(sharedSequence)

      for i in xrange(trailingLength):
        if disjointConsecutive and i > 0:
          x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-1])
        else:
          x = generatePattern(numCols, minOnes, maxOnes, colSet)
        sequence.append(x)

    assert len(sequence) == sequenceLength

    trainingSequences.append(sequence)

  assert len(trainingSequences) == numSequences

  if VERBOSITY >= 2:
    print "Training Sequences"
    pprint.pprint(trainingSequences)

  if sharedSequenceLength > 0:
    return (trainingSequences, subsequenceStartPos + sharedSequenceLength)
  else:
    return (trainingSequences, -1)
def getSimplePatterns(numOnes, numPatterns):
  """Return numPatterns simple binary vectors. Pattern i has the numOnes
  consecutive bits [i*numOnes, (i+1)*numOnes) turned on, so no two patterns
  share any columns. Each vector is numPatterns*numOnes bits long."""
  totalCols = numOnes * numPatterns
  patterns = []
  i = 0
  while i < numPatterns:
    v = numpy.zeros(totalCols, dtype='float32')
    start = i * numOnes
    v[start:start + numOnes] = 1
    patterns.append(v)
    i += 1
  return patterns
def buildSimpleTrainingSet(numOnes=5):
  """Two very simple high order sequences for debugging. Each pattern in the
  sequence has a series of 1's in a specific set of columns. Returns the two
  sequences plus the position (5) just after their shared subsequence."""
  p = getSimplePatterns(numOnes, 11)

  # Both sequences share the subsequence p2 p3 p4 in positions 2-4.
  sharedCore = [p[2], p[3], p[4]]
  s1 = [p[0], p[1]] + sharedCore + [p[5], p[6]]
  s2 = [p[7], p[8]] + sharedCore + [p[9], p[10]]

  return ([s1, s2], 5)
def buildAlternatingTrainingSet(numOnes=5):
  """High order sequences that alternate elements. Pattern i has one's in
  i*numOnes to (i+1)*numOnes.

  The sequences are:
  A B A B A C
  A B A B D E
  A B F G H I
  A J K L M N
  """
  p = getSimplePatterns(numOnes, 14)

  # Indices into p for each of the four sequences diagrammed above
  # (A=0, B=1, C=2, ...).
  sequenceIndices = [
    [0, 1, 0, 1, 0, 2],
    [0, 1, 0, 1, 3, 4],
    [0, 1, 5, 6, 7, 8],
    [0, 9, 10, 11, 12, 13],
  ]
  trainingSequences = [[p[i] for i in idxList] for idxList in sequenceIndices]

  return (trainingSequences, 5)
def buildHL0aTrainingSet(numOnes=5):
  """Simple sequences for HL0: learn p0->p1->p2 embedded in noise.

  There are 23 non-overlapping patterns, p0 to p22. We build one long
  stream of the form  N N p0 p1 p2 N  repeated, where each N is drawn
  uniformly from p3..p22.

  Returns ([s], [[p0, p1, p2]]): the training stream and the single
  subsequence the TM is expected to learn.
  """
  numPatterns = 23
  p = getSimplePatterns(numOnes, numPatterns)

  def randomNoise():
    # Noise is any pattern other than the target subsequence p0..p2.
    return p[rgen.randint(3,23)]

  s = [randomNoise()]
  for _ in xrange(20):
    s.append(randomNoise())
    s.extend([p[0], p[1], p[2]])
    s.append(randomNoise())

  return ([s], [[p[0], p[1], p[2]]])
def buildHL0bTrainingSet(numOnes=5):
  """Simple sequences for HL0b. Each pattern in the sequence has a series of 1's
  in a specific set of columns.

  There are 23 patterns, p0 to p22.
  The sequences we want to learn are p1->p2->p3 and p0->p1->p2->p4.
  We create a very long sequence consisting of these two sub-sequences
  intermixed with noise, such as:
    N N p0 p1 p2 p4 N N p1 p2 p3 N N p1 p2 p3
  N is randomly chosen from p5 to p22.

  Returns ([s], subsequences) where s is the training stream and
  subsequences holds the two target sub-sequences listed above.
  """
  numPatterns = 23
  p = getSimplePatterns(numOnes, numPatterns)
  s = []
  # Leading noise pattern.
  s.append(p[rgen.randint(5,numPatterns)])
  for i in xrange(50):
    # One noise pattern before each embedded subsequence...
    r = rgen.randint(5,numPatterns)
    print r,
    s.append(p[r])
    # ...then a fair coin flip chooses which subsequence to embed.
    if rgen.binomial(1, 0.5) > 0:
      print "S1",
      s.append(p[0])
      s.append(p[1])
      s.append(p[2])
      s.append(p[4])
    else:
      print "S2",
      s.append(p[1])
      s.append(p[2])
      s.append(p[3])
    # One noise pattern after the subsequence.
    r = rgen.randint(5,numPatterns)
    s.append(p[r])
    print r,
  # Terminate the comma-separated trace output with a newline.
  print
  return ([s], [ [p[0], p[1], p[2], p[4]], [p[1], p[2], p[3]] ])
# Basic test (creation, pickling, basic run of learning and inference)
def basicTest():
global TMClass, SEED, VERBOSITY, checkSynapseConsistency
#--------------------------------------------------------------------------------
# Create TM object
numberOfCols =10
cellsPerColumn =3
initialPerm =.2
connectedPerm =.8
minThreshold =2
newSynapseCount =5
permanenceInc =.1
permanenceDec =.05
permanenceMax =1
globalDecay =.05
activationThreshold =4 # low for those basic tests on purpose
doPooling =True
segUpdateValidDuration =5
seed =SEED
verbosity =VERBOSITY
tm = TMClass(numberOfCols, cellsPerColumn,
initialPerm, connectedPerm,
minThreshold, newSynapseCount,
permanenceInc, permanenceDec, permanenceMax,
globalDecay, activationThreshold,
doPooling, segUpdateValidDuration,
seed=seed, verbosity=verbosity,
pamLength = 1000,
checkSynapseConsistency=checkSynapseConsistency)
print "Creation ok"
#--------------------------------------------------------------------------------
# Save and reload
pickle.dump(tm, open("test_tm.pkl", "wb"))
tm2 = pickle.load(open("test_tm.pkl"))
assert tm2.numberOfCols == numberOfCols
assert tm2.cellsPerColumn == cellsPerColumn
print tm2.initialPerm
assert tm2.initialPerm == numpy.float32(.2)
assert tm2.connectedPerm == numpy.float32(.8)
assert tm2.minThreshold == minThreshold
assert tm2.newSynapseCount == newSynapseCount
assert tm2.permanenceInc == numpy.float32(.1)
assert tm2.permanenceDec == numpy.float32(.05)
assert tm2.permanenceMax == 1
assert tm2.globalDecay == numpy.float32(.05)
assert tm2.activationThreshold == activationThreshold
assert tm2.doPooling == doPooling
assert tm2.segUpdateValidDuration == segUpdateValidDuration
assert tm2.seed == SEED
assert tm2.verbosity == verbosity
print "Save/load ok"
#--------------------------------------------------------------------------------
# Learn
for i in xrange(5):
xi = rgen.randint(0,2,(numberOfCols))
x = numpy.array(xi, dtype="uint32")
y = tm.learn(x)
#--------------------------------------------------------------------------------
# Infer
patterns = rgen.randint(0,2,(4,numberOfCols))
for i in xrange(10):
xi = rgen.randint(0,2,(numberOfCols))
x = numpy.array(xi, dtype="uint32")
y = tm.infer(x)
if i > 0:
p = tm._checkPrediction([pattern.nonzero()[0] for pattern in patterns])
print "basicTest ok"
#---------------------------------------------------------------------------------
# Figure out acceptable patterns if none were passed to us.
def findAcceptablePatterns(tm, t, whichSequence, trainingSequences, nAcceptable = 1):
  """
  Infer the set of patterns that count as acceptable predictions at time step
  t for the sequence the TM is currently tracking: the current pattern plus a
  certain number of the patterns that follow it. Anything else is
  unacceptable.

  TODO:
  ====
  - Doesn't work for noise cases.
  - Might run in trouble if shared subsequence at the beginning.

  Parameters:
  ==========
  tm                 the whole TM, so that we can look at its parameters
  t                  the current time step
  whichSequence      the sequence we are currently tracking
  trainingSequences  all the training sequences
  nAcceptable        how many steps beyond the current time step we are
                     willing to accept. With pooling this is bounded above by
                     the TM's segUpdateValidDuration; the default of 1 means
                     the pattern right after the current one is always
                     considered predictable.

  Return value:
  ============
  acceptablePatterns A list of acceptable patterns for prediction.
  """
  # The current pattern and the one right after it are always acceptable.
  upTo = t + 2

  # With pooling enabled, predictions legitimately reach further ahead.
  if tm.doPooling:
    upTo += min(tm.segUpdateValidDuration, nAcceptable)

  assert upTo <= len(trainingSequences[whichSequence])

  acceptablePatterns = []

  # Special case: two sequences that share their beginning. At the step where
  # the shared prefix ends (t), both possible successors are acceptable for
  # one step before the prediction collapses back to a single sequence.
  if len(trainingSequences) == 2:
    seqA, seqB = trainingSequences[0], trainingSequences[1]
    if (seqA[0] == seqB[0]).all():
      if (seqA[t] == seqB[t]).all() and (seqA[t+1] != seqB[t+1]).any():
        acceptablePatterns.append(seqA[t+1])
        acceptablePatterns.append(seqB[t+1])

  # Add the tracked sequence's patterns from t forward.
  tracked = trainingSequences[whichSequence]
  acceptablePatterns += [tracked[step] for step in xrange(t, upTo)]

  return acceptablePatterns
def _testSequence(trainingSequences,
                  nTrainingReps = 1,
                  numberOfCols = 40,
                  cellsPerColumn =5,
                  initialPerm =.8,
                  connectedPerm =.7,
                  minThreshold = 11,
                  newSynapseCount =5,
                  permanenceInc =.4,
                  permanenceDec =0.0,
                  permanenceMax =1,
                  globalDecay =0.0,
                  pamLength = 1000,
                  activationThreshold =5,
                  acceptablePatterns = [], # if empty, try to infer what they are
                  doPooling = False,
                  nAcceptable = -1,        # if doPooling, number of acceptable steps
                  noiseModel = None,
                  noiseLevel = 0,
                  doResets = True,
                  shouldFail = False,
                  testSequences = None,
                  predJustAfterHubOnly = None,
                  compareToPy = False,
                  nMultiStepPrediction = 0,
                  highOrder = False):
  """Train a TM on one training set, then run inference and score predictions.

  Builds a C++ TM (module-level TMClass — presumably BacktrackingTMCPP;
  confirm against the module preamble, which is outside this chunk), learns
  the sequences for nTrainingReps repetitions, then replays testSequences
  and compares each step's prediction against the set of acceptable
  patterns.  Optionally builds a parallel Python BacktrackingTM and asserts
  the two implementations stay in lockstep (compareToPy).

  Scoring convention (per inference step):
    numStrictErrors  -- at least one false-positive or false-negative column
    numFailures      -- more than 3 FP or more than 3 FN columns
    numPerfect       -- zero FP and zero FN

  Returns (numFailures, numStrictErrors, numPerfect, tm), or, when
  predJustAfterHubOnly is set, (numFailures, numStrictErrors, numPerfect,
  numPerfectAtHub, tm).

  NOTE(review): relies on module globals defined outside this chunk:
  TMClass, SEED, checkSynapseConsistency, VERBOSITY, rgen, logical_xor,
  findAcceptablePatterns, printOneTrainingVector, printAllTrainingSequences.
  """

  global BacktrackingTM, SEED, checkSynapseConsistency, VERBOSITY

  numPerfect = 0      # When every column is correct in the prediction
  numStrictErrors = 0 # When at least one column is incorrect
  numFailures = 0     # When > 2 columns are incorrect

  sequenceLength = len(trainingSequences[0])
  segUpdateValidDuration =5
  verbosity = VERBOSITY

  # override default maxSeqLEngth value for high-order sequences
  if highOrder:
    tm = TMClass(numberOfCols, cellsPerColumn,
                 initialPerm, connectedPerm,
                 minThreshold, newSynapseCount,
                 permanenceInc, permanenceDec, permanenceMax,
                 globalDecay, activationThreshold,
                 doPooling, segUpdateValidDuration,
                 seed=SEED, verbosity=verbosity,
                 checkSynapseConsistency=checkSynapseConsistency,
                 pamLength=pamLength,
                 maxSeqLength=0
                 )
  else:
    tm = TMClass(numberOfCols, cellsPerColumn,
                 initialPerm, connectedPerm,
                 minThreshold, newSynapseCount,
                 permanenceInc, permanenceDec, permanenceMax,
                 globalDecay, activationThreshold,
                 doPooling, segUpdateValidDuration,
                 seed=SEED, verbosity=verbosity,
                 checkSynapseConsistency=checkSynapseConsistency,
                 pamLength=pamLength
                 )

  if compareToPy:
    # Mirror TM in the pure-Python implementation, used only for the
    # step-by-step equivalence assertions below.
    # override default maxSeqLEngth value for high-order sequences
    if highOrder:
      py_tm = BacktrackingTM(numberOfCols, cellsPerColumn,
                             initialPerm, connectedPerm,
                             minThreshold, newSynapseCount,
                             permanenceInc, permanenceDec, permanenceMax,
                             globalDecay, activationThreshold,
                             doPooling, segUpdateValidDuration,
                             seed=SEED, verbosity=verbosity,
                             pamLength=pamLength,
                             maxSeqLength=0
                             )
    else:
      py_tm = BacktrackingTM(numberOfCols, cellsPerColumn,
                             initialPerm, connectedPerm,
                             minThreshold, newSynapseCount,
                             permanenceInc, permanenceDec, permanenceMax,
                             globalDecay, activationThreshold,
                             doPooling, segUpdateValidDuration,
                             seed=SEED, verbosity=verbosity,
                             pamLength=pamLength,
                             )

  # The training sets built by the helpers are wrapped in an outer list;
  # unwrap to get the list of sequences.
  trainingSequences = trainingSequences[0]
  if testSequences == None: testSequences = trainingSequences
  # If the caller gave no explicit acceptable patterns, derive them per
  # step with findAcceptablePatterns during inference.
  inferAcceptablePatterns = acceptablePatterns == []

  #--------------------------------------------------------------------------------
  # Learn
  for r in xrange(nTrainingReps):
    if VERBOSITY > 1:
      print "============= Learning round",r,"================="
    for sequenceNum, trainingSequence in enumerate(trainingSequences):
      if VERBOSITY > 1:
        print "============= New sequence ================="
      if doResets:
        tm.reset()
        if compareToPy:
          py_tm.reset()
      for t,x in enumerate(trainingSequence):
        # Optionally corrupt the training input with binomial xor noise.
        if noiseModel is not None and \
               'xor' in noiseModel and 'binomial' in noiseModel \
               and 'training' in noiseModel:
          noise_vector = rgen.binomial(len(x), noiseLevel, (len(x)))
          x = logical_xor(x, noise_vector)
        if VERBOSITY > 2:
          print "Time step",t, "learning round",r, "sequence number", sequenceNum
          print "Input: ",tm.printInput(x)
          print "NNZ:", x.nonzero()
        x = numpy.array(x).astype('float32')
        y = tm.learn(x)
        if compareToPy:
          py_y = py_tm.learn(x)
          # Periodic consistency check against the Python TM.
          if t % 25 == 0: # To track bugs, do that every iteration, but very slow
            assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True
        if VERBOSITY > 3:
          tm.printStates(printPrevious = (VERBOSITY > 4))
          print
      if VERBOSITY > 3:
        print "Sequence finished. Complete state after sequence"
        tm.printCells()
        print

  numPerfectAtHub = 0

  if compareToPy:
    print "End of training"
    assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True

  #--------------------------------------------------------------------------------
  # Infer
  if VERBOSITY > 1: print "============= Inference ================="

  for s,testSequence in enumerate(testSequences):
    if VERBOSITY > 1: print "============= New sequence ================="
    if doResets:
      tm.reset()
      if compareToPy:
        py_tm.reset()
    slen = len(testSequence)
    for t,x in enumerate(testSequence):
      # Generate noise (optional)
      if noiseModel is not None and \
             'xor' in noiseModel and 'binomial' in noiseModel \
             and 'inference' in noiseModel:
        noise_vector = rgen.binomial(len(x), noiseLevel, (len(x)))
        x = logical_xor(x, noise_vector)
      if VERBOSITY > 2: print "Time step",t, '\nInput:', tm.printInput(x)
      x = numpy.array(x).astype('float32')
      y = tm.infer(x)
      if compareToPy:
        py_y = py_tm.infer(x)
        assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True

      # if t == predJustAfterHubOnly:
      #   z = sum(y, axis = 1)
      #   print '\t\t',
      #   print ''.join('.' if z[i] == 0 else '1' for i in xrange(len(z)))

      if VERBOSITY > 3: tm.printStates(printPrevious = (VERBOSITY > 4),
                                       printLearnState = False); print

      # Score the n-step-ahead predictions against the actual future inputs.
      if nMultiStepPrediction > 0:
        y_ms = tm.predict(nSteps=nMultiStepPrediction)
        if VERBOSITY > 3:
          print "Multi step prediction at Time step", t
          for i in range(nMultiStepPrediction):
            print "Prediction at t+", i+1
            tm.printColConfidence(y_ms[i])
        # Error Checking
        for i in range(nMultiStepPrediction):
          predictedTimeStep = t+i+1
          if predictedTimeStep < slen:
            input = testSequence[predictedTimeStep].nonzero()[0]
            prediction = y_ms[i].nonzero()[0]
            foundInInput, totalActiveInInput, \
              missingFromInput, totalActiveInPrediction = \
              fdrutils.checkMatch(input, prediction, sparse=True)
            falseNegatives = totalActiveInInput - foundInInput
            falsePositives = missingFromInput
            if VERBOSITY > 2:
              print "Predition from %d to %d" % (t, t+i+1)
              print "\t\tFalse Negatives:", falseNegatives
              print "\t\tFalse Positivies:", falsePositives
            if falseNegatives > 0 or falsePositives > 0:
              numStrictErrors += 1
              if falseNegatives > 0 and VERBOSITY > 1:
                print "Multi step prediction from t=", t, "to t=", t+i+1,\
                      "false negative with error=",falseNegatives,
                print "out of", totalActiveInInput,"ones"
              if falsePositives > 0 and VERBOSITY > 1:
                print "Multi step prediction from t=", t, "to t=", t+i+1,\
                      "false positive with error=",falsePositives,
                print "out of",totalActiveInInput,"ones"
              if falsePositives > 3 or falseNegatives > 3:
                numFailures += 1
                # Analyze the failure if we care about it
                if VERBOSITY > 1 and not shouldFail:
                  print 'Input at t=', t
                  print '\t\t',; printOneTrainingVector(testSequence[t])
                  print 'Prediction for t=', t+i+1
                  print '\t\t',; printOneTrainingVector(y_ms[i])
                  print 'Actual input at t=', t+i+1
                  print '\t\t',; printOneTrainingVector(testSequence[t+i+1])

      # Score the one-step prediction for every step but the last.
      if t < slen-1:
        # If no acceptable patterns were passed to us, we need to infer them
        # for the current sequence and time step by looking at the testSequences.
        # nAcceptable is used to reduce the number of automatically determined
        # acceptable patterns.
        if inferAcceptablePatterns:
          acceptablePatterns = findAcceptablePatterns(tm, t, s, testSequences,
                                                      nAcceptable)

        scores = tm._checkPrediction([pattern.nonzero()[0] \
                                     for pattern in acceptablePatterns])
        falsePositives, falseNegatives = scores[0], scores[1]

        # We report an error if FN or FP is > 0.
        # We report a failure if number of FN or number of FP is > 2 for any
        # pattern. We also count the number of perfect predictions.
        if falseNegatives > 0 or falsePositives > 0:
          numStrictErrors += 1
          if falseNegatives > 0 and VERBOSITY > 1:
            print "Pattern",s,"time",t,\
                  "prediction false negative with error=",falseNegatives,
            print "out of",int(testSequence[t+1].sum()),"ones"
          if falsePositives > 0 and VERBOSITY > 1:
            print "Pattern",s,"time",t,\
                  "prediction false positive with error=",falsePositives,
            print "out of",int(testSequence[t+1].sum()),"ones"
          if falseNegatives > 3 or falsePositives > 3:
            numFailures += 1
            # Analyze the failure if we care about it
            if VERBOSITY > 1 and not shouldFail:
              print 'Test sequences'
              if len(testSequences) > 1:
                printAllTrainingSequences(testSequences, t+1)
              else:
                print '\t\t',; printOneTrainingVector(testSequence[t])
                print '\t\t',; printOneTrainingVector(testSequence[t+1])
              print 'Acceptable'
              for p in acceptablePatterns:
                print '\t\t',; printOneTrainingVector(p)
              print 'Output'
              diagnostic = ''
              output = sum(tm.currentOutput,axis=1)
              print '\t\t',; printOneTrainingVector(output)
        else:
          numPerfect += 1
          # Track perfect predictions at the hub exit point specifically.
          if predJustAfterHubOnly is not None and predJustAfterHubOnly == t:
            numPerfectAtHub += 1

  if predJustAfterHubOnly is None:
    return numFailures, numStrictErrors, numPerfect, tm
  else:
    return numFailures, numStrictErrors, numPerfect, numPerfectAtHub, tm
def TestB1(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B1"):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [1]:
print "Test "+name+" (sequence memory - 1 repetition - 1 sequence)"
for k in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 8,
newSynapseCount = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
activationThreshold = 8,
doPooling = False)
if numFailures == 0:
print "Test "+name+" ok"
else:
print "Test "+name+" failed"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestB7(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B7"):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [1]:
print "Test "+name+" (sequence memory - 4 repetition - 1 sequence - slow learning)"
for k in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = 4,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
minThreshold = 11,
newSynapseCount = 11,
activationThreshold = 11,
initialPerm = .2,
connectedPerm = .6,
permanenceInc = .2,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
doPooling = False)
if numFailures == 0:
print "Test "+name+" ok"
else:
print "Test "+name+" failed"
nFailed = nFailed + 1
print "numFailures=", numFailures,
print "numStrictErrors=", numStrictErrors,
print "numPerfect=", numPerfect
return nFailed
def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [1]: # TestC has multiple sequences
print "Test",name,"(sequence memory - second repetition of the same sequence" +\
" should not add synapses)"
print "Num patterns in sequence =", numUniquePatterns,
print "cellsPerColumn=",cellsPerColumn
for k in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
# Do one pass through the training set
numFailures1, numStrictErrors1, numPerfect1, tm1 = \
_testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 8,
newSynapseCount = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
activationThreshold = 8)
# Do two passes through the training set
numFailures, numStrictErrors, numPerfect, tm2 = \
_testSequence(trainingSet,
nTrainingReps = 2,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 8,
newSynapseCount = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
activationThreshold = 8)
# Check that training with a second pass did not result in more synapses
segmentInfo1 = tm1.getSegmentInfo()
segmentInfo2 = tm2.getSegmentInfo()
if (segmentInfo1[0] != segmentInfo2[0]) or \
(segmentInfo1[1] != segmentInfo2[1]) :
print "Training twice incorrectly resulted in more segments or synapses"
print "Number of segments: ", segmentInfo1[0], segmentInfo2[0]
numFailures += 1
if numFailures == 0:
print "Test",name,"ok"
else:
print "Test",name,"failed"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestB3(numUniquePatterns, nTests):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [2,5]:
print "Test B3 (sequence memory - 2 repetitions -", numSequences, "sequences)"
for k in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = 2,
numberOfCols = numCols,
cellsPerColumn = 4,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 11,
activationThreshold = 8,
doPooling = False)
if numFailures == 0:
print "Test B3 ok"
else:
print "Test B3 failed"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestH0(numOnes = 5,nMultiStepPrediction=0):
cellsPerColumn = 4
print "Higher order test 0 with cellsPerColumn=",cellsPerColumn
trainingSet = buildSimpleTrainingSet(numOnes)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = 20,
numberOfCols = trainingSet[0][0][0].size,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 6,
permanenceInc = .4,
permanenceDec = .2,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 5,
activationThreshold = 4,
doPooling = False,
nMultiStepPrediction=nMultiStepPrediction)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == len(trainingSet[0])*(len(trainingSet[0][0]) - 1):
print "Test PASS"
return 0
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestH(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2],
pctShared = 0.1, seqGenMode = 'shared sequence', nTrainingReps = 2,
shouldFail = False, compareToPy = False, highOrder = False):
nFailed = 0
subsequenceStartPos = 10
assert subsequenceStartPos < sequenceLength
for numSequences in nSequences:
print "Higher order test with sequenceLength=",sequenceLength,
print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,
print "numSequences=",numSequences, "pctShared=", pctShared
for k in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences = numSequences,
sequenceLength = sequenceLength,
pctShared = pctShared, seqGenMode = seqGenMode,
subsequenceStartPos = subsequenceStartPos,
numCols = numCols,
minOnes = 21, maxOnes = 25)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 12,
permanenceInc = .4,
permanenceDec = .1,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 11,
activationThreshold = 8,
doPooling = False,
shouldFail = shouldFail,
compareToPy = compareToPy,
highOrder = highOrder)
if numFailures == 0 and not shouldFail \
or numFailures > 0 and shouldFail:
print "Test PASS",
if shouldFail:
print '(should fail, and failed)'
else:
print
else:
print "Test FAILED"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestH11(numOnes = 3):
cellsPerColumn = 4
print "Higher order test 11 with cellsPerColumn=",cellsPerColumn
trainingSet = buildAlternatingTrainingSet(numOnes= 3)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = trainingSet[0][0][0].size,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 6,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 1,
activationThreshold = 1,
doPooling = False)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == len(trainingSet[0])*(len(trainingSet[0][0]) - 1):
print "Test PASS"
return 0
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2],
            pctShared = 0.02, seqGenMode = 'shared sequence',
            shouldFail = False):
  """
  Check that repeated training does not keep adding synapses: trains the
  same training set for 10, 2 and 1 repetitions and requires the 1-rep and
  2-rep TMs to end up with (nearly) the same segment/synapse counts.
  Returns the number of failed trials.

  Still need to test:
  Two overlapping sequences. OK to get new segments but check that we can
  get correct high order prediction after multiple reps.
  """
  print "Test H2a - second repetition of the same sequence should not add synapses"
  nFailed = 0
  subsequenceStartPos = 10
  assert subsequenceStartPos < sequenceLength

  for numSequences in nSequences:
    print "Higher order test with sequenceLength=",sequenceLength,
    print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,"numCols=", numCols
    print "numSequences=",numSequences, "pctShared=", pctShared,
    print "sharing mode=", seqGenMode

    # Test that configuration several times
    for k in range(nTests):
      trainingSet = buildTrainingSet(numSequences = numSequences,
                                     sequenceLength = sequenceLength,
                                     pctShared = pctShared, seqGenMode = seqGenMode,
                                     subsequenceStartPos = subsequenceStartPos,
                                     numCols = numCols,
                                     minOnes = 21, maxOnes = 25)

      # Run 1 of 3: many repetitions with slow learning (low initialPerm,
      # small increments, nonzero decrement).
      print "============== 10 ======================"
      numFailures3, numStrictErrors3, numPerfect3, tm3 = \
          _testSequence(trainingSet,
                        nTrainingReps = 10,
                        numberOfCols = numCols,
                        cellsPerColumn = cellsPerColumn,
                        initialPerm = .4,
                        connectedPerm = .7,
                        minThreshold = 12,
                        permanenceInc = .1,
                        permanenceDec = 0.1,
                        permanenceMax = 1,
                        globalDecay = .0,
                        newSynapseCount = 15,
                        activationThreshold = 12,
                        doPooling = False,
                        shouldFail = shouldFail)

      # Run 2 of 3: two repetitions, fast learning (high initialPerm).
      print "============== 2 ======================"
      numFailures, numStrictErrors, numPerfect, tm2 = \
          _testSequence(trainingSet,
                        nTrainingReps = 2,
                        numberOfCols = numCols,
                        cellsPerColumn = cellsPerColumn,
                        initialPerm = .8,
                        connectedPerm = .7,
                        minThreshold = 12,
                        permanenceInc = .1,
                        permanenceDec = 0,
                        permanenceMax = 1,
                        globalDecay = .0,
                        newSynapseCount = 15,
                        activationThreshold = 12,
                        doPooling = False,
                        shouldFail = shouldFail)

      # Run 3 of 3: a single repetition with the same parameters as run 2.
      print "============== 1 ======================"
      numFailures1, numStrictErrors1, numPerfect1, tm1 = \
          _testSequence(trainingSet,
                        nTrainingReps = 1,
                        numberOfCols = numCols,
                        cellsPerColumn = cellsPerColumn,
                        initialPerm = .8,
                        connectedPerm = .7,
                        minThreshold = 12,
                        permanenceInc = .1,
                        permanenceDec = 0,
                        permanenceMax = 1,
                        globalDecay = .0,
                        newSynapseCount = 15,
                        activationThreshold = 12,
                        doPooling = False,
                        shouldFail = shouldFail)

      # Check that training with a second pass did not result in more synapses.
      # Tolerances: up to 3 extra segments, up to 3*newSynapseCount synapses.
      segmentInfo1 = tm1.getSegmentInfo()
      segmentInfo2 = tm2.getSegmentInfo()
      if (abs(segmentInfo1[0] - segmentInfo2[0]) > 3) or \
         (abs(segmentInfo1[1] - segmentInfo2[1]) > 3*15) :
        print "Training twice incorrectly resulted in too many segments or synapses"
        print segmentInfo1
        print segmentInfo2
        print tm3.getSegmentInfo()
        tm3.trimSegments()
        print tm3.getSegmentInfo()
        print "Failures for 1, 2, and N reps"
        print numFailures1, numStrictErrors1, numPerfect1
        print numFailures, numStrictErrors, numPerfect
        print numFailures3, numStrictErrors3, numPerfect3
        numFailures += 1

      # Trial passes when failure presence matches the shouldFail flag.
      if numFailures == 0 and not shouldFail \
         or numFailures > 0 and shouldFail:
        print "Test PASS",
        if shouldFail:
          print '(should fail, and failed)'
        else:
          print
      else:
        print "Test FAILED"
        nFailed = nFailed + 1
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect

  return nFailed
def TestP(sequenceLength, nTests, cellsPerColumn, numCols =300, nSequences =[2],
pctShared = 0.1, seqGenMode = 'shared subsequence', nTrainingReps = 2):
nFailed = 0
newSynapseCount = 7
activationThreshold = newSynapseCount - 2
minOnes = 1.5 * newSynapseCount
maxOnes = .3 * numCols / nTrainingReps
for numSequences in nSequences:
print "Pooling test with sequenceLength=",sequenceLength,
print 'numCols=', numCols,
print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,
print "numSequences=",numSequences, "pctShared=", pctShared,
print "nTrainingReps=", nTrainingReps, "minOnes=", minOnes,
print "maxOnes=", maxOnes
for k in range(nTests): # Test that configuration several times
minOnes = 1.5 * newSynapseCount
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = pctShared, seqGenMode = seqGenMode,
subsequenceStartPos = 10,
numCols = numCols,
minOnes = minOnes, maxOnes = maxOnes)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
doPooling = True)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == numSequences*(sequenceLength - 1):
print "Test PASS"
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
nFailed = nFailed + 1
return nFailed
def TestHL0a(numOnes = 5):
cellsPerColumn = 4
newSynapseCount = 5
activationThreshold = newSynapseCount
print "HiLo test 0a with cellsPerColumn=",cellsPerColumn
trainingSet, testSet = buildHL0aTrainingSet()
numCols = trainingSet[0][0].size
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence([trainingSet],
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .2,
connectedPerm = .7,
permanenceInc = .2,
permanenceDec = 0.05,
permanenceMax = 1,
globalDecay = .0,
minThreshold = activationThreshold,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
pamLength = 2,
doPooling = False,
testSequences = testSet)
tm.trimSegments()
retAfter = tm.getSegmentInfo()
print retAfter[0], retAfter[1]
if retAfter[0] > 20:
print "Too many segments"
numFailures += 1
if retAfter[1] > 100:
print "Too many synapses"
numFailures += 1
if numFailures == 0:
print "Test HL0a ok"
return 0
else:
print "Test HL0a failed"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestHL0b(numOnes = 5):
cellsPerColumn = 4
newSynapseCount = 5
activationThreshold = newSynapseCount
print "HiLo test 0b with cellsPerColumn=",cellsPerColumn
trainingSet, testSet = buildHL0bTrainingSet()
numCols = trainingSet[0][0].size
print "numCols=", numCols
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence([trainingSet],
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .2,
connectedPerm = .7,
permanenceInc = .2,
permanenceDec = 0.05,
permanenceMax = 1,
globalDecay = .0,
minThreshold = activationThreshold,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
doPooling = False,
testSequences = testSet)
tm.trimSegments()
retAfter = tm.getSegmentInfo()
tm.printCells()
if numFailures == 0:
print "Test HL0 ok"
return 0
else:
print "Test HL0 failed"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestHL(sequenceLength, nTests, cellsPerColumn, numCols =200, nSequences =[2],
pctShared = 0.1, seqGenMode = 'shared subsequence', nTrainingReps = 3,
noiseModel = 'xor binomial in learning only', noiseLevel = 0.1,
hiloOn = True):
nFailed = 0
newSynapseCount = 8
activationThreshold = newSynapseCount
minOnes = 1.5 * newSynapseCount
maxOnes = 0.3 * numCols / nTrainingReps
if hiloOn == False:
minThreshold = 0.9
for numSequences in nSequences:
print "Hilo test with sequenceLength=", sequenceLength,
print "cellsPerColumn=", cellsPerColumn, "nTests=", nTests,
print "numSequences=", numSequences, "pctShared=", pctShared,
print "nTrainingReps=", nTrainingReps, "minOnes=", minOnes,
print "maxOnes=", maxOnes,
print 'noiseModel=', noiseModel, 'noiseLevel=', noiseLevel
for k in range(nTests): # Test that configuration several times
minOnes = 1.5 * newSynapseCount
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = pctShared, seqGenMode = seqGenMode,
subsequenceStartPos = 10,
numCols = numCols,
minOnes = minOnes, maxOnes = maxOnes)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .2,
connectedPerm = .7,
minThreshold = activationThreshold,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
permanenceInc = .2,
permanenceDec = 0.05,
permanenceMax = 1,
globalDecay = .0,
doPooling = False,
noiseModel = noiseModel,
noiseLevel = noiseLevel)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == numSequences*(sequenceLength - 1):
print "Test PASS"
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
nFailed = nFailed + 1
return nFailed
def worker(x):
"""Worker function to use in parallel hub capacity test below."""
cellsPerColumn, numSequences = x[0], x[1]
nTrainingReps = 1
sequenceLength = 10
numCols = 200
print 'Started', cellsPerColumn, numSequences
seqGenMode = 'shared subsequence, one pattern'
subsequenceStartPos = 5
trainingSet = buildTrainingSet(numSequences = numSequences,
sequenceLength = sequenceLength,
pctShared = .1, seqGenMode = seqGenMode,
subsequenceStartPos = subsequenceStartPos,
numCols = numCols,
minOnes = 21, maxOnes = 25)
numFailures1, numStrictErrors1, numPerfect1, atHub, tm = \
_testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 8,
activationThreshold = 8,
doPooling = False,
shouldFail = False,
predJustAfterHubOnly = 5)
seqGenMode = 'no shared subsequence'
trainingSet = buildTrainingSet(numSequences = numSequences,
sequenceLength = sequenceLength,
pctShared = 0, seqGenMode = seqGenMode,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 21, maxOnes = 25)
numFailures2, numStrictErrors2, numPerfect2, tm = \
_testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 8,
activationThreshold = 8,
doPooling = False,
shouldFail = False)
print 'Completed',
print cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub, \
numFailures2, numStrictErrors2, numPerfect2
return cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub, \
numFailures2, numStrictErrors2, numPerfect2
def hubCapacity():
"""
Study hub capacity. Figure out how many sequences can share a pattern
for a given number of cells per column till we the system fails.
DON'T RUN IN BUILD SYSTEM!!! (takes too long)
"""
from multiprocessing import Pool
import itertools
print "Hub capacity test"
# scalar value on predictions by looking at max perm over column
p = Pool(2)
results = p.map(worker, itertools.product([1,2,3,4,5,6,7,8], xrange(1,2000,200)))
f = open('results-numPerfect.11.22.10.txt', 'w')
for i,r in enumerate(results):
print >>f, '{%d,%d,%d,%d,%d,%d,%d,%d,%d},' % r
f.close()
def runTests(testLength = "short"):
# Data structure to collect results of tests
# TODO: put numFailures, numStrictErrors and numPerfect in here for reporting
tests = {}
# always run this one: if that one fails, we can't do anything
basicTest()
print
#---------------------------------------------------------------------------------
if testLength == "long":
tests['B1'] = TestB1(numUniquePatterns, nTests)
tests['B2'] = TestB2(numUniquePatterns, nTests)
tests['B8'] = TestB7(4, nTests, cellsPerColumn = 4, name="B8")
tests['B10'] = TestB2(numUniquePatterns, nTests, cellsPerColumn = 4,
name = "B10")
# Run these always
tests['B3'] = TestB3(numUniquePatterns, nTests)
tests['B6'] = TestB1(numUniquePatterns, nTests,
cellsPerColumn = 4, name="B6")
tests['B7'] = TestB7(numUniquePatterns, nTests)
print
#---------------------------------------------------------------------------------
#print "Test H11"
#tests['H11'] = TestH11()
if True:
print "Test H0"
tests['H0'] = TestH0(numOnes = 5)
print "Test H2"
#tests['H2'] = TestH(numUniquePatterns, nTests, cellsPerColumn = 4,
# nTrainingReps = numUniquePatterns, compareToPy = False)
print "Test H3"
tests['H3'] = TestH(numUniquePatterns, nTests,
numCols = 200,
cellsPerColumn = 20,
pctShared = 0.3, nTrainingReps=numUniquePatterns,
compareToPy = False,
highOrder = True)
print "Test H4" # Produces 3 false positives, but otherwise fine.
# TODO: investigate initial false positives?
tests['H4'] = TestH(numUniquePatterns, nTests,
cellsPerColumn = 20,
pctShared = 0.1,
seqGenMode='shared subsequence at beginning')
if True:
print "Test H0 with multistep prediction"
tests['H0_MS'] = TestH0(numOnes = 5, nMultiStepPrediction=2)
if True:
print "Test H1" # - Should Fail
tests['H1'] = TestH(numUniquePatterns, nTests,
cellsPerColumn = 1, nTrainingReps = 1,
shouldFail = True)
# Also fails in --long mode. See H2 above
#print "Test H2a"
#tests['H2a'] = TestH2a(numUniquePatterns,
# nTests, pctShared = 0.02, numCols = 300, cellsPerColumn = 4)
if False:
print "Test H5" # make sure seqs are good even with shuffling, fast learning
tests['H5'] = TestH(numUniquePatterns, nTests,
cellsPerColumn = 10,
pctShared = 0.0,
seqGenMode='shuffle, no shared subsequence')
print "Test H6" # should work
tests['H6'] = TestH(numUniquePatterns, nTests,
cellsPerColumn = 10,
pctShared = 0.4,
seqGenMode='shuffle, shared subsequence')
# Try with 2 sequences, then 3 sequences interleaved so that there is
# always a shared pattern, but it belongs to 2 different sequences each
# time!
#print "Test H7"
#tests['H7'] = TestH(numUniquePatterns, nTests,
# cellsPerColumn = 10,
# pctShared = 0.4,
# seqGenMode='shuffle, shared subsequence')
# tricky: if start predicting in middle of subsequence, several predictions
# are possible
#print "Test H8"
#tests['H8'] = TestH(numUniquePatterns, nTests,
# cellsPerColumn = 10,
# pctShared = 0.4,
# seqGenMode='shuffle, shared subsequence')
print "Test H9" # plot hub capacity
tests['H9'] = TestH(numUniquePatterns, nTests,
cellsPerColumn = 10,
pctShared = 0.4,
seqGenMode='shuffle, shared subsequence')
#print "Test H10" # plot
#tests['H10'] = TestH(numUniquePatterns, nTests,
# cellsPerColumn = 10,
# pctShared = 0.4,
# seqGenMode='shuffle, shared subsequence')
print
#---------------------------------------------------------------------------------
if False:
print "Test P1"
tests['P1'] = TestP(numUniquePatterns, nTests,
cellsPerColumn = 4,
pctShared = 0.0,
seqGenMode = 'no shared subsequence',
nTrainingReps = 3)
if False:
print "Test P2"
tests['P2'] = TestP(numUniquePatterns, nTests,
cellsPerColumn = 4,
pctShared = 0.0,
seqGenMode = 'no shared subsequence',
nTrainingReps = 5)
print "Test P3"
tests['P3'] = TestP(numUniquePatterns, nTests,
cellsPerColumn = 4,
pctShared = 0.0,
seqGenMode = 'no shared subsequence',
nSequences = [2] if testLength == 'short' else [2,5],
nTrainingReps = 5)
print "Test P4"
tests['P4'] = TestP(numUniquePatterns, nTests,
cellsPerColumn = 4,
pctShared = 0.0,
seqGenMode = 'shared subsequence',
nSequences = [2] if testLength == 'short' else [2,5],
nTrainingReps = 5)
print
#---------------------------------------------------------------------------------
if True:
print "Test HL0a"
tests['HL0a'] = TestHL0a(numOnes = 5)
if False:
print "Test HL0b"
tests['HL0b'] = TestHL0b(numOnes = 5)
print "Test HL1"
tests['HL1'] = TestHL(sequenceLength = 20,
nTests = nTests,
numCols = 100,
nSequences = [1],
nTrainingReps = 3,
cellsPerColumn = 1,
seqGenMode = 'no shared subsequence',
noiseModel = 'xor binomial in learning only',
noiseLevel = 0.1,
doResets = False)
print "Test HL2"
tests['HL2'] = TestHL(numUniquePatterns = 20,
nTests = nTests,
numCols = 200,
nSequences = [1],
nTrainingReps = 3,
cellsPerColumn = 1,
seqGenMode = 'no shared subsequence',
noiseModel = 'xor binomial in learning only',
noiseLevel = 0.1,
doResets = False)
print "Test HL3"
tests['HL3'] = TestHL(numUniquePatterns = 30,
nTests = nTests,
numCols = 200,
nSequences = [2],
pctShared = 0.66,
nTrainingReps = 3,
cellsPerColumn = 1,
seqGenMode = 'shared subsequence',
noiseModel = None,
noiseLevel = 0.0,
doResets = True)
print "Test HL4"
tests['HL4'] = TestHL(numUniquePatterns = 30,
nTests = nTests,
numCols = 200,
nSequences = [2],
pctShared = 0.66,
nTrainingReps = 3,
cellsPerColumn = 1,
seqGenMode = 'shared subsequence',
noiseModel = None,
noiseLevel = 0.0,
doResets = False)
print "Test HL5"
tests['HL5'] = TestHL(numUniquePatterns = 30,
nTests = nTests,
numCols = 200,
nSequences = [2],
pctShared = 0.66,
nTrainingReps = 3,
cellsPerColumn = 1,
seqGenMode = 'shared subsequence',
noiseModel = 'xor binomial in learning only',
noiseLevel = 0.1,
doResets = False)
print "Test HL6"
tests['HL6'] = nTests - TestHL(numUniquePatterns = 20,
nTests = nTests,
numCols = 200,
nSequences = [1],
nTrainingReps = 3,
cellsPerColumn = 1,
seqGenMode = 'no shared subsequence',
noiseModel = 'xor binomial in learning only',
noiseLevel = 0.1,
doResets = True,
hiloOn = False)
print
#---------------------------------------------------------------------------------
nFailures = 0
for k,v in tests.iteritems():
nFailures = nFailures + v
if nFailures > 0: # 1 to account for H1
print "There are failed tests"
print "Test\tn failures"
for k,v in tests.iteritems():
print k, "\t", v
assert 0
else:
print "All tests pass"
#---------------------------------------------------------------------------------
# Keep
if False:
import hotshot, hotshot.stats
prof = hotshot.Profile("profile.prof")
prof.runcall(TestB2, numUniquePatterns=100, nTests=2)
prof.close()
stats = hotshot.stats.load("profile.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(50)
if __name__=="__main__":

  # Warn loudly when the C++ implementation is excluded from this run.
  if not TEST_CPP_TM:
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    print "!! WARNING: C++ TM testing is DISABLED until it can be updated."
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"

  # Three different test lengths are passed in through the command line.
  # Developer tests use --short. Autobuild does not pass in anything.
  # Acceptance tests pass in --long. testLength reflects these possibilities
  # as "autobuild", "short", and "long"
  testLength = "autobuild"

  # Scan command line arguments to see what to do for the seed
  # TODO: make default be a random seed, once we're sure it will pass reliably!
  for i,arg in enumerate(sys.argv):
    if 'seed' in arg:
      try:
        # used specified seed
        SEED = int(sys.argv[i+1])
      except ValueError as e:
        # random seed
        SEED = numpy.random.randint(100)
    if 'verbosity' in arg:
      VERBOSITY = int(sys.argv[i+1])
    if 'help' in arg:
      print "TMTest.py --short|long --seed number|'rand' --verbosity number"
      sys.exit()
    if "short" in arg:
      testLength = "short"
    if "long" in arg:
      testLength = "long"

  rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random

  # Setup the severity and length of the tests
  if testLength == "short":
    numUniquePatterns = 50
    nTests = 1
  elif testLength == "autobuild":
    print "Running autobuild tests"
    numUniquePatterns = 50
    nTests = 1
  elif testLength == "long":
    numUniquePatterns = 100
    nTests = 3

  print "TM tests", testLength, "numUniquePatterns=", numUniquePatterns, "nTests=", nTests,
  print "seed=", SEED
  print

  # In --long mode, first exercise the pure-Python TM implementation.
  # NOTE(review): TMClass and checkSynapseConsistency appear to be module
  # globals read by runTests() — confirm against the rest of the file.
  if testLength == "long":
    print 'Testing Python TM'
    TMClass = BacktrackingTM
    runTests(testLength)

  # NOTE(review): both branches currently assign False, so the if/else is
  # redundant until the long-mode consistency checking is re-enabled.
  if testLength != 'long':
    checkSynapseConsistency = False
  else:
    # Setting this to True causes test to take way too long
    # Temporarily turned off so we can investigate
    checkSynapseConsistency = False

  # Then exercise the C++ TM implementation (unless disabled above).
  if TEST_CPP_TM:
    print 'Testing C++ TM'
    TMClass = BacktrackingTMCPP
    runTests(testLength)
| 88,330 | Python | .py | 1,834 | 35.529989 | 107 | 0.568193 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,163 | tm_overlapping_sequences_test.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/tm_overlapping_sequences_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Overlapping sequences test
===========================
Test learning of sequences with shared (overlapping) subsequences.
Test 1 - Test with fast learning, make sure PAM allows us to train with fewer
repeats of the training data.
Test 2 - Test with slow learning, make sure PAM allows us to train with fewer
repeats of the training data.
Test 3 - Test with slow learning, some overlap in the patterns, and TM
thresholds of 80% of newSynapseCount
Test 4 - Test with "Forbes-like" data. A bunch of sequences of lengths between 2
and 10 elements long.
"""
import numpy
import pprint
import random
import sys
import unittest2 as unittest
from optparse import OptionParser
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
from nupic.support.unittesthelpers import testcasebase
VERBOSITY = 0 # how chatty the unit tests should be
SEED = 35 # the random seed used throughout
# Whether to only run the short tests.
SHORT = True
# If set to 0 the CPP TM will not be tested
INCLUDE_CPP_TM = 1 # Also test with CPP TM
def printOneTrainingVector(x):
  """Render a pattern on one line: '1' for each active bit, '.' otherwise."""
  rendering = ''.join('1' if bit != 0 else '.' for bit in x)
  print(rendering)
def printAllTrainingSequences(trainingSequences, upTo = 99999):
  """Print each training sequence, one pattern per line.

  Parameters:
  -----------------------------------------------------
  trainingSequences:  list of sequences; each sequence is a list of pattern
                      vectors
  upTo:               print at most this many sequences. (Bug fix: this
                      parameter was previously accepted but ignored.)
  """
  for i, trainingSequence in enumerate(trainingSequences):
    if i >= upTo:
      break
    print("============= Sequence %s =================" % i)
    for pattern in trainingSequence:
      printOneTrainingVector(pattern)
def getSimplePatterns(numOnes, numPatterns, patternOverlap=0):
  """Very simple patterns. Each pattern has numOnes consecutive
  bits on. The amount of overlap between consecutive patterns is
  configurable, via the patternOverlap parameter.

  Parameters:
  -----------------------------------------------------------------------
  numOnes:        Number of bits ON in each pattern
  numPatterns:    Number of unique patterns to generate
  patternOverlap: Number of bits of overlap between each successive pattern
  retval:         list of float32 numpy arrays, one per pattern

  Raises:
  ValueError if patternOverlap >= numOnes (patterns must introduce at least
  one new bit each).
  """
  # Validate with a real exception: a bare assert is stripped under -O.
  if patternOverlap >= numOnes:
    raise ValueError(
        "patternOverlap (%d) must be smaller than numOnes (%d)"
        % (patternOverlap, numOnes))

  # How many new bits are introduced in each successive pattern?
  numNewBitsInEachPattern = numOnes - patternOverlap
  numCols = numNewBitsInEachPattern * numPatterns + patternOverlap

  patterns = []
  for i in range(numPatterns):
    x = numpy.zeros(numCols, dtype='float32')
    startBit = i * numNewBitsInEachPattern
    x[startBit:startBit + numOnes] = 1
    patterns.append(x)

  return patterns
def buildOverlappedSequences( numSequences = 2,
                              seqLen = 5,
                              sharedElements = [3,4],
                              numOnBitsPerPattern = 3,
                              patternOverlap = 0,
                              seqOverlap = 0,
                              **kwargs
                              ):
  """ Create training sequences that share some elements in the middle.

  Parameters:
  -----------------------------------------------------
  numSequences:         Number of unique training sequences to generate
  seqLen:               Overall length of each sequence
  sharedElements:       Which element indices of each sequence are shared. These
                          will be in the range between 0 and seqLen-1
  numOnBitsPerPattern:  Number of ON bits in each TM input pattern
  patternOverlap:       Max number of bits of overlap between any 2 patterns
  retval:               (numCols, trainingSequences)
                          numCols - width of the patterns
                          trainingSequences - a list of training sequences
  """

  # Pattern indices [0, numShared) are reserved for the shared middle of
  # every sequence; the rest are handed out, in order, to the unique slots.
  numShared = len(sharedElements)
  numUnique = seqLen - numShared
  totalPatterns = numShared + numUnique * numSequences

  # Create the table of patterns and derive the required column count.
  patterns = getSimplePatterns(numOnBitsPerPattern, totalPatterns,
                               patternOverlap)
  numCols = len(patterns[0])

  # -----------------------------------------------------------------------
  # Create the training sequences. Unique pattern indices are consumed
  # across all sequences in order; shared indices restart per sequence.
  uniqueIndexIter = iter(range(numShared, totalPatterns))

  trainingSequences = []
  for _ in range(numSequences):
    sharedIndexIter = iter(range(numShared))
    sequence = []
    for position in range(seqLen):
      if position in sharedElements:
        patternIdx = next(sharedIndexIter)
      else:
        patternIdx = next(uniqueIndexIter)
      sequence.append(patterns[patternIdx])
    trainingSequences.append(sequence)

  if VERBOSITY >= 3:
    print("\nTraining sequences")
    printAllTrainingSequences(trainingSequences)

  return (numCols, trainingSequences)
def buildSequencePool(numSequences = 10,
                      seqLen = [2,3,4],
                      numPatterns = 5,
                      numOnBitsPerPattern = 3,
                      patternOverlap = 0,
                      **kwargs
                      ):
  """ Create a bunch of sequences of various lengths, all built from
  a fixed set of patterns.

  Parameters:
  -----------------------------------------------------
  numSequences:         Number of training sequences to generate
  seqLen:               List of possible sequence lengths
  numPatterns:          How many possible patterns there are to use within
                          sequences
  numOnBitsPerPattern:  Number of ON bits in each TM input pattern
  patternOverlap:       Max number of bits of overlap between any 2 patterns
  retval:               (numCols, trainingSequences)
                          numCols - width of the patterns
                          trainingSequences - a list of training sequences
  """

  # Create the table of patterns and derive the required column count.
  patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)
  numCols = len(patterns[0])

  # -----------------------------------------------------------------------
  # Each sequence draws a random length, then picks each element at random
  # from the pattern table. The exact random.choice calls are kept so that
  # seeded runs reproduce the original sequences.
  trainingSequences = []
  for _ in xrange(numSequences):
    length = random.choice(seqLen)
    sequence = [patterns[random.choice(xrange(numPatterns))]
                for _ in xrange(length)]
    trainingSequences.append(sequence)

  if VERBOSITY >= 3:
    print("\nTraining sequences")
    printAllTrainingSequences(trainingSequences)

  return (numCols, trainingSequences)
def createTMs(includeCPP = True,
              includePy = True,
              numCols = 100,
              cellsPerCol = 4,
              activationThreshold = 3,
              minThreshold = 3,
              newSynapseCount = 3,
              initialPerm = 0.6,
              permanenceInc = 0.1,
              permanenceDec = 0.0,
              globalDecay = 0.0,
              pamLength = 0,
              checkSynapseConsistency = True,
              maxInfBacktrack = 0,
              maxLrnBacktrack = 0,
              **kwargs
              ):

  """Create one or more TM instances, placing each into a dict keyed by
  name ('CPP' and/or 'PY ').

  Parameters:
  ------------------------------------------------------------------
  retval:   tms - dict of TM instances
  """

  # Keep this fixed:
  connectedPerm = 0.5

  # Both implementations take the same constructor arguments; build the
  # common set once instead of duplicating two long argument lists.
  commonKwargs = dict(
      numberOfCols=numCols,
      cellsPerColumn=cellsPerCol,
      initialPerm=initialPerm,
      connectedPerm=connectedPerm,
      minThreshold=minThreshold,
      newSynapseCount=newSynapseCount,
      permanenceInc=permanenceInc,
      permanenceDec=permanenceDec,
      activationThreshold=activationThreshold,
      globalDecay=globalDecay,
      burnIn=1,
      seed=SEED,
      verbosity=VERBOSITY,
      collectStats=True,
      pamLength=pamLength,
      maxInfBacktrack=maxInfBacktrack,
      maxLrnBacktrack=maxLrnBacktrack,
  )

  tms = dict()

  if includeCPP:
    if VERBOSITY >= 2:
      print("Creating BacktrackingTMCPP instance")
    cpp_tm = BacktrackingTMCPP(checkSynapseConsistency=checkSynapseConsistency,
                               **commonKwargs)
    # Ensure we are copying over learning states for TMDiff
    cpp_tm.retrieveLearningStates = True
    tms['CPP'] = cpp_tm

  if includePy:
    if VERBOSITY >= 2:
      print("Creating PY TM instance")
    tms['PY '] = BacktrackingTM(**commonKwargs)

  return tms
def assertNoTMDiffs(tms):
  """
  Check for diffs among the TM instances in the passed in tms dict and
  raise an assert if any are detected

  Parameters:
  ---------------------------------------------------------------------
  tms:                  dict of TM instances (at most 2 are supported)

  Raises:
  ValueError if more than 2 TM instances are passed in.
  """

  if len(tms) == 1:
    return

  if len(tms) > 2:
    # Bug fix: `raise "..."` raised a string, which is illegal (string
    # exceptions were removed from Python) and produced a confusing
    # TypeError instead of the intended error.
    raise ValueError("Not implemented for more than 2 TMs")

  same = fdrutils.tmDiff2(*tms.values(), verbosity=VERBOSITY)
  assert same
  return
def evalSequences(tms,
                  trainingSequences,
                  testSequences = None,
                  nTrainRepetitions = 1,
                  doResets = True,
                  **kwargs):

  """Train the TMs on the entire training set for nTrainRepetitions in a row.
  Then run the test set through inference once and return the inference stats.

  Parameters:
  ---------------------------------------------------------------------
  tms:                  dict of TM instances
  trainingSequences:    list of training sequences. Each sequence is a list
                        of TM input patterns
  testSequences:        list of test sequences. If None, we will test against
                        the trainingSequences
  nTrainRepetitions:    Number of times to run the training set through the TM
  doResets:             If true, send a reset to the TM between each sequence
  kwargs:               ignored; allows callers to pass a merged parameter dict

  retval:               dict mapping TM name -> stats dict from tm.getStats()
  """

  # If no test sequence is specified, use the first training sequence
  # NOTE(review): `== None` should be `is None` (works here, but identity
  # comparison is the correct idiom).
  if testSequences == None:
    testSequences = trainingSequences

  # First TM instance is used by default for verbose printing of input values,
  # etc.
  firstTP = tms.values()[0]

  assertNoTMDiffs(tms)

  # =====================================================================
  # Loop through the training set nTrainRepetitions times
  # ==========================================================================
  for trainingNum in xrange(nTrainRepetitions):

    if VERBOSITY >= 2:
      print "\n##############################################################"
      print "################# Training round #%d of %d #################" \
                % (trainingNum, nTrainRepetitions)
      for (name,tm) in tms.iteritems():
        print "TM parameters for %s: " % (name)
        print "---------------------"
        tm.printParameters()
        print

    # ======================================================================
    # Loop through the sequences in the training set
    # NOTE(review): this takes len(testSequences) while iterating over
    # trainingSequences; the two only agree because testSequences defaults to
    # trainingSequences. Display-only, but should be len(trainingSequences).
    numSequences = len(testSequences)
    for sequenceNum, trainingSequence in enumerate(trainingSequences):
      numTimeSteps = len(trainingSequence)
      if VERBOSITY >= 2:
        print "\n================= Sequence #%d of %d ================" \
                      % (sequenceNum, numSequences)
      if doResets:
        for tm in tms.itervalues():
          tm.reset()

      # --------------------------------------------------------------------
      # Train each element of the sequence
      for t, x in enumerate(trainingSequence):

        # Print Verbose info about this element
        if VERBOSITY >= 2:
          print
        if VERBOSITY >= 3:
          print "------------------------------------------------------------"
          print "--------- sequence: #%d of %d, timeStep: #%d of %d -----------" \
                % (sequenceNum, numSequences, t, numTimeSteps)
          firstTP.printInput(x)
          print "input nzs:", x.nonzero()

        # Train in this element
        x = numpy.array(x).astype('float32')
        for tm in tms.itervalues():
          tm.learn(x, enableInference=True)

        # Print the input and output states
        if VERBOSITY >= 3:
          for (name,tm) in tms.iteritems():
            print "I/O states of %s TM:" % (name)
            print "-------------------------------------",
            tm.printStates(printPrevious = (VERBOSITY >= 5))
            print

        assertNoTMDiffs(tms)

        # Print out number of columns that weren't predicted
        if VERBOSITY >= 2:
          for (name,tm) in tms.iteritems():
            stats = tm.getStats()
            print "# of unpredicted columns for %s TM: %d of %d" \
                % (name, stats['curMissing'], x.sum())
            # A column "bursts" when all of its cells are active, i.e. the
            # min over the cells axis is 1 for that column.
            numBurstingCols = tm.infActiveState['t'].min(axis=1).sum()
            print "# of bursting columns for %s TM: %d of %d" \
                % (name, numBurstingCols, x.sum())

      # Print the trained cells
      if VERBOSITY >= 4:
        print "Sequence %d finished." % (sequenceNum)
        for (name,tm) in tms.iteritems():
          print "All cells of %s TM:" % (name)
          print "-------------------------------------",
          tm.printCells()
          print

    # --------------------------------------------------------------------
    # Done training all sequences in this round, print the total number of
    # missing, extra columns and make sure it's the same among the TMs
    if VERBOSITY >= 2:
      print
    prevResult = None
    for (name,tm) in tms.iteritems():
      stats = tm.getStats()
      if VERBOSITY >= 1:
        print "Stats for %s TM over all sequences for training round #%d of %d:" \
              % (name, trainingNum, nTrainRepetitions)
        print "  total missing:", stats['totalMissing']
        print "  total extra:", stats['totalExtra']

      # All TM implementations must agree on the per-round totals.
      if prevResult is None:
        prevResult = (stats['totalMissing'], stats['totalExtra'])
      else:
        assert (stats['totalMissing'] == prevResult[0])
        assert (stats['totalExtra'] == prevResult[1])

      tm.resetStats()

  # =====================================================================
  # Finish up learning. All TMs must trim the same number of segments and
  # synapses, otherwise the implementations have diverged.
  if VERBOSITY >= 3:
    print "Calling trim segments"

  prevResult = None
  for tm in tms.itervalues():
    nSegsRemoved, nSynsRemoved = tm.trimSegments()
    if prevResult is None:
      prevResult = (nSegsRemoved, nSynsRemoved)
    else:
      assert (nSegsRemoved == prevResult[0])
      assert (nSynsRemoved == prevResult[1])

  assertNoTMDiffs(tms)

  if VERBOSITY >= 4:
    print "Training completed. Complete state:"
    for (name,tm) in tms.iteritems():
      print "%s:" % (name)
      tm.printCells()
      print

  # ==========================================================================
  # Infer
  # ==========================================================================
  if VERBOSITY >= 2:
    print "\n##############################################################"
    print "########################## Inference #########################"

  # Reset stats in all TMs
  for tm in tms.itervalues():
    tm.resetStats()

  # -------------------------------------------------------------------
  # Loop through the test sequences
  numSequences = len(testSequences)
  for sequenceNum, testSequence in enumerate(testSequences):
    numTimeSteps = len(testSequence)

    # Identify this sequence
    if VERBOSITY >= 2:
      print "\n================= Sequence %d of %d ================" \
                    % (sequenceNum, numSequences)

    # Send in the rest
    if doResets:
      for tm in tms.itervalues():
        tm.reset()

    # -------------------------------------------------------------------
    # Loop through the elements of this sequence
    for t,x in enumerate(testSequence):

      # Print verbose info about this element
      if VERBOSITY >= 2:
        print
      if VERBOSITY >= 3:
        print "------------------------------------------------------------"
        print "--------- sequence: #%d of %d, timeStep: #%d of %d -----------" \
              % (sequenceNum, numSequences, t, numTimeSteps)
        firstTP.printInput(x)
        print "input nzs:", x.nonzero()

      # Infer on this element
      for tm in tms.itervalues():
        tm.infer(x)

      assertNoTMDiffs(tms)

      # Print out number of columns that weren't predicted
      if VERBOSITY >= 2:
        for (name,tm) in tms.iteritems():
          stats = tm.getStats()
          print "# of unpredicted columns for %s TM: %d of %d" \
              % (name, stats['curMissing'], x.sum())

      # Debug print of internal state
      if VERBOSITY >= 3:
        for (name,tm) in tms.iteritems():
          print "I/O states of %s TM:" % (name)
          print "-------------------------------------",
          tm.printStates(printPrevious = (VERBOSITY >= 5),
                         printLearnState = False)
          print

    # Done with this sequence

    # Debug print of all stats of the TMs
    if VERBOSITY >= 4:
      print
      for (name,tm) in tms.iteritems():
        print "Interim internal stats for %s TM:" % (name)
        print "---------------------------------"
        pprint.pprint(tm.getStats())
        print

  if VERBOSITY >= 2:
    print "\n##############################################################"
    print "####################### Inference Done #######################"

  # Get the overall stats for each TM and return them
  tpStats = dict()
  for (name,tm) in tms.iteritems():
    tpStats[name] = stats = tm.getStats()
    if VERBOSITY >= 2:
      print "Stats for %s TM over all sequences:" % (name)
      print "  total missing:", stats['totalMissing']
      print "  total extra:", stats['totalExtra']

  for (name,tm) in tms.iteritems():
    if VERBOSITY >= 3:
      print "\nAll internal stats for %s TM:" % (name)
      print "-------------------------------------",
      pprint.pprint(tpStats[name])
      print

  return tpStats
def _testConfig(baseParams, expMissingMin=0, expMissingMax=0, **mods):
  """
  Build up a set of sequences, create the TM(s), train them, test them,
  and check that we got the expected number of missing predictions during
  inference.

  Parameters:
  -----------------------------------------------------------------------
  baseParams:      dict of all of the parameters for building sequences,
                   creating the TMs, and training and testing them. This
                   gets updated from 'mods' before we use it.
  expMissingMin:   Minimum number of expected missing predictions during testing.
  expMissingMax:   Maximum number of expected missing predictions during testing.
  mods:            dict of modifications to make to the baseParams.
  """

  # Merge the per-test overrides into a private copy of the base parameters.
  params = dict(baseParams, **mods)

  # --------------------------------------------------------------------
  # Generate the training sequences.
  (numCols, trainingSequences) = params['seqFunction'](**params)

  # --------------------------------------------------------------------
  # Create the TMs, sizing them from the generated patterns when needed.
  if params['numCols'] is None:
    params['numCols'] = numCols
  tms = createTMs(**params)

  # --------------------------------------------------------------------
  # Train, then run inference and collect the per-TM stats.
  tpStats = evalSequences(tms=tms,
                          trainingSequences=trainingSequences,
                          testSequences=None,
                          **params)

  # -----------------------------------------------------------------------
  # Verify the number of missing predictions falls in the expected band.
  for (name, stats) in tpStats.iteritems():
    missing = stats['totalMissing']
    print("Detected %d missing predictions overall during inference"
          % missing)
    if expMissingMin is not None and missing < expMissingMin:
      print("FAILURE: Expected at least %d total missing but got %d"
            % (expMissingMin, missing))
      assert False
    if expMissingMax is not None and missing > expMissingMax:
      print("FAILURE: Expected at most %d total missing but got %d"
            % (expMissingMax, missing))
      assert False

  return True
class TMOverlappingSeqsTest(testcasebase.TestCaseBase):
  # Each test builds sequences, trains the Python (and optionally C++) TMs
  # via _testConfig(), and checks the number of missing predictions at
  # inference time, both without PAM (pamLength=1) and with PAM enabled.

  def testFastLearning(self):
    """
    Test with fast learning, make sure PAM allows us to train with fewer
    repeats of the training data.
    """
    numOnBitsPerPattern = 3

    # ================================================================
    # Base params
    baseParams = dict(
      # Sequence generation
      seqFunction = buildOverlappedSequences,
      numSequences = 2,
      seqLen = 10,
      sharedElements = [2,3],
      numOnBitsPerPattern = numOnBitsPerPattern,

      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,     # filled in based on generated sequences
      activationThreshold = numOnBitsPerPattern,
      minThreshold = numOnBitsPerPattern,
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.6,  # fast learning: high initial permanence
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,

      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )

    # ================================================================
    # Run various configs

    # No PAM, with 3 repetitions, still missing predictions
    print "\nRunning without PAM, 3 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=20,
                                expMissingMax=None, pamLength=1,
                                nTrainRepetitions=3))

    # With PAM, with only 3 repetitions, 0 missing predictions
    print "\nRunning with PAM, 3 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0,
                                expMissingMax=0, pamLength=5,
                                nTrainRepetitions=3))


  def testSlowLearning(self):
    """
    Test with slow learning, make sure PAM allows us to train with fewer
    repeats of the training data.
    """
    numOnBitsPerPattern = 3

    # ================================================================
    # Base params
    baseParams = dict(
      # Sequence generation
      seqFunction = buildOverlappedSequences,
      numSequences = 2,
      seqLen = 10,
      sharedElements = [2,3],
      numOnBitsPerPattern = numOnBitsPerPattern,

      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,      # filled in based on generated sequences
      activationThreshold = numOnBitsPerPattern,
      minThreshold = numOnBitsPerPattern,
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.11,  # slow learning: just above permanence floor
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,

      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )

    # ================================================================
    # Run various configs

    # No PAM, requires 40 repetitions

    # No PAM, with 10 repetitions, still missing predictions
    print "\nRunning without PAM, 10 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=10,
                                expMissingMax=None, pamLength=1,
                                nTrainRepetitions=10))

    # With PAM, with only 10 repetitions, 0 missing predictions
    print "\nRunning with PAM, 10 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0,
                                expMissingMax=0, pamLength=6,
                                nTrainRepetitions=10))


  def testSlowLearningWithOverlap(self):
    """
    Test with slow learning, some overlap in the patterns, and TM thresholds
    of 80% of newSynapseCount

    Make sure PAM allows us to train with fewer repeats of the training data.
    """
    # Cannot use skipIf decorator because it reads SHORT before it is set.
    if SHORT:
      self.skipTest("Test skipped by default. Enable with --long.")

    numOnBitsPerPattern = 5

    # ================================================================
    # Base params
    baseParams = dict(
      # Sequence generation
      seqFunction = buildOverlappedSequences,
      numSequences = 2,
      seqLen = 10,
      sharedElements = [2,3],
      numOnBitsPerPattern = numOnBitsPerPattern,
      patternOverlap = 2,

      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,      # filled in based on generated sequences
      activationThreshold = int(0.8 * numOnBitsPerPattern),
      minThreshold = int(0.8 * numOnBitsPerPattern),
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.11,
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,

      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )

    # ================================================================
    # Run various configs

    # No PAM, with 10 repetitions, still missing predictions
    print "\nRunning without PAM, 10 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=10,
                                expMissingMax=None, pamLength=1,
                                nTrainRepetitions=10))

    # With PAM, with only 10 repetitions, 0 missing predictions
    print "\nRunning with PAM, 10 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0,
                                expMissingMax=0, pamLength=6,
                                nTrainRepetitions=10))


  def testForbesLikeData(self):
    """
    Test with "Forbes-like" data. A bunch of sequences of lengths between 2
    and 10 elements long.

    We will test with both fast and slow learning.

    Make sure PAM allows us to train with fewer repeats of the training data.
    """
    # Cannot use skipIf decorator because it reads SHORT before it is set.
    if SHORT:
      self.skipTest("Test skipped by default. Enable with --long.")

    numOnBitsPerPattern = 3

    # ================================================================
    # Base params
    baseParams = dict(
      # Sequence generation
      seqFunction = buildSequencePool,
      numSequences = 20,
      seqLen = [3,10],
      numPatterns = 10,
      numOnBitsPerPattern = numOnBitsPerPattern,
      patternOverlap = 1,

      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,      # filled in based on generated sequences
      activationThreshold = int(0.8 * numOnBitsPerPattern),
      minThreshold = int(0.8 * numOnBitsPerPattern),
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.51,
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,
      checkSynapseConsistency = False,

      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )

    # ================================================================
    # Run various configs

    # Fast mode, no PAM
    print "\nRunning without PAM, fast learning, 2 repetitions of the " \
          "training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=50,
                                expMissingMax=None, pamLength=1,
                                nTrainRepetitions=2))

    # Fast mode, with PAM
    print "\nRunning with PAM, fast learning, 2 repetitions of the " \
          "training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0,
                                expMissingMax=0, pamLength=5,
                                nTrainRepetitions=2))

    # Slow mode, no PAM
    print "\nRunning without PAM, slow learning, 8 repetitions of the " \
          "training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=1,
                                expMissingMax=None, initialPerm=0.31,
                                pamLength=1, nTrainRepetitions=8))

    # Slow mode, with PAM
    print "\nRunning with PAM, slow learning, 8 repetitions of the " \
          "training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0,
                                expMissingMax=0, initialPerm=0.31, pamLength=5,
                                nTrainRepetitions=8))
if __name__=="__main__":

  # Process command line arguments
  parser = OptionParser()
  parser.add_option(
      "--verbosity", default=VERBOSITY, type="int",
      help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
  parser.add_option("--seed", default=SEED, type="int",
                    help="Random seed to use [default: %default].")
  parser.add_option("--short", action="store_true", default=True,
                    help="Run short version of the tests [default: %default].")
  parser.add_option("--long", action="store_true", default=False,
                    help="Run long version of the tests [default: %default].")
  (options, args) = parser.parse_args()

  # Rebind the module-level configuration globals from the parsed options.
  # NOTE(review): --short defaults to True and is never consulted; only
  # --long actually changes SHORT.
  SEED = options.seed
  VERBOSITY = options.verbosity
  SHORT = not options.long

  # Seed the random number generators
  rgen = numpy.random.RandomState(SEED)
  random.seed(SEED)

  if not INCLUDE_CPP_TM:
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    print "!! WARNING: C++ TM testing is DISABLED until it can be updated."
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"

  # Form the command line for the unit test framework.
  args = [sys.argv[0]] + args
  unittest.main(argv=args, verbosity=VERBOSITY)
| 32,242 | Python | .py | 724 | 35.645028 | 94 | 0.565095 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,164 | tutorial_temporal_memory_test.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/tutorial_temporal_memory_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import nupic.bindings.algorithms
import pprint
import unittest
from abc import ABCMeta
import nupic.algorithms.temporal_memory
from nupic.data.generators.pattern_machine import ConsecutivePatternMachine
from nupic.support.unittesthelpers.abstract_temporal_memory_test import AbstractTemporalMemoryTest
class TutorialTemporalMemoryTest(AbstractTemporalMemoryTest):
__metaclass__ = ABCMeta
VERBOSITY = 1
def getPatternMachine(self):
return ConsecutivePatternMachine(6, 1)
def getDefaultTMParams(self):
return {
"columnDimensions": (6,),
"cellsPerColumn": 4,
"initialPermanence": 0.3,
"connectedPermanence": 0.5,
"minThreshold": 1,
"maxNewSynapseCount": 6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.05,
"activationThreshold": 1,
}
def testFirstOrder(self):
"""Basic first order sequences"""
self.init()
sequence = self.sequenceMachine.generateFromNumbers([0, 1, 2, 3, None])
self.feedTM(sequence)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 0)
self.feedTM(sequence, num=2)
self.feedTM(sequence)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
self.feedTM(sequence, num=4)
self.feedTM(sequence)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
def testHighOrder(self):
"""High order sequences (in order)"""
self.init()
sequenceA = self.sequenceMachine.generateFromNumbers([0, 1, 2, 3, None])
sequenceB = self.sequenceMachine.generateFromNumbers([4, 1, 2, 5, None])
self.feedTM(sequenceA, num=5)
self.feedTM(sequenceA, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
self.feedTM(sequenceB)
self.feedTM(sequenceB, num=2)
self.feedTM(sequenceB, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[1]), 1)
self.feedTM(sequenceB, num=3)
self.feedTM(sequenceB, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[2]), 1)
self.feedTM(sequenceB, num=3)
self.feedTM(sequenceB, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
self.feedTM(sequenceA, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
self.assertEqual(len(self.tm.mmGetTracePredictedInactiveColumns().data[3]), 1)
self.feedTM(sequenceA, num=10)
self.feedTM(sequenceA, learn=False)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
# TODO: Requires some form of synaptic decay to forget the ABC=>Y
# transition that's initially formed
# self.assertEqual(len(self.tm.mmGetTracePredictedInactiveColumns().data[3]), 0)
def testHighOrderAlternating(self):
"""High order sequences (alternating)"""
self.init()
sequence = self.sequenceMachine.generateFromNumbers([0, 1, 2, 3, None])
sequence += self.sequenceMachine.generateFromNumbers([4, 1, 2, 5, None])
self.feedTM(sequence)
self.feedTM(sequence, num=10)
self.feedTM(sequence, learn=False)
# TODO: Requires some form of synaptic decay to forget the
# ABC=>Y and XBC=>D transitions that are initially formed
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 1)
# self.assertEqual(len(self.tm.mmGetTracePredictedInactiveColumns().data[3]), 0)
self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[7]), 1)
# self.assertEqual(len(self.tm.mmGetTracePredictedInactiveColumns().data[7]), 0)
def testEndlesslyRepeating(self):
"""Endlessly repeating sequence of 2 elements"""
self.init({"columnDimensions": [2]})
sequence = self.sequenceMachine.generateFromNumbers([0, 1])
for _ in xrange(7):
self.feedTM(sequence)
self.feedTM(sequence, num=50)
def testEndlesslyRepeatingWithNoNewSynapses(self):
"""Endlessly repeating sequence of 2 elements with maxNewSynapseCount=1"""
self.init({"columnDimensions": [2],
"maxNewSynapseCount": 1,
"cellsPerColumn": 10})
sequence = self.sequenceMachine.generateFromNumbers([0, 1])
for _ in xrange(7):
self.feedTM(sequence)
self.feedTM(sequence, num=100)
def testLongRepeatingWithNovelEnding(self):
"""Long repeating sequence with novel pattern at the end"""
self.init({"columnDimensions": [3]})
sequence = self.sequenceMachine.generateFromNumbers([0, 1])
sequence *= 10
sequence += [self.patternMachine.get(2), None]
for _ in xrange(4):
self.feedTM(sequence)
self.feedTM(sequence, num=10)
def testSingleEndlesslyRepeating(self):
"""A single endlessly repeating pattern"""
self.init({"columnDimensions": [1]})
sequence = [self.patternMachine.get(0)]
for _ in xrange(4):
self.feedTM(sequence)
for _ in xrange(2):
self.feedTM(sequence, num=10)
# ==============================
# Overrides
# ==============================
def setUp(self):
super(TutorialTemporalMemoryTest, self).setUp()
print ("\n"
"======================================================\n"
"Test: {0} \n"
"{1}\n"
"======================================================\n"
).format(self.id(), self.shortDescription())
def init(self, *args, **kwargs):
super(TutorialTemporalMemoryTest, self).init(*args, **kwargs)
print "Initialized new TM with parameters:"
print pprint.pformat(self._computeTMParams(kwargs.get("overrides")))
print
def feedTM(self, sequence, learn=True, num=1):
self._showInput(sequence, learn=learn, num=num)
super(TutorialTemporalMemoryTest, self).feedTM(
sequence, learn=learn, num=num)
print self.tm.mmPrettyPrintTraces(self.tm.mmGetDefaultTraces(verbosity=2),
breakOnResets=self.tm.mmGetTraceResets())
print
if learn:
self._printConnections()
# ==============================
# Helper functions
# ==============================
def _printConnections(self):
# This is in a helper so that it can be overridden.
print self.tm.mmPrettyPrintConnections()
def _showInput(self, sequence, learn=True, num=1):
sequenceText = self.sequenceMachine.prettyPrintSequence(
sequence,
verbosity=self.VERBOSITY)
learnText = "(learning {0})".format("enabled" if learn else "disabled")
numText = " [{0} times]".format(num) if num > 1 else ""
print "Feeding sequence {0}{1}:\n{2}".format(
learnText, numText, sequenceText)
print
class TutorialTemporalMemoryTestsCPP(TutorialTemporalMemoryTest, unittest.TestCase):
  """Runs the tutorial suite against the C++ TemporalMemory binding."""

  def getTMClass(self):
    """Return the C++ temporal memory implementation under test."""
    return nupic.bindings.algorithms.TemporalMemory

  def _printConnections(self):
    """Suppress connection dumps: segmentsForCell is not callable on the
    C++ connections class (yet)."""
class TutorialTemporalMemoryTestsPY(TutorialTemporalMemoryTest, unittest.TestCase):
  """Runs the tutorial suite against the pure-Python TemporalMemory."""

  def getTMClass(self):
    """Return the Python temporal memory implementation under test."""
    return nupic.algorithms.temporal_memory.TemporalMemory
if __name__ == "__main__":
  # Allow the tutorial suite to be run directly as a script.
  unittest.main()
| 8,145 | Python | .py | 181 | 39.850829 | 98 | 0.692679 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,165 | temporal_memory_monitor_mixin_test.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/monitor_mixin/temporal_memory_monitor_mixin_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
from nupic.algorithms.monitor_mixin.temporal_memory_monitor_mixin import (
TemporalMemoryMonitorMixin)
from nupic.algorithms.temporal_memory import TemporalMemory
from nupic.data.generators.pattern_machine import ConsecutivePatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
class MonitoredTemporalMemory(TemporalMemoryMonitorMixin, TemporalMemory):
  """TemporalMemory with the monitor mixin's trace/metric recording mixed in."""
class TemporalMemoryMonitorMixinTest(unittest.TestCase):
  """Tests the trace and metric bookkeeping that TemporalMemoryMonitorMixin
  layers on top of TemporalMemory (see MonitoredTemporalMemory above).
  """
  def setUp(self):
    # 100 columns, 5 active bits per pattern; consecutive patterns do not
    # overlap, so each number maps to a distinct set of columns.
    self.patternMachine = ConsecutivePatternMachine(100, 5)
    self.sequenceMachine = SequenceMachine(self.patternMachine)
    self.tm = MonitoredTemporalMemory(columnDimensions=[100],
                                      cellsPerColumn=4,
                                      initialPermanence=0.6,
                                      connectedPermanence=0.5,
                                      minThreshold=1,
                                      maxNewSynapseCount=6,
                                      permanenceIncrement=0.1,
                                      permanenceDecrement=0.05,
                                      activationThreshold=1)
  def testFeedSequence(self):
    """Feeding a labeled sequence records one trace entry per compute() and
    captures resets, labels, and predicted/unpredicted activity."""
    sequence = self._generateSequence()
    sequenceLength = len(sequence) - 3 # without resets
    # Replace last pattern (before the None) with an unpredicted one
    sequence[-2] = self.patternMachine.get(4)
    self._feedSequence(sequence, sequenceLabel="Test")
    activeColumnsTrace = self.tm.mmGetTraceActiveColumns()
    predictiveCellsTrace = self.tm.mmGetTracePredictiveCells()
    sequenceLabelsTrace = self.tm.mmGetTraceSequenceLabels()
    resetsTrace = self.tm.mmGetTraceResets()
    predictedActiveCellsTrace = self.tm.mmGetTracePredictedActiveCells()
    predictedInactiveCellsTrace = self.tm.mmGetTracePredictedInactiveCells()
    predictedActiveColumnsTrace = self.tm.mmGetTracePredictedActiveColumns()
    predictedInactiveColumnsTrace = self.tm.mmGetTracePredictedInactiveColumns()
    unpredictedActiveColumnsTrace = self.tm.mmGetTraceUnpredictedActiveColumns()
    # Every trace gets exactly one entry per compute() call (resets excluded).
    self.assertEqual(len(activeColumnsTrace.data), sequenceLength)
    self.assertEqual(len(predictiveCellsTrace.data), sequenceLength)
    self.assertEqual(len(sequenceLabelsTrace.data), sequenceLength)
    self.assertEqual(len(resetsTrace.data), sequenceLength)
    self.assertEqual(len(predictedActiveCellsTrace.data), sequenceLength)
    self.assertEqual(len(predictedInactiveCellsTrace.data), sequenceLength)
    self.assertEqual(len(predictedActiveColumnsTrace.data), sequenceLength)
    self.assertEqual(len(predictedInactiveColumnsTrace.data), sequenceLength)
    self.assertEqual(len(unpredictedActiveColumnsTrace.data), sequenceLength)
    self.assertEqual(activeColumnsTrace.data[-1], self.patternMachine.get(4))
    self.assertEqual(sequenceLabelsTrace.data[-1], "Test")
    # Resets are flagged at the start of each of the 3 sequence repetitions.
    self.assertEqual(resetsTrace.data[0], True)
    self.assertEqual(resetsTrace.data[1], False)
    self.assertEqual(resetsTrace.data[10], True)
    self.assertEqual(resetsTrace.data[-1], False)
    # Second-to-last input was predicted (5 columns); the swapped-in last
    # input was not, so its 5 columns show up as predicted-inactive /
    # unpredicted-active instead.
    self.assertEqual(len(predictedActiveCellsTrace.data[-2]), 5)
    self.assertEqual(len(predictedActiveCellsTrace.data[-1]), 0)
    self.assertEqual(len(predictedInactiveCellsTrace.data[-2]), 0)
    self.assertEqual(len(predictedInactiveCellsTrace.data[-1]), 5)
    self.assertEqual(len(predictedActiveColumnsTrace.data[-2]), 5)
    self.assertEqual(len(predictedActiveColumnsTrace.data[-1]), 0)
    self.assertEqual(len(predictedInactiveColumnsTrace.data[-2]), 0)
    self.assertEqual(len(predictedInactiveColumnsTrace.data[-1]), 5)
    self.assertEqual(len(unpredictedActiveColumnsTrace.data[-2]), 0)
    self.assertEqual(len(unpredictedActiveColumnsTrace.data[-1]), 5)
  def testClearHistory(self):
    """mmClearHistory() empties every recorded trace."""
    sequence = self._generateSequence()
    self._feedSequence(sequence, sequenceLabel="Test")
    self.tm.mmClearHistory()
    activeColumnsTrace = self.tm.mmGetTraceActiveColumns()
    predictiveCellsTrace = self.tm.mmGetTracePredictiveCells()
    sequenceLabelsTrace = self.tm.mmGetTraceSequenceLabels()
    resetsTrace = self.tm.mmGetTraceResets()
    predictedActiveCellsTrace = self.tm.mmGetTracePredictedActiveCells()
    predictedInactiveCellsTrace = self.tm.mmGetTracePredictedInactiveCells()
    predictedActiveColumnsTrace = self.tm.mmGetTracePredictedActiveColumns()
    predictedInactiveColumnsTrace = self.tm.mmGetTracePredictedInactiveColumns()
    unpredictedActiveColumnsTrace = self.tm.mmGetTraceUnpredictedActiveColumns()
    self.assertEqual(len(activeColumnsTrace.data), 0)
    self.assertEqual(len(predictiveCellsTrace.data), 0)
    self.assertEqual(len(sequenceLabelsTrace.data), 0)
    self.assertEqual(len(resetsTrace.data), 0)
    self.assertEqual(len(predictedActiveCellsTrace.data), 0)
    self.assertEqual(len(predictedInactiveCellsTrace.data), 0)
    self.assertEqual(len(predictedActiveColumnsTrace.data), 0)
    self.assertEqual(len(predictedInactiveColumnsTrace.data), 0)
    self.assertEqual(len(unpredictedActiveColumnsTrace.data), 0)
  def testSequencesMetrics(self):
    """Per-sequence metrics distinguish disjoint sequences from ones that
    share predicted-active cells."""
    sequence = self._generateSequence()
    self._feedSequence(sequence, "Test1")
    sequence.reverse()
    sequence.append(sequence.pop(0)) # Move None (reset) to the end
    self._feedSequence(sequence, "Test2")
    sequencesPredictedActiveCellsPerColumnMetric = \
      self.tm.mmGetMetricSequencesPredictedActiveCellsPerColumn()
    sequencesPredictedActiveCellsSharedMetric = \
      self.tm.mmGetMetricSequencesPredictedActiveCellsShared()
    self.assertEqual(sequencesPredictedActiveCellsPerColumnMetric.mean, 1)
    self.assertEqual(sequencesPredictedActiveCellsSharedMetric.mean, 1)
    # Re-feeding the same sequence under a new label shares cells with Test2.
    self._feedSequence(sequence, "Test3")
    sequencesPredictedActiveCellsPerColumnMetric = \
      self.tm.mmGetMetricSequencesPredictedActiveCellsPerColumn()
    sequencesPredictedActiveCellsSharedMetric = \
      self.tm.mmGetMetricSequencesPredictedActiveCellsShared()
    self.assertEqual(sequencesPredictedActiveCellsPerColumnMetric.mean, 1)
    self.assertTrue(sequencesPredictedActiveCellsSharedMetric.mean > 1)
  # ==============================
  # Helper functions
  # ==============================
  def _generateSequence(self):
    # Patterns 0..9 followed by a None reset marker, repeated 3 times.
    numbers = range(0, 10)
    sequence = self.sequenceMachine.generateFromNumbers(numbers)
    sequence.append(None)
    sequence *= 3
    return sequence
  def _feedSequence(self, sequence, sequenceLabel=None):
    # None entries reset the TM; everything else is fed through compute().
    for pattern in sequence:
      if pattern is None:
        self.tm.reset()
      else:
        self.tm.compute(pattern, sequenceLabel=sequenceLabel)
if __name__ == "__main__":
  # Allow this test module to be run directly as a script.
  unittest.main()
| 7,610 | Python | .py | 138 | 48.73913 | 80 | 0.745159 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,166 | pca_knn_data.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/knn_classifier_test/pca_knn_data.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file generates data for the PCA/KNN classifier tests
"""
import logging
import numpy
LOGGER = logging.getLogger(__name__)
def generate(numDims, numClasses, k, numPatternsPerClass,
             numPatterns, numTests, numSVDSamples, keep):
  """Build clustered train/test data for the PCA/KNN classifier tests.

  Each class gets ``numPatternsPerClass`` vectors drawn uniformly from a
  cube of side 5 anchored at ``5 * classIndex`` on every axis, so classes
  are linearly separable. The pool is shuffled (fixed seed 42 for
  reproducibility) and split into ``numPatterns`` training vectors and
  ``numTests`` test vectors.

  Returns a 4-tuple: (train_data, train_class, test_data, test_class).
  ``k``, ``numSVDSamples`` and ``keep`` are only logged here; the caller
  uses them to configure the classifier.
  """
  LOGGER.info('N dims=%s', numDims)
  LOGGER.info('N classes=%s', numClasses)
  LOGGER.info('k=%s', k)
  LOGGER.info('N vectors per class=%s', numPatternsPerClass)
  LOGGER.info('N training vectors=%s', numPatterns)
  LOGGER.info('N test vectors=%s', numTests)
  LOGGER.info('N SVD samples=%s', numSVDSamples)
  LOGGER.info('N reduced dims=%s', int(keep*numDims))
  LOGGER.info('Generating data')
  numpy.random.seed(42)
  totalVectors = numPatterns + numTests
  samples = numpy.zeros((totalVectors, numDims))
  labels = numpy.zeros((totalVectors), dtype='int')
  row = 0
  for classIndex in range(numClasses):
    center = 5 * classIndex * numpy.ones((numDims))
    for _ in range(numPatternsPerClass):
      samples[row] = center + 5 * numpy.random.random((numDims))
      labels[row] = classIndex
      row += 1
  if 0: # Change this to visualize the output
    import pylab
    pylab.ion()
    pylab.figure()
    _u, _s, vt = pylab.svd(samples[:numPatterns])
    projected = numpy.zeros((numPatterns, 2))
    for idx in range(numPatterns):
      projected[idx] = numpy.dot(vt, samples[idx])[:2]
    pylab.scatter(projected[:, 0], projected[:, 1])
  shuffled = numpy.random.permutation(totalVectors)
  trainRows = shuffled[:numPatterns]
  testRows = shuffled[numPatterns:]
  return (samples[trainRows], labels[trainRows],
          samples[testRows], labels[testRows])
| 2,569 | Python | .py | 63 | 37.698413 | 73 | 0.682329 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,167 | categories_test.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/knn_classifier_test/categories_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import logging
import unittest2 as unittest
import numpy
from nupic.algorithms.knn_classifier import KNNClassifier
LOGGER = logging.getLogger(__name__)
class KNNCategoriesTest(unittest.TestCase):
  """Tests how k Nearest Neighbor classifier handles categories"""

  def testCategories(self):
    # Fix numpy's RNG so the simulated categories are fully reproducible.
    numpy.random.seed(42)
    failures, _knn = simulateCategories()
    # Any accumulated failure text means at least one sub-check went wrong.
    self.assertEqual(len(failures), 0, "Tests failed: \n" + failures)
def simulateCategories(numSamples=100, numDimensions=500):
  """Simulate running KNN classifier on many disjoint categories"""
  failures = ""
  LOGGER.info("Testing the sparse KNN Classifier on many disjoint categories")
  knn = KNNClassifier(k=1, distanceNorm=1.0, useSparseMemory=True)
  # Train on randomly chosen even categories in [50, 148].
  for _ in range(numSamples):
    category = 2*numpy.random.randint(0, 50) + 50
    vector = createPattern(category, numDimensions)
    knn.learn(vector, category)
  # Then train once on every category so each is guaranteed to be present.
  for slot in range(50):
    category = 2*slot + 50
    vector = createPattern(category, numDimensions)
    knn.learn(vector, category)
  # Inference on fresh vectors must recover the category they encode.
  errors = 0
  for _ in range(numSamples):
    category = 2*numpy.random.randint(0, 50) + 50
    vector = createPattern(category, numDimensions)
    inferCat, _kir, _kd, _kcd = knn.infer(vector)
    if inferCat != category:
      LOGGER.info("Mistake with %s %s %s %s %s", vector[vector.nonzero()],
                  "mapped to category", inferCat, "instead of category",
                  category)
      LOGGER.info(" %s", vector.nonzero())
      errors += 1
  if errors != 0:
    failures += "Failure in handling non-consecutive category indices\n"
  # The closest training pattern for a vector of category c must itself
  # have a nonzero element at index c.
  errors = 0
  for _ in range(10):
    category = 2*numpy.random.randint(0, 50) + 50
    vector = createPattern(category, numDimensions)
    closest = knn.closestTrainingPattern(vector, category)
    if category not in closest.nonzero()[0]:
      LOGGER.info("Mistake %s %s", closest.nonzero(), vector.nonzero())
      LOGGER.info("%s %s", closest[closest.nonzero()],
                  vector[vector.nonzero()])
      errors += 1
  if errors != 0:
    failures += "Failure in closestTrainingPattern method\n"
  return failures, knn
def createPattern(c, numDimensions):
  """Build a sparse vector of length ``numDimensions`` encoding category ``c``.

  Element ``c`` receives a high random value in [10, 15); its neighbor
  ``c + 1`` (and ``c - 1`` when ``c > 0``) receives a low random value in
  [0, 1). Every other element stays zero. ``numDimensions`` must exceed
  ``c + 1``.
  """
  pattern = numpy.zeros(numDimensions)
  pattern[c] = 5 * numpy.random.random() + 10
  pattern[c + 1] = numpy.random.random()
  if c > 0:
    pattern[c - 1] = numpy.random.random()
  return pattern
if __name__ == "__main__":
  # Allow this test module to be run directly as a script.
  unittest.main()
| 3,864 | Python | .py | 94 | 37.244681 | 79 | 0.680845 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,168 | classifier_test.py | numenta_nupic-legacy/tests/integration/nupic/algorithms/knn_classifier_test/classifier_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import logging
import time
import unittest2 as unittest
import cPickle
import numpy
from nupic.bindings.regions.PyRegion import RealNumpyDType
from nupic.algorithms.knn_classifier import KNNClassifier
import pca_knn_data
LOGGER = logging.getLogger(__name__)
class KNNClassifierTest(unittest.TestCase):
  """Tests for k Nearest Neighbor classifier"""
  def runTestKNNClassifier(self, short = 0):
    """Exercise the KNN classifier on dense and sparse random patterns.

    short selects the test size: 0 (short), 1 (medium), or 2 (long).
    In long mode the RNG is seeded from the clock and the seed is appended
    to a 'seedval' file so failures can be reproduced.
    """
    failures = ""
    if short != 2:
      # Deterministic runs for short/medium mode.
      numpy.random.seed(42)
    else:
      seed_value = int(time.time())
      numpy.random.seed(seed_value)
      LOGGER.info('Seed used: %d', seed_value)
      f = open('seedval', 'a')
      f.write(str(seed_value))
      f.write('\n')
      f.close()
    failures += simulateKMoreThanOne()
    LOGGER.info("\nTesting KNN Classifier on dense patterns")
    numPatterns, numClasses = getNumTestPatterns(short)
    patternSize = 100
    patterns = numpy.random.rand(numPatterns, patternSize)
    patternDict = dict()
    testDict = dict()
    # Assume there are no repeated patterns -- if there are, then
    # numpy.random would be completely broken.
    # Patterns in testDict are identical to those in patternDict but for the
    # first 2% of items.
    for i in xrange(numPatterns):
      patternDict[i] = dict()
      patternDict[i]['pattern'] = patterns[i]
      patternDict[i]['category'] = numpy.random.randint(0, numClasses-1)
      testDict[i] = copy.deepcopy(patternDict[i])
      testDict[i]['pattern'][:int(0.02*patternSize)] = numpy.random.rand()
      testDict[i]['category'] = None
    LOGGER.info("\nTesting KNN Classifier with L2 norm")
    knn = KNNClassifier(k=1)
    failures += simulateClassifier(knn, patternDict, \
      "KNN Classifier with L2 norm test")
    LOGGER.info("\nTesting KNN Classifier with L1 norm")
    knnL1 = KNNClassifier(k=1, distanceNorm=1.0)
    failures += simulateClassifier(knnL1, patternDict, \
      "KNN Classifier with L1 norm test")
    # Test with exact matching classifications.
    LOGGER.info("\nTesting KNN Classifier with exact matching. For testing we "
      "slightly alter the training data and expect None to be returned for the "
      "classifications.")
    knnExact = KNNClassifier(k=1, exact=True)
    failures += simulateClassifier(knnExact,
                                   patternDict,
                                   "KNN Classifier with exact matching test",
                                   testDict=testDict)
    # Repeat on sparse binary patterns, de-duplicated via their string form.
    numPatterns, numClasses = getNumTestPatterns(short)
    patterns = (numpy.random.rand(numPatterns, 25) > 0.7).astype(RealNumpyDType)
    patternDict = dict()
    for i in patterns:
      iString = str(i.tolist())
      if not patternDict.has_key(iString):
        randCategory = numpy.random.randint(0, numClasses-1)
        patternDict[iString] = dict()
        patternDict[iString]['pattern'] = i
        patternDict[iString]['category'] = randCategory
    LOGGER.info("\nTesting KNN on sparse patterns")
    knnDense = KNNClassifier(k=1)
    failures += simulateClassifier(knnDense, patternDict, \
      "KNN Classifier on sparse pattern test")
    self.assertEqual(len(failures), 0,
                     "Tests failed: \n" + failures)
    if short == 2:
      # Record that this randomly-seeded run passed.
      f = open('seedval', 'a')
      f.write('Pass\n')
      f.close()
  def runTestPCAKNN(self, short = 0):
    """Compare a plain KNN against a PCA-reduced KNN on clustered data.

    Both classifiers are trained on identical data; the PCA variant must
    reproduce the plain classifier's winners exactly (asserted below).
    """
    LOGGER.info('\nTesting PCA/k-NN classifier')
    LOGGER.info('Mode=%s', short)
    numDims = 10
    numClasses = 10
    k = 10
    numPatternsPerClass = 100
    numPatterns = int(.9 * numClasses * numPatternsPerClass)
    numTests = numClasses * numPatternsPerClass - numPatterns
    numSVDSamples = int(.1 * numPatterns)
    keep = 1
    train_data, train_class, test_data, test_class = \
      pca_knn_data.generate(numDims, numClasses, k, numPatternsPerClass,
                            numPatterns, numTests, numSVDSamples, keep)
    pca_knn = KNNClassifier(k=k,numSVDSamples=numSVDSamples,
                            numSVDDims=keep)
    knn = KNNClassifier(k=k)
    LOGGER.info('Training PCA k-NN')
    for i in range(numPatterns):
      knn.learn(train_data[i], train_class[i])
      pca_knn.learn(train_data[i], train_class[i])
    LOGGER.info('Testing PCA k-NN')
    numWinnerFailures = 0
    numInferenceFailures = 0
    numDistFailures = 0
    numAbsErrors = 0
    for i in range(numTests):
      winner, inference, dist, categoryDist = knn.infer(test_data[i])
      pca_winner, pca_inference, pca_dist, pca_categoryDist \
        = pca_knn.infer(test_data[i])
      if winner != test_class[i]:
        numAbsErrors += 1
      if pca_winner != winner:
        numWinnerFailures += 1
      if (numpy.abs(pca_inference - inference) > 1e-4).any():
        numInferenceFailures += 1
      if (numpy.abs(pca_dist - dist) > 1e-4).any():
        numDistFailures += 1
    # Convert failure counts into success percentages for logging.
    s0 = 100*float(numTests - numAbsErrors) / float(numTests)
    s1 = 100*float(numTests - numWinnerFailures) / float(numTests)
    s2 = 100*float(numTests - numInferenceFailures) / float(numTests)
    s3 = 100*float(numTests - numDistFailures) / float(numTests)
    LOGGER.info('PCA/k-NN success rate=%s%s', s0, '%')
    LOGGER.info('Winner success=%s%s', s1, '%')
    LOGGER.info('Inference success=%s%s', s2, '%')
    LOGGER.info('Distance success=%s%s', s3, '%')
    # Only the winner agreement is a hard requirement.
    self.assertEqual(s1, 100.0,
                     "PCA/k-NN test failed")
  def testKNNClassifierShort(self):
    self.runTestKNNClassifier(0)
  def testPCAKNNShort(self):
    self.runTestPCAKNN(0)
  def testKNNClassifierMedium(self):
    self.runTestKNNClassifier(1)
  def testPCAKNNMedium(self):
    self.runTestPCAKNN(1)
def simulateKMoreThanOne():
  """A small test with k=3"""
  failures = ""
  LOGGER.info("Testing the sparse KNN Classifier with k=3")
  knn = KNNClassifier(k=3)
  # Six 2-D points: the first three cluster near y=0 (category 0), the
  # last three near y=2..4.5 (category 1).
  trainingSet = numpy.zeros((6, 2))
  trainingSet[0] = [1.0, 0.0]
  trainingSet[1] = [1.0, 0.2]
  trainingSet[2] = [1.0, 0.2]
  trainingSet[3] = [1.0, 2.0]
  trainingSet[4] = [1.0, 4.0]
  trainingSet[5] = [1.0, 4.5]
  categories = (0, 0, 0, 1, 1, 1)
  for vector, category in zip(trainingSet, categories):
    knn.learn(vector, category)
  # With k=3 the majority vote should classify these probes as expected;
  # note trainingSet[3] still maps to 0 because its 3 nearest neighbors
  # include the tight category-0 cluster.
  for index, expected in ((0, 0), (2, 0), (3, 0), (5, 1)):
    winner, _inferenceResult, _dist, _categoryDist = knn.infer(
      trainingSet[index])
    if winner != expected:
      failures += "Inference failed with k=3\n"
  if not failures:
    LOGGER.info("Tests passed.")
  return failures
def simulateClassifier(knn, patternDict, testName, testDict=None):
  """Train ``knn`` on every entry of ``patternDict`` and measure recall.

  Each value of ``patternDict`` (and ``testDict``) is a dict with
  'pattern' and 'category' keys. When ``testDict`` is given, inference
  runs on it; otherwise the classifier is tested on its own training set.
  Returns an accumulated failure string ("" when the error rate is zero).
  """
  failures = ""
  numPatterns = len(patternDict)
  LOGGER.info("Training the classifier")
  trainStart = time.time()
  for entry in patternDict.values():
    knn.learn(entry['pattern'], entry['category'])
  trainEnd = time.time()
  LOGGER.info("Time Elapsed %s", trainEnd - trainStart)
  # Serialized size is logged as a rough memory-footprint indicator.
  knnString = cPickle.dumps(knn)
  LOGGER.info("Size of the classifier is %s", len(knnString))
  errorCount = 0
  testStart = time.time()
  if testDict:
    LOGGER.info("Testing the classifier on the test set")
    for entry in testDict.values():
      winner, _inferenceResult, _dist, _categoryDist = knn.infer(
        entry['pattern'])
      if winner != entry['category']:
        errorCount += 1
  else:
    LOGGER.info("Testing the classifier on the training set")
    LOGGER.info("Number of patterns: %s", len(patternDict))
    for key, entry in patternDict.items():
      LOGGER.info("Testing %s - %s %s", key, entry['category'],
                  len(entry['pattern']))
      winner, _inferenceResult, _dist, _categoryDist = knn.infer(
        entry['pattern'])
      if winner != entry['category']:
        errorCount += 1
  testEnd = time.time()
  LOGGER.info("Time Elapsed %s", testEnd - testStart)
  errorRate = float(errorCount) / numPatterns
  LOGGER.info("Error rate is %s", errorRate)
  if errorRate == 0:
    LOGGER.info(testName + " passed")
  else:
    LOGGER.info(testName + " failed")
    failures += testName + " failed\n"
  return failures
def getNumTestPatterns(short=0):
  """Return the number of patterns and classes the test should use."""
  # Per test size: (log message, pattern-count range, class-count range).
  sizeTable = {
    0: ("Running short tests", (300, 600), (50, 150)),
    1: ("\nRunning medium tests", (500, 1500), (50, 150)),
  }
  message, patternRange, classRange = sizeTable.get(
    short, ("\nRunning long tests", (500, 3000), (30, 1000)))
  LOGGER.info(message)
  # Draw order matters for reproducibility with a fixed seed: patterns
  # first, then classes (same as the original if/elif version).
  numPatterns = numpy.random.randint(*patternRange)
  numClasses = numpy.random.randint(*classRange)
  LOGGER.info("number of patterns is %s", numPatterns)
  LOGGER.info("number of classes is %s", numClasses)
  return numPatterns, numClasses
if __name__ == "__main__":
  # Allow this test module to be run directly as a script.
  unittest.main()
| 10,198 | Python | .py | 253 | 34.936759 | 80 | 0.668052 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,169 | hotgym_regression_test.py | numenta_nupic-legacy/tests/integration/nupic/opf/hotgym_regression_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Regression test that checks for differences in hotgym results.
If the prediction results differ then the test fails and must be
explicitly updated to match the new results.
"""
import collections
import csv
import os
import shutil
import unittest
from nupic.frameworks.opf import experiment_runner
class HotgymRegressionTest(unittest.TestCase):
  """Hotgym regression test to validate that predictions don't change."""

  def testHotgymRegression(self):
    # Locate examples/opf/experiments/multistep/hotgym relative to the
    # repository root (everything before tests/integration/nupic/opf in
    # this module's path).
    repoRoot = os.path.dirname(__file__).partition(
        os.path.normpath("tests/integration/nupic/opf"))[0]
    experimentDir = os.path.join(repoRoot, "examples", "opf", "experiments",
                                 "multistep", "hotgym")
    resultsDir = os.path.join(experimentDir, "inference")
    savedModelsDir = os.path.join(experimentDir, "savedmodels")
    try:
      _model = experiment_runner.runExperiment([experimentDir])
      resultsPath = os.path.join(
          resultsDir, "DefaultTask.TemporalMultiStep.predictionLog.csv")
      with open(resultsPath) as resultsFile:
        rows = csv.reader(resultsFile)
        headers = rows.next()
        self.assertEqual(headers[14],
                         "multiStepBestPredictions:multiStep:errorMetric='aae':"
                         "steps=1:window=1000:field=consumption")
        # Keep only the final row of the prediction log.
        finalRow = collections.deque(rows, 1)[0]
      # Changes that affect prediction results will cause this test to fail. If
      # the change is understood and reviewers agree that there has not been a
      # regression then this value can be updated to reflect the new result.
      self.assertAlmostEqual(float(finalRow[14]), 5.85504058885)
    finally:
      # Always remove the artifacts the experiment wrote, pass or fail.
      shutil.rmtree(resultsDir, ignore_errors=True)
      shutil.rmtree(savedModelsDir, ignore_errors=True)
if __name__ == "__main__":
unittest.main()
| 2,755 | Python | .py | 59 | 42.135593 | 80 | 0.697388 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,170 | prediction_metrics_manager_test.py | numenta_nupic-legacy/tests/integration/nupic/opf/prediction_metrics_manager_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This file invokes prediction_metrics_manager.py tests
TODO: Move these tests to unit test format.
"""
from nupic.frameworks.opf.prediction_metrics_manager import (
test as predictionMetricsManagerTest)
if __name__ == "__main__":
predictionMetricsManagerTest()
| 1,251 | Python | .py | 27 | 44.888889 | 72 | 0.689655 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,171 | expgenerator_test.py | numenta_nupic-legacy/tests/integration/nupic/opf/expgenerator_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import imp
import json
import logging
from optparse import OptionParser
import os
import pprint
import shutil
import string
import subprocess
import sys
from pkg_resources import resource_filename
import unittest2 as unittest
from nupic.database.client_jobs_dao import ClientJobsDAO
from nupic.support import aggregationDivide
from nupic.support.unittesthelpers.testcasebase import (
TestCaseBase as HelperTestCaseBase)
from nupic.swarming import hypersearch_worker
from nupic.swarming.permutation_helpers import PermuteChoices
from nupic.swarming.utils import generatePersistentJobGUID, rCopy
from nupic.frameworks.opf.exp_description_api import OpfEnvironment
from nupic.swarming.exp_generator import experiment_generator
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
# Module-level logger used throughout this test module.
LOGGER = logging.getLogger(__name__)
# Relative path of the hotgym sample CSV used as the input stream in most of
# the generated experiment descriptions below.
HOTGYM_INPUT = "extra/hotgym/hotgym.csv"
# Presumably gates extra diagnostic output via _debugOut() -- TODO confirm
# against _debugOut's definition elsewhere in this file.
g_debug = False
# Our __main__ entry block sets this to an instance of MyTestEnvironment()
# (it is also created lazily in ExperimentTestBaseClass.setUp).
g_myEnv = None
class MyTestEnvironment(object):
  """Per-run scratch environment: validates the install dir and (re)creates a
  clean output directory for generated description/permutations files.
  """

  def __init__(self, options):
    # Keep the full option set around for later inspection.
    self.options = options

    # Installation root (e.g., ~/nupic/current) must already exist.
    rootDir = os.path.abspath(options.installDir)
    if not os.path.exists(rootDir):
      raise RuntimeError("install directory %s doesn't exist" % \
                         (options.installDir))
    _debugOut("installRootDir=<%s>" % (rootDir,))

    # Directory this script runs from (autotest expgenerator_test.py may have
    # copied it away from its original location).
    self.testRunDir = os.path.dirname(os.path.abspath(__file__))
    _debugOut("self.testRunDir=<%s>" % (self.testRunDir,))

    # Fresh directory for generated files; wipe any leftovers first.
    self.testOutDir = os.path.join(self.testRunDir, 'expGeneratorOut')
    shutil.rmtree(self.testOutDir, ignore_errors=True)
    os.makedirs(self.testOutDir)
    LOGGER.info("Generating experiment description files in: %s", \
                os.path.abspath(self.testOutDir))

  def cleanUp(self):
    """Best-effort removal of the generated-output directory."""
    shutil.rmtree(self.testOutDir, ignore_errors=True)
class ExperimentTestBaseClass(HelperTestCaseBase):
  """Shared scaffolding for ExpGenerator tests.

  Runs ExpGenerator on a JSON experiment description, imports the generated
  description.py / permutations.py to prove they are valid python, and can
  optionally run them through OPF and a Hypersearch worker.
  """

  # We will load the description.py and permutations.py files as modules
  # multiple times in order to verify that they are valid python scripts. To
  # facilitate this, we reload with a unique module name
  # ("expGenerator_generated_script%d") each time.
  __pythonScriptImportCount = 0

  @classmethod
  def newScriptImportName(cls):
    """Return a process-unique module name for use with imp.load_source()."""
    cls.__pythonScriptImportCount += 1
    name = "expGenerator_generated_script%d" % cls.__pythonScriptImportCount
    return name

  def setUp(self):
    """ Method called to prepare the test fixture. This is called by the
    unittest framework immediately before calling the test method; any exception
    raised by this method will be considered an error rather than a test
    failure. The default implementation does nothing.
    """
    global g_myEnv
    if not g_myEnv:
      # Lazily build the shared environment on first use; installDir points
      # at the installed nupic package.
      params = type('obj', (object,),
                    {'installDir': resource_filename("nupic", "")})
      g_myEnv = MyTestEnvironment(params)

  def tearDown(self):
    """ Method called immediately after the test method has been called and the
    result recorded. This is called even if the test method raised an exception,
    so the implementation in subclasses may need to be particularly careful
    about checking internal state. Any exception raised by this method will be
    considered an error rather than a test failure. This method will only be
    called if the setUp() succeeds, regardless of the outcome of the test
    method. The default implementation does nothing.
    """
    self.resetExtraLogItems()
    g_myEnv.cleanUp()

  def shortDescription(self):
    """ Override to force unittest framework to use test method names instead
    of docstrings in the report.
    """
    return None

  def checkPythonScript(self, scriptAbsPath):
    """Assert that scriptAbsPath exists and is importable python.

    Returns the loaded module object; raises if the import fails.
    """
    self.assertTrue(os.path.isabs(scriptAbsPath))
    self.assertTrue(os.path.isfile(scriptAbsPath),
                    ("Expected python script to be present here: <%s>") % \
                    (scriptAbsPath))
    # Test viability of the file as a python script by loading it.
    # An exception will be raised if this fails.
    mod = imp.load_source(self.newScriptImportName(), scriptAbsPath)
    return mod

  def getModules(self, expDesc, hsVersion='v2'):
    """ This does the following:

    1.) Calls ExpGenerator to generate a base description file and permutations
    file from expDescription.

    2.) Verifies that description.py and permutations.py are valid python
    modules that can be loaded

    3.) Returns the loaded base description module and permutations module

    Parameters:
    -------------------------------------------------------------------
    expDesc:   JSON format experiment description
    hsVersion: which version of hypersearch to use ('v2'; 'v1' was dropped)
    retval:    (baseModule, permutationsModule)
    """
    # Call ExpGenerator to generate the base description and permutations
    # files into a fresh output directory.
    shutil.rmtree(g_myEnv.testOutDir, ignore_errors=True)
    args = [
      "--description=%s" % (json.dumps(expDesc)),
      "--outDir=%s" % (g_myEnv.testOutDir),
      "--version=%s" % (hsVersion)
    ]
    self.addExtraLogItem({'args': args})
    experiment_generator.expGenerator(args)

    # Check that generated scripts are present and importable.
    descriptionPyPath = os.path.join(g_myEnv.testOutDir, "description.py")
    permutationsPyPath = os.path.join(g_myEnv.testOutDir, "permutations.py")
    return (self.checkPythonScript(descriptionPyPath),
            self.checkPythonScript(permutationsPyPath))

  def runBaseDescriptionAndPermutations(self, expDesc, hsVersion, maxModels=2):
    """ This does the following:

    1.) Calls ExpGenerator to generate a base description file and permutations
    file from expDescription.

    2.) Verifies that description.py and permutations.py are valid python
    modules that can be loaded

    3.) Runs the base description.py as an experiment using OPF RunExperiment.

    4.) Runs a Hypersearch using the generated permutations.py by passing it
    to HypersearchWorker.

    Parameters:
    -------------------------------------------------------------------
    expDesc:   JSON format experiment description
    hsVersion: which version of hypersearch to use ('v2'; 'v1' was dropped)
    maxModels: limit on the number of models the Hypersearch may build
               (None for no limit)
    retval:    list of model results
    """
    # Generate description.py and permutations.py into g_myEnv.testOutDir.
    self.getModules(expDesc, hsVersion=hsVersion)
    permutationsPyPath = os.path.join(g_myEnv.testOutDir, "permutations.py")

    # ----------------------------------------------------------------
    # Try running the base experiment through OPF.
    args = [g_myEnv.testOutDir]
    from nupic.frameworks.opf.experiment_runner import runExperiment
    LOGGER.info("")
    LOGGER.info("============================================================")
    LOGGER.info("RUNNING EXPERIMENT")
    LOGGER.info("============================================================")
    runExperiment(args)

    # ----------------------------------------------------------------
    # Try running the generated permutations through a Hypersearch worker.
    jobParams = {'persistentJobGUID': generatePersistentJobGUID(),
                 'permutationsPyFilename': permutationsPyPath,
                 'hsVersion': hsVersion,
                }
    if maxModels is not None:
      jobParams['maxModels'] = maxModels
    args = ['ignoreThis', '--params=%s' % (json.dumps(jobParams))]

    self.resetExtraLogItems()
    self.addExtraLogItem({'params': jobParams})
    LOGGER.info("")
    LOGGER.info("============================================================")
    LOGGER.info("RUNNING PERMUTATIONS")
    LOGGER.info("============================================================")
    jobID = hypersearch_worker.main(args)

    # Make sure all models completed successfully.
    cjDAO = ClientJobsDAO.get()
    models = cjDAO.modelsGetUpdateCounters(jobID)
    modelIDs = [model.modelId for model in models]
    results = cjDAO.modelsGetResultAndStatus(modelIDs)
    if maxModels is not None:
      self.assertEqual(len(results), maxModels, "Expected to get %d model "
                       "results but only got %d" % (maxModels, len(results)))
    for result in results:
      self.assertEqual(result.completionReason, cjDAO.CMPL_REASON_EOF,
                       "Model did not complete successfully:\n%s"
                       % (result.completionMsg))
    return results

  def assertIsInt(self, x, msg=None):
    """Assert that x is numerically an integer (within a tiny relative
    tolerance).

    FIX: the previous tolerance was `0.0001 * x`, which is zero for x == 0 and
    negative for x < 0, so an exactly-integral 0 (or any negative value) could
    never pass the strict assertLess. Use abs(x) plus a tiny absolute floor.
    """
    xInt = int(round(x))
    if msg is None:
      msg = "%s is not a valid integer" % (str(x))
    self.assertLess(abs(x - xInt), 0.0001 * abs(x) + 1e-9, msg)

  def assertValidSwarmingAggregations(self, expDesc, expectedAttempts):
    """ Test that the set of aggregations produced for a swarm are correct

    Parameters:
    -----------------------------------------------------------------------
    expDesc:          JSON experiment description
    expectedAttempts: list of (minAggregationMultiple, predictionSteps) pairs
                        that we expect to find in the aggregation choices.
    """
    # Extract out the minAggregation (the stream's base aggregation period,
    # minus the per-field aggregation specs).
    minAggregation = dict(expDesc['streamDef']['aggregation'])
    minAggregation.pop('fields')

    # --------------------------------------------------------------------
    (base, perms) = self.getModules(expDesc)
    predictionSteps = expDesc['inferenceArgs']['predictionSteps'][0]

    # Make sure we have the expected info in the base description file.
    self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
                     expDesc['inferenceArgs']['predictionSteps'])
    tmpAggregationInfo = rCopy(
      base.config['aggregationInfo'],
      lambda value, _: value)
    tmpAggregationInfo.pop('fields')
    self.assertDictEqual(tmpAggregationInfo, minAggregation)

    # predictAheadTime is minAggregation scaled by predictionSteps,
    # unit by unit.
    predictAheadTime = dict(minAggregation)
    for key in predictAheadTime:
      predictAheadTime[key] *= predictionSteps
    self.assertEqual(base.config['predictAheadTime'],
                     predictAheadTime)

    # And in the permutations file.
    self.assertEqual(
      perms.minimize,
      ("multiStepBestPredictions:multiStep:errorMetric='altMAPE':"
       "steps=\\[.*\\]:window=1000:field=consumption"))

    # Make sure the right metrics were put in.
    metrics = base.control['metrics']
    metricTuples = [(metric.metric, metric.inferenceElement, metric.params)
                    for metric in metrics]
    self.assertIn(('multiStep',
                   'multiStepBestPredictions',
                   {'window': 1000, 'steps': [predictionSteps],
                    'errorMetric': 'altMAPE'}),
                  metricTuples)

    # ------------------------------------------------------------------------
    # Get the aggregation periods to permute over, and make sure each is
    # valid.
    aggPeriods = perms.permutations['aggregationInfo']
    aggAttempts = []
    for agg in aggPeriods.choices:
      # Make sure it's an integer multiple of minAggregation.
      multipleOfMinAgg = aggregationDivide(agg, minAggregation)
      self.assertIsInt(multipleOfMinAgg,
                       "invalid aggregation period %s is not an integer multiple" \
                       "of minAggregation (%s)" % (agg, minAggregation))
      self.assertGreaterEqual(int(round(multipleOfMinAgg)), 1,
                              "invalid aggregation period %s is not >= minAggregation (%s)" % \
                              (agg, minAggregation))

      # Make sure the predictAheadTime is an integer multiple of the
      # aggregation.
      requiredSteps = aggregationDivide(predictAheadTime, agg)
      self.assertIsInt(requiredSteps,
                       "invalid aggregation period %s is not an integer factor" \
                       "of predictAheadTime (%s)" % (agg, predictAheadTime))
      self.assertGreaterEqual(int(round(requiredSteps)), 1,
                              "invalid aggregation period %s greater than " \
                              " predictAheadTime (%s)" % (agg, predictAheadTime))

      # Make sure that computeInterval is an integer multiple of the
      # aggregation.
      quotient = aggregationDivide(expDesc['computeInterval'], agg)
      self.assertIsInt(quotient,
                       "invalid aggregation period %s is not an integer factor" \
                       "of computeInterval (%s)" % (agg, expDesc['computeInterval']))
      self.assertGreaterEqual(int(round(quotient)), 1,
                              "Invalid aggregation period %s is greater than the computeInterval " \
                              "%s" % (agg, expDesc['computeInterval']))

      # FIX: previously int(requiredSteps), which truncates (e.g. 4.99995,
      # accepted by assertIsInt above, would record 4); round for consistency
      # with the assertions above.
      aggAttempts.append((int(round(multipleOfMinAgg)),
                          int(round(requiredSteps))))

    # Print summary of aggregation attempts.
    LOGGER.info("This swarm will try the following \
(minAggregationMultiple, predictionSteps) combinations: %s", aggAttempts)

    # ----------------------------------------------------------------------
    # Were these the expected attempts?
    aggAttempts.sort()
    expectedAttempts.sort()
    self.assertEqual(aggAttempts, expectedAttempts, "Expected this swarm to " \
                     "try the following (minAggMultiple, predictionSteps) " \
                     "attempts: %s, but instead it is going to try: %s" % \
                     (expectedAttempts, aggAttempts))
class PositiveExperimentTests(ExperimentTestBaseClass):
def test_ShowSchema(self):
""" Test showing the schema
"""
args = [
"--showSchema"
]
self.addExtraLogItem({'args':args})
#----------------------------------------
# Run it
experiment_generator.expGenerator(args)
return
  def test_PredictionElement(self):
    """ Test correct behavior in response to different settings in the
    prediction element
    """
    # Form the stream definition (single hotgym CSV stream, all columns).
    streamDef = dict(
      version = 1,
      info = "test_NoProviders",
      streams = [
        dict(source="file://%s" % HOTGYM_INPUT,
             info="hotGym.csv",
             columns=["*"]),
      ],
    )

    # Generate the experiment description: MultiStep inference predicting
    # 'consumption' one step ahead.
    expDesc = {
      "inferenceType":"MultiStep",
      "inferenceArgs":{
        "predictedField":"consumption",
        "predictionSteps": [1]
      },
      'environment':OpfEnvironment.Experiment,
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "timestamp",
          "fieldType": "datetime"
        },
        { "fieldName": "consumption",
          "fieldType": "float",
          "minValue": 0,
          "maxValue": 200,
        },
      ],
      "resetPeriod": {"days" : 1, "hours" : 12},
      "iterationCount": 10,
    }

    # --------------------------------------------------------------------
    # Test it out with no prediction element
    (_base, perms) = self.getModules(expDesc)

    # Make sure we have the right optimization designation. The brackets in
    # "steps=\[1\]" are backslash-escaped in the generated minimize string.
    self.assertEqual(perms.minimize,
                     ("multiStepBestPredictions:multiStep:errorMetric='altMAPE':"
                      "steps=\\[1\\]:window=%d:field=consumption")
                     % experiment_generator.METRIC_WINDOW,
                     msg="got: %s" % perms.minimize)

    # Should not have any classifier info to permute over
    self.assertNotIn('clAlpha', perms.permutations)
    return
  def assertMetric(self, base, perm, predictedField,
                   optimizeMetric, nupicScore,
                   movingBaseline,
                   oneGram,
                   trivialMetric,
                   legacyMetric=None):
    """Assert that the generated base description's metrics are all of the
    expected kinds, and that the permutations module minimizes the expected
    multi-step metric string.

    NOTE(review): legacyMetric defaults to None, and None then appears in the
    allowed-metrics membership list below -- confirm that no metricSpec ever
    has metric == None.
    """
    print "base.control"
    pprint.pprint(base.control)
    #taskMetrics = base.control['tasks'][0]['taskControl']['metrics']
    taskMetrics = base.control['metrics']
    # Every metric in the base description must be one of the expected kinds.
    for metricSpec in taskMetrics:
      print metricSpec.metric
      self.assertTrue(metricSpec.metric in ["multiStep", optimizeMetric,
                                            movingBaseline, oneGram,
                                            nupicScore, trivialMetric,
                                            legacyMetric],
                      "Unrecognized Metric type: %s"% metricSpec.metric)
      if metricSpec.metric == trivialMetric:
        self.assertEqual(metricSpec.metric, trivialMetric)
        self.assertEqual(metricSpec.inferenceElement,
                         InferenceElement.prediction)
      elif metricSpec.metric == movingBaseline:
        # Baseline metrics must carry an errorMetric parameter.
        self.assertTrue("errorMetric" in metricSpec.params)
      elif metricSpec.metric == oneGram:
        self.assertTrue("errorMetric" in metricSpec.params)
      elif metricSpec.metric == "multiStep":
        pass
      else:
        self.assertEqual(metricSpec.metric, optimizeMetric)
    #optimizeString = "prediction:%s:window=%d:field=%s" % \
    #                (optimizeMetric, ExpGenerator.METRIC_WINDOW,
    #                 predictedField)
    # Expected minimize string; the "\[1\]" brackets are literally
    # backslash-escaped in the generated permutations file.
    optimizeString = ("multiStepBestPredictions:multiStep:"
                      "errorMetric='%s':steps=\[1\]"
                      ":window=%d:field=%s" % \
                        (optimizeMetric, experiment_generator.METRIC_WINDOW,
                         predictedField))
    print "perm.minimize=",perm.minimize
    print "optimizeString=",optimizeString
    self.assertEqual(perm.minimize, optimizeString,
                     msg="got: %s" % perm.minimize)
  def test_Metrics(self):
    """ Test to make sure that the correct metrics are generated """

    # =========================================================================
    # Test category predicted field
    # =========================================================================
    streamDef = dict(
      version = 1,
      info = "test_category_predicted_field",
      streams = [
        # It doesn't matter if this stream source points to a real place or not.
        dict(source="file://dummy",
             info="dummy.csv",
             columns=["*"]),
      ],
    )

    # Generate the experiment description: predict the categorical 'playType'
    # field one step ahead.
    expDesc = {
      "inferenceType":"MultiStep",
      "inferenceArgs":{
        "predictedField":"playType",
        "predictionSteps": [1]
      },
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "timestamp",
          "fieldType": "datetime"
        },
        { "fieldName": "address",
          "fieldType": "string"
        },
        { "fieldName": "ydsToGo",
          "fieldType": "float",
        },
        { "fieldName": "playType",
          "fieldType": "string",
        },
      ],
    }

    # Make sure we have the right metric type
    # (avg_err for categories, aae for scalars)
    # NOTE(review): the positional arguments below do not line up with
    # assertMetric's parameter names (e.g. 'moving_mode' binds to nupicScore,
    # InferenceElement.prediction binds to oneGram); the membership check in
    # assertMetric makes this pass regardless -- confirm intent.
    (base, perms) = self.getModules(expDesc)
    self.assertMetric(base, perms, expDesc['inferenceArgs']['predictedField'],
                      'avg_err',
                      'moving_mode',
                      'one_gram',
                      InferenceElement.prediction,
                      "trivial")
    self.assertEqual(base.control['loggedMetrics'][0], ".*")

    # =========================================================================
    # Test scalar predicted field
    # =========================================================================
    expDesc['inferenceArgs']['predictedField'] = 'ydsToGo'

    (base, perms) = self.getModules(expDesc)
    self.assertMetric(base, perms, expDesc['inferenceArgs']['predictedField'],
                      'altMAPE',"moving_mean","one_gram",
                      InferenceElement.encodings, "trivial")
    self.assertEqual(base.control['loggedMetrics'][0], ".*")
def test_IncludedFields(self):
""" Test correct behavior in response to different settings in the
includedFields element
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
)
# Generate the experiment description
expDesc = {
"inferenceType":"TemporalNextStep",
"inferenceArgs":{
"predictedField":"consumption"
},
'environment':OpfEnvironment.Experiment,
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "gym",
"fieldType": "string"
},
{ "fieldName": "address",
"fieldType": "string"
},
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
],
"resetPeriod": {"days" : 1, "hours" : 12},
"iterationCount": 10,
}
# --------------------------------------------------------------------
# Test it out with all fields
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected encoders
actEncoderFields = set()
actEncoderNames = set()
for _, encoder in (
base.config['modelParams']['sensorParams']['encoders'].iteritems()):
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
# Make sure we have the right optimization designation
self.assertEqual(actEncoderFields, set(['gym', 'address', 'timestamp',
'consumption']))
self.assertEqual(actEncoderNames, set(['gym', 'address',
'timestamp_timeOfDay', 'timestamp_dayOfWeek', 'timestamp_weekend',
'consumption']))
# --------------------------------------------------------------------
# Test with a subset of fields
expDesc['includedFields'] = [
{ "fieldName": "gym",
"fieldType": "string"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
]
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected encoders
actEncoderFields = set()
actEncoderNames = set()
for _, encoder in (
base.config['modelParams']['sensorParams']['encoders'].iteritems()):
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
# Make sure we have the right optimization designation
self.assertEqual(actEncoderFields, set(['gym', 'consumption']))
self.assertEqual(actEncoderNames, set(['gym', 'consumption']))
# --------------------------------------------------------------------
# Test that min and max are honored
expDesc['includedFields'] = [
{ "fieldName": "consumption",
"fieldType": "float",
"minValue" : 42,
"maxValue" : 42.42,
},
]
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected encoders
actEncoderFields = set()
actEncoderNames = set()
actEncoderTypes = set()
minValues = set()
maxValues = set()
for _, encoder in (
base.config['modelParams']['sensorParams']['encoders'].iteritems()):
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
actEncoderTypes.add(encoder['type'])
minValues.add(encoder['minval'])
maxValues.add(encoder['maxval'])
# Make sure we have the right optimization designation
self.assertEqual(actEncoderFields, set(['consumption']))
self.assertEqual(actEncoderNames, set(['consumption']))
# Because both min and max were specifed,
# the encoder should be non-adaptive
self.assertEqual(actEncoderTypes, set(['ScalarEncoder']))
self.assertEqual(minValues, set([42]))
self.assertEqual(maxValues, set([42.42]))
# --------------------------------------------------------------------
# Test that overriding the encoderType is supported
expDesc['includedFields'] = [
{ "fieldName": "consumption",
"fieldType": "float",
"minValue" : 42,
"maxValue" : 42.42,
"encoderType": 'AdaptiveScalarEncoder',
},
]
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected encoders
actEncoderFields = set()
actEncoderNames = set()
actEncoderTypes = set()
minValues = set()
maxValues = set()
for _, encoder in (
base.config['modelParams']['sensorParams']['encoders'].iteritems()):
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
actEncoderTypes.add(encoder['type'])
minValues.add(encoder['minval'])
maxValues.add(encoder['maxval'])
# Make sure we have the right optimization designation
self.assertEqual(actEncoderFields, set(['consumption']))
self.assertEqual(actEncoderNames, set(['consumption']))
self.assertEqual(actEncoderTypes, set(['AdaptiveScalarEncoder']))
self.assertEqual(minValues, set([42]))
self.assertEqual(maxValues, set([42.42]))
# --------------------------------------------------------------------
# Test that fieldnames with funny characters (-?<>!@##'"\=...) are
# generated properly. Should throw exception for \ character
characters = string.punctuation
expDesc['includedFields'] = [{'fieldName':char+'helloField'+char,
"fieldType":"float"}
for char in characters]\
+[{'fieldName':'consumption',
'fieldType':'float'}]
try:
(base, _perms) = self.getModules(expDesc)
except:
LOGGER.info("Passed: Threw exception for bad fieldname.")
# --------------------------------------------------------------------
## Now test without backslash
characters = characters.replace('\\','')
#expDesc['includedFields'] = [{'fieldName':char+'helloField'+char,
# "fieldType":"float"}
# for char in characters]\
# +[{'fieldName':'consumption',
# 'fieldType':'float'}]
#(base, perms) = self.getModules(expDesc)
return
  def test_Aggregation(self):
    """ Test that aggregation gets pulled out of the streamDef as it should
    """
    # Form the stream definition, including an explicit aggregation spec with
    # every time unit populated plus per-field aggregation operators.
    streamDef = dict(
      version = 1,
      info = "TestAggregation",
      streams = [
        dict(source="file://%s" % HOTGYM_INPUT,
             info="hotGym.csv",
             columns=["*"]),
      ],
      aggregation = {
        'years': 1,
        'months': 2,
        'weeks': 3,
        'days': 4,
        'hours': 5,
        'minutes': 6,
        'seconds': 7,
        'milliseconds': 8,
        'microseconds': 9,
        'fields': [('consumption', 'sum'),
                   ('gym', 'first')]
      },
      sequenceIdField = 'gym',
      providers = {
        "order": ["weather"],
        "weather":{
          "locationField": "address",
          "providerType": "NamedProvider",
          "timestampField": "timestamp",
          "weatherTypes":[
            "TEMP"
          ]
        }
      }
    )

    # Generate the experiment description.
    expDesc = {
      "inferenceType":"TemporalNextStep",
      "inferenceArgs":{
        "predictedField":"consumption"
      },
      'environment':OpfEnvironment.Experiment,
      "streamDef":streamDef,
      "includedFields": [
        { "fieldName": "gym",
          "fieldType": "string"
        },
        { "fieldName": "consumption",
          "fieldType": "float",
        },
        { "fieldName": "TEMP",
          "fieldType": "float",
          "minValue": -30.0,
          "maxValue": 120.0,
        },
      ],
      "iterationCount": 10,
      "resetPeriod": {"days" : 1, "hours" : 12},
    }

    # --------------------------------------------------------------------
    # Test with aggregation: the streamDef's aggregation spec must be copied
    # verbatim into the base description's aggregationInfo.
    (base, _perms) = self.getModules(expDesc)

    # Make sure we have the expected aggregation (sort 'fields' on both sides
    # so the comparison is order-insensitive).
    aggInfo = base.config['aggregationInfo']
    aggInfo['fields'].sort()
    streamDef['aggregation']['fields'].sort()
    self.assertEqual(aggInfo, streamDef['aggregation'])

    # --------------------------------------------------------------------
    # Test with no aggregation: all time units should default to 0 with an
    # empty fields list.
    expDesc['streamDef'].pop('aggregation')
    (base, _perms) = self.getModules(expDesc)

    # Make sure we have the expected aggregation
    aggInfo = base.config['aggregationInfo']
    expAggInfo = {
      'years': 0,
      'months': 0,
      'weeks': 0,
      'days': 0,
      'hours': 0,
      'minutes': 0,
      'seconds': 0,
      'milliseconds': 0,
      'microseconds': 0,
      'fields': []
    }
    aggInfo['fields'].sort()
    expAggInfo['fields'].sort()
    self.assertEqual(aggInfo, expAggInfo)
    return
  def test_ResetPeriod(self):
    """ Test that reset period gets handled correctly
    """
    # Form the stream definition.
    streamDef = dict(
      version = 1,
      info = "test_NoProviders",
      streams = [
        dict(source="file://%s" % HOTGYM_INPUT,
             info="hotGym.csv",
             columns=["*"]),
      ],
    )

    # Generate the experiment description with a fully-populated resetPeriod.
    expDesc = {
      "inferenceType":"TemporalNextStep",
      "inferenceArgs":{
        "predictedField":"consumption"
      },
      'environment':OpfEnvironment.Experiment,
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "gym",
          "fieldType": "string"
        },
        { "fieldName": "consumption",
          "fieldType": "float",
        },
      ],
      "iterationCount": 10,
      "resetPeriod": {
        'weeks': 3,
        'days': 4,
        'hours': 5,
        'minutes': 6,
        'seconds': 7,
        'milliseconds': 8,
        'microseconds': 9,
      },
    }

    # --------------------------------------------------------------------
    # Test with reset period: it must be copied verbatim into the sensor's
    # sensorAutoReset setting.
    (base, _perms) = self.getModules(expDesc)

    # Make sure we have the expected reset info
    resetInfo = base.config['modelParams']['sensorParams']['sensorAutoReset']
    self.assertEqual(resetInfo, expDesc['resetPeriod'])

    # --------------------------------------------------------------------
    # Test no reset period: sensorAutoReset must be None.
    expDesc.pop('resetPeriod')
    (base, _perms) = self.getModules(expDesc)

    # Make sure we have the expected reset info
    resetInfo = base.config['modelParams']['sensorParams']['sensorAutoReset']
    self.assertEqual(resetInfo, None)
    return
  def test_RunningExperimentHSv2(self):
    """ Try running a basic Hypersearch V2 experiment and permutations
    """
    # Form the stream definition.
    streamDef = dict(
      version = 1,
      info = "test_NoProviders",
      streams = [
        dict(source="file://%s" % HOTGYM_INPUT,
             info="hotGym.csv",
             columns=["*"]),
      ],
    )

    # Generate the experiment description.
    expDesc = {
      "inferenceType":"TemporalMultiStep",
      "inferenceArgs":{
        "predictedField":"consumption"
      },
      'environment':OpfEnvironment.Nupic,
      "streamDef": streamDef,
      "includedFields": [
        { "fieldName": "timestamp",
          "fieldType": "datetime"
        },
        { "fieldName": "consumption",
          "fieldType": "float",
          "minValue": 0,
          "maxValue": 200,
        },
      ],
      "resetPeriod": {"days" : 1, "hours" : 12},
      "iterationCount": 10,
    }

    # Test it out: generates the files, runs the base experiment through OPF,
    # then runs the permutations through a Hypersearch worker (see the helper
    # on the base class for details).
    self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
    return
def test_MultiStep(self):
""" Test the we correctly generate a multi-step prediction experiment
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"],
last_record=20),
],
aggregation = {
'years': 0,
'months': 0,
'weeks': 0,
'days': 0,
'hours': 1,
'minutes': 0,
'seconds': 0,
'milliseconds': 0,
'microseconds': 0,
'fields': [('consumption', 'sum'),
('gym', 'first'),
('timestamp', 'first')]
}
)
# Generate the experiment description
expDesc = {
'environment': OpfEnvironment.Nupic,
"inferenceArgs":{
"predictedField":"consumption",
"predictionSteps": [1, 5],
},
"inferenceType": "MultiStep",
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
],
"iterationCount": -1,
"runBaselines": True,
}
# --------------------------------------------------------------------
(base, perms) = self.getModules(expDesc)
print "base.config['modelParams']:"
pprint.pprint(base.config['modelParams'])
print "perms.permutations"
pprint.pprint(perms.permutations)
print "perms.minimize"
pprint.pprint(perms.minimize)
print "expDesc"
pprint.pprint(expDesc)
# Make sure we have the expected info in the base description file
self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
expDesc['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'],
expDesc['inferenceArgs']['predictedField'])
self.assertEqual(base.config['modelParams']['inferenceType'],
"TemporalMultiStep")
# Make sure there is a '_classifier_input' encoder with classifierOnly
# set to True
self.assertEqual(base.config['modelParams']['sensorParams']['encoders']
['_classifierInput']['classifierOnly'], True)
self.assertEqual(base.config['modelParams']['sensorParams']['encoders']
['_classifierInput']['fieldname'],
expDesc['inferenceArgs']['predictedField'])
# And in the permutations file
self.assertIn('inferenceType', perms.permutations['modelParams'])
self.assertEqual(perms.minimize,
"multiStepBestPredictions:multiStep:errorMetric='altMAPE':" \
+ "steps=\\[1, 5\\]:window=1000:field=consumption")
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
# Should permute over the _classifier_input encoder params
self.assertIn('_classifierInput',
perms.permutations['modelParams']['sensorParams']['encoders'])
# Should set inputPredictedField to "auto" (the default)
self.assertEqual(perms.inputPredictedField, "auto")
# Should have TM parameters being permuted
self.assertIn('activationThreshold',
perms.permutations['modelParams']['tmParams'])
self.assertIn('minThreshold', perms.permutations['modelParams']['tmParams'])
# Make sure the right metrics were put in
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) \
for metric in metrics]
self.assertIn(('multiStep',
'multiStepBestPredictions',
{'window': 1000, 'steps': [1, 5], 'errorMetric': 'aae'}),
metricTuples)
# Test running it
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
# --------------------------------------
# If we put the 5 step first, we should still get a list of steps to
# optimize over
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceArgs']['predictionSteps'] = [5, 1]
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.minimize,
"multiStepBestPredictions:multiStep:errorMetric='altMAPE':" \
+ "steps=\\[5, 1\\]:window=1000:field=consumption")
# --------------------------------------
# If we specify NonTemporal, we shouldn't permute over TM parameters
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceType'] = 'NontemporalMultiStep'
(base, perms) = self.getModules(expDesc2)
self.assertEqual(base.config['modelParams']['inferenceType'],
expDesc2['inferenceType'])
self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
expDesc2['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'],
expDesc2['inferenceArgs']['predictedField'])
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
self.assertNotIn('inferenceType', perms.permutations['modelParams'])
self.assertNotIn('activationThreshold',
perms.permutations['modelParams']['tmParams'])
self.assertNotIn('minThreshold',
perms.permutations['modelParams']['tmParams'])
# Make sure the right metrics were put in
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) \
for metric in metrics]
self.assertIn(('multiStep',
'multiStepBestPredictions',
{'window': 1000, 'steps': [1, 5], 'errorMetric': 'aae'}),
metricTuples)
# Test running it
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
# --------------------------------------
# If we specify just generic MultiStep, we should permute over the inference
# type
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceType'] = 'MultiStep'
(base, perms) = self.getModules(expDesc2)
self.assertEqual(base.config['modelParams']['inferenceType'],
'TemporalMultiStep')
self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
expDesc2['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'],
expDesc2['inferenceArgs']['predictedField'])
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
self.assertIn('inferenceType', perms.permutations['modelParams'])
self.assertIn('activationThreshold',
perms.permutations['modelParams']['tmParams'])
self.assertIn('minThreshold', perms.permutations['modelParams']['tmParams'])
# Make sure the right metrics were put in
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) \
for metric in metrics]
self.assertIn(('multiStep',
'multiStepBestPredictions',
{'window': 1000, 'steps': [1,5], 'errorMetric': 'aae'}),
metricTuples)
# Test running it
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
# ---------------------------------------------------------------------
# If the caller sets inferenceArgs.inputPredictedField, make
# sure the permutations file has the same setting
expDesc2 = copy.deepcopy(expDesc)
expDesc2["inferenceArgs"]["inputPredictedField"] = "yes"
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.inputPredictedField, "yes")
expDesc2 = copy.deepcopy(expDesc)
expDesc2["inferenceArgs"]["inputPredictedField"] = "no"
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.inputPredictedField, "no")
expDesc2 = copy.deepcopy(expDesc)
expDesc2["inferenceArgs"]["inputPredictedField"] = "auto"
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.inputPredictedField, "auto")
# ---------------------------------------------------------------------
# If the caller sets inferenceArgs.inputPredictedField to 'no', make
# sure there is no encoder for the predicted field
expDesc2 = copy.deepcopy(expDesc)
expDesc2["inferenceArgs"]["inputPredictedField"] = "no"
(base, perms) = self.getModules(expDesc2)
self.assertNotIn(
'consumption',
base.config['modelParams']['sensorParams']['encoders'].keys())
def test_DeltaEncoders(self):
  """Delta ('ScalarSpaceEncoder') encoder generation.

  A float field with runDelta=True must yield a ScalarSpaceEncoder.  When
  no explicit 'space' is given, the permutations permute over the space
  choices; when 'space' is pinned to 'delta', both generated files carry
  it through verbatim.
  """
  streamDef = {
    'version': 1,
    'info': "test_NoProviders",
    'streams': [dict(source="file://%s" % HOTGYM_INPUT,
                     info="hotGym.csv",
                     columns=["*"])],
  }

  def makeExpDesc(consumptionFieldSpec):
    # Build the experiment description around a given consumption field spec.
    return {
      "inferenceType": "TemporalMultiStep",
      "inferenceArgs": {"predictedField": "consumption"},
      'environment': OpfEnvironment.Nupic,
      "streamDef": streamDef,
      "includedFields": [
        {"fieldName": "timestamp", "fieldType": "datetime"},
        consumptionFieldSpec,
      ],
    }

  def extractEncoders(baseMod, permsMod):
    # Pull the 'consumption' encoder out of both generated modules.
    baseEnc = baseMod.config["modelParams"]["sensorParams"] \
                            ["encoders"]["consumption"]
    permEnc = permsMod.permutations["modelParams"]["sensorParams"] \
                                   ["encoders"]["consumption"]
    return baseEnc, permEnc

  # Case 1: runDelta without an explicit space -> space is permuted over.
  (base, perms) = self.getModules(makeExpDesc({"fieldName": "consumption",
                                               "fieldType": "float",
                                               "minValue": 0,
                                               "maxValue": 200,
                                               "runDelta": True}))
  encoder, encoderPerm = extractEncoders(base, perms)
  self.assertEqual(encoder["type"], "ScalarSpaceEncoder")
  self.assertIsInstance(encoderPerm.kwArgs['space'], PermuteChoices)

  # Case 2: explicit space='delta' -> carried through unchanged.
  (base, perms) = self.getModules(makeExpDesc({"fieldName": "consumption",
                                               "fieldType": "float",
                                               "minValue": 0,
                                               "maxValue": 200,
                                               "runDelta": True,
                                               "space": "delta"}))
  encoder, encoderPerm = extractEncoders(base, perms)
  self.assertEqual(encoder["type"], "ScalarSpaceEncoder")
  self.assertEqual(encoder["space"], "delta")
  self.assertEqual(encoderPerm.kwArgs['space'], "delta")
def test_AggregationSwarming(self):
  """ Test the we correctly generate a multi-step prediction experiment that
  uses aggregation swarming

  Exercises assertValidSwarmingAggregations with several combinations of
  minimum stream aggregation, computeInterval and predictionSteps, then
  verifies that invalid combinations raise.
  """
  # The min aggregation
  minAggregation = {
    'years': 0,
    'months': 0,
    'weeks': 0,
    'days': 0,
    'hours': 0,
    'minutes': 15,
    'seconds': 0,
    'milliseconds': 0,
    'microseconds': 0,
  }
  # Stream aggregation = min aggregation plus the field-level aggregators.
  streamAggregation = dict(minAggregation)
  streamAggregation.update({
    'fields': [('consumption', 'sum'),
               ('gym', 'first'),
               ('timestamp', 'first')]
  })
  # Form the stream definition
  streamDef = dict(
    version = 1,
    info = "test_NoProviders",
    streams = [
      dict(source="file://%s" % HOTGYM_INPUT,
           info="hotGym.csv",
           columns=["*"],
           last_record=10),
      ],
    aggregation = streamAggregation,
    )
  # Generate the experiment description: predict 24 aggregation periods
  # (24 x 15 min = 6 hours) ahead, computing every 2 hours.
  expDesc = {
    'environment': OpfEnvironment.Nupic,
    "inferenceArgs":{
      "predictedField":"consumption",
      "predictionSteps": [24],
    },
    "inferenceType": "TemporalMultiStep",
    "streamDef": streamDef,
    "includedFields": [
      { "fieldName": "timestamp",
        "fieldType": "datetime"
      },
      { "fieldName": "consumption",
        "fieldType": "float",
      },
    ],
    "iterationCount": -1,
    "runBaselines": False,
    "computeInterval": {
      'hours': 2
    }
  }
  # ------------------------------------------------------------------------
  # Test running it
  #self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
  # --------------------------------------------------------------------
  # Check for consistency. (example 1)
  # The expectedAttempts parameter is a list of
  # (minAggregationMultiple, predictionSteps) pairs that will be attempted
  self.assertValidSwarmingAggregations(expDesc = expDesc,
      expectedAttempts = [(1, 24), (2, 12), (4, 6), (8, 3)])
  # --------------------------------------------------------------------
  # Try where there are lots of possible aggregations that we only try
  # the last 5
  expDescTmp = copy.deepcopy(expDesc)
  expDescTmp['streamDef']['aggregation']['minutes'] = 1
  expDescTmp['inferenceArgs']['predictionSteps'] = \
      [4*60/1] # 4 hours / 1 minute
  self.assertValidSwarmingAggregations(expDesc = expDescTmp,
      expectedAttempts = [(24, 10), (30, 8), (40, 6), (60, 4), (120, 2)])
  # --------------------------------------------------------------------
  # Make sure computeInterval is honored (example 2)
  expDescTmp = copy.deepcopy(expDesc)
  expDescTmp['computeInterval']['hours'] = 3
  expDescTmp['inferenceArgs']['predictionSteps'] = [16] # 4 hours
  self.assertValidSwarmingAggregations(expDesc = expDescTmp,
      expectedAttempts = [(1,16), (2, 8), (4, 4)])
  # --------------------------------------------------------------------
  # Make sure computeInterval in combination with predictAheadTime is honored
  expDescTmp = copy.deepcopy(expDesc)
  expDescTmp['computeInterval']['hours'] = 2
  expDescTmp['inferenceArgs']['predictionSteps'] = [16] # 4 hours
  self.assertValidSwarmingAggregations(expDesc = expDescTmp,
      expectedAttempts = [(1,16), (2, 8), (4, 4), (8, 2)])
  # --------------------------------------------------------------------
  # Make sure we catch bad cases:
  # computeInterval must be >= minAggregation
  expDescTmp = copy.deepcopy(expDesc)
  expDescTmp['computeInterval']['hours'] = 0
  expDescTmp['computeInterval']['minutes'] = 1
  with self.assertRaises(Exception) as cm:
    self.assertValidSwarmingAggregations(expDesc = expDescTmp,
        expectedAttempts = [(1, 16), (2, 8), (4, 4), (8, 2)])
  LOGGER.info("Got expected exception: %s", cm.exception)
  # computeInterval must be an integer multiple of minAggregation
  expDescTmp = copy.deepcopy(expDesc)
  expDescTmp['computeInterval']['hours'] = 0
  expDescTmp['computeInterval']['minutes'] = 25
  with self.assertRaises(Exception) as cm:
    self.assertValidSwarmingAggregations(expDesc = expDescTmp,
        expectedAttempts = [(1, 16), (2, 8), (4, 4), (8, 2)])
  LOGGER.info("Got expected exception: %s", cm.exception)
  # More than 1 predictionSteps passed in
  expDescTmp = copy.deepcopy(expDesc)
  expDescTmp['inferenceArgs']['predictionSteps'] = [1, 16]
  with self.assertRaises(Exception) as cm:
    self.assertValidSwarmingAggregations(expDesc = expDescTmp,
        expectedAttempts = [(1, 16), (2, 8), (4, 4), (8, 2)])
  LOGGER.info("Got expected exception: %s", cm.exception)
  # No stream aggregation
  expDescTmp = copy.deepcopy(expDesc)
  expDescTmp['streamDef']['aggregation']['minutes'] = 0
  with self.assertRaises(Exception) as cm:
    self.assertValidSwarmingAggregations(expDesc = expDescTmp,
        expectedAttempts = [(1, 16), (2, 8), (4, 4), (8, 2)])
  LOGGER.info("Got expected exception: %s", cm.exception)
def test_SwarmSize(self):
  """ Test correct behavior in response to different settings in the
  swarmSize element

  Checks the generated iterationCount, minParticlesPerSwarm, maxModels
  and inputPredictedField for 'large', 'medium' and 'small' swarms, and
  that explicit overrides win over the swarmSize presets.
  """
  # Form the stream definition
  streamDef = dict(
    version = 1,
    info = "test_NoProviders",
    streams = [
      dict(source="file://%s" % HOTGYM_INPUT,
           info="hotGym.csv",
           columns=["*"]),
      ],
    )
  # Generate the experiment description
  expDesc = {
    "swarmSize": "large",
    "inferenceType":"TemporalNextStep",
    "inferenceArgs":{
      "predictedField":"consumption"
    },
    'environment':OpfEnvironment.Nupic,
    "streamDef": streamDef,
    "includedFields": [
      { "fieldName": "timestamp",
        "fieldType": "datetime"
      },
      { "fieldName": "consumption",
        "fieldType": "float",
        "minValue": 0,
        "maxValue": 200,
      },
    ],
    "resetPeriod": {"days" : 1, "hours" : 12},
  }
  # --------------------------------------------------------------------
  # Test out "large" swarm generation: unbounded iterations, 15 particles
  # per swarm, no maxModels cap.
  (base, perms) = self.getModules(expDesc)
  self.assertEqual(base.control['iterationCount'], -1,
                   msg="got: %s" % base.control['iterationCount'])
  self.assertEqual(perms.minParticlesPerSwarm, 15,
                   msg="got: %s" % perms.minParticlesPerSwarm)
  # Temporarily disable new large swarm features
  #self.assertEqual(perms.killUselessSwarms, False,
  #                 msg="got: %s" % perms.killUselessSwarms)
  #self.assertEqual(perms.minFieldContribution, -1000,
  #                 msg="got: %s" % perms.minFieldContribution)
  #self.assertEqual(perms.maxFieldBranching, 10,
  #                 msg="got: %s" % perms.maxFieldBranching)
  #self.assertEqual(perms.tryAll3FieldCombinations, True,
  #                 msg="got: %s" % perms.tryAll3FieldCombinations)
  self.assertEqual(perms.tryAll3FieldCombinationsWTimestamps, True,
                   msg="got: %s" % perms.tryAll3FieldCombinationsWTimestamps)
  self.assertFalse(hasattr(perms, 'maxModels'))
  # Should set inputPredictedField to "auto"
  self.assertEqual(perms.inputPredictedField, "auto")
  # --------------------------------------------------------------------
  # Test it out with medium swarm: capped at 4000 iterations / 200 models.
  expDesc["swarmSize"] = "medium"
  (base, perms) = self.getModules(expDesc)
  self.assertEqual(base.control['iterationCount'], 4000,
                   msg="got: %s" % base.control['iterationCount'])
  self.assertEqual(perms.minParticlesPerSwarm, 5,
                   msg="got: %s" % perms.minParticlesPerSwarm)
  self.assertEqual(perms.maxModels, 200,
                   msg="got: %s" % perms.maxModels)
  self.assertFalse(hasattr(perms, 'killUselessSwarms'))
  self.assertFalse(hasattr(perms, 'minFieldContribution'))
  self.assertFalse(hasattr(perms, 'maxFieldBranching'))
  self.assertFalse(hasattr(perms, 'tryAll3FieldCombinations'))
  # Should set inputPredictedField to "auto"
  self.assertEqual(perms.inputPredictedField, "auto")
  # --------------------------------------------------------------------
  # Test it out with small swarm: a single model, 100 iterations, and
  # the predicted field always fed in.
  expDesc["swarmSize"] = "small"
  (base, perms) = self.getModules(expDesc)
  self.assertEqual(base.control['iterationCount'], 100,
                   msg="got: %s" % base.control['iterationCount'])
  self.assertEqual(perms.minParticlesPerSwarm, 3,
                   msg="got: %s" % perms.minParticlesPerSwarm)
  self.assertEqual(perms.maxModels, 1,
                   msg="got: %s" % perms.maxModels)
  self.assertFalse(hasattr(perms, 'killUselessSwarms'))
  self.assertFalse(hasattr(perms, 'minFieldContribution'))
  self.assertFalse(hasattr(perms, 'maxFieldBranching'))
  self.assertFalse(hasattr(perms, 'tryAll3FieldCombinations'))
  # Should set inputPredictedField to "yes"
  self.assertEqual(perms.inputPredictedField, "yes")
  # --------------------------------------------------------------------
  # Test it out with all of swarmSize, minParticlesPerSwarm, iteration
  # count, and inputPredictedField specified: explicit values override
  # the 'small' preset.
  expDesc["swarmSize"] = "small"
  expDesc["minParticlesPerSwarm"] = 2
  expDesc["iterationCount"] = 42
  expDesc["inferenceArgs"]["inputPredictedField"] = "auto"
  (base, perms) = self.getModules(expDesc)
  self.assertEqual(base.control['iterationCount'], 42,
                   msg="got: %s" % base.control['iterationCount'])
  self.assertEqual(perms.minParticlesPerSwarm, 2,
                   msg="got: %s" % perms.minParticlesPerSwarm)
  self.assertEqual(perms.maxModels, 1,
                   msg="got: %s" % perms.maxModels)
  self.assertFalse(hasattr(perms, 'killUselessSwarms'))
  self.assertFalse(hasattr(perms, 'minFieldContribution'))
  self.assertFalse(hasattr(perms, 'maxFieldBranching'))
  self.assertFalse(hasattr(perms, 'tryAll3FieldCombinations'))
  self.assertEqual(perms.inputPredictedField, "auto")
  # Test running it: maxModels=1 means exactly one model result expected.
  modelResults = self.runBaseDescriptionAndPermutations(
      expDesc, hsVersion='v2', maxModels=None)
  self.assertEqual(len(modelResults), 1, "Expected to get %d model "
      "results but only got %d" % (1, len(modelResults)))
def test_FixedFields(self):
  """fixedFields swarming option.

  When present in the JSON description it must be forwarded verbatim into
  the permutations script; when absent, the permutations script must not
  define a fixedFields attribute at all.
  """
  # Stream definition: all columns of the hotgym CSV.
  streamDef = {
    'version': 1,
    'info': "test_NoProviders",
    'streams': [dict(source="file://%s" % HOTGYM_INPUT,
                     info="hotGym.csv",
                     columns=["*"])],
  }
  pinnedFields = ['consumption', 'timestamp']
  # Experiment description with fixedFields set.
  expDesc = {
    "swarmSize": "large",
    "inferenceType": "TemporalNextStep",
    "inferenceArgs": {
      "predictedField": "consumption"
    },
    'environment': OpfEnvironment.Nupic,
    "streamDef": streamDef,
    "includedFields": [
      {"fieldName": "timestamp",
       "fieldType": "datetime"},
      {"fieldName": "consumption",
       "fieldType": "float",
       "minValue": 0,
       "maxValue": 200},
    ],
    "resetPeriod": {"days": 1, "hours": 12},
    "fixedFields": pinnedFields,
  }
  # Present in the description -> present (verbatim) in the permutations.
  (_base, perms) = self.getModules(expDesc)
  self.assertEqual(perms.fixedFields, pinnedFields,
                   msg="got: %s" % perms.fixedFields)
  # Absent from the description -> absent from the permutations script.
  del expDesc['fixedFields']
  (_base, perms) = self.getModules(expDesc)
  self.assertFalse(hasattr(perms, 'fixedFields'))
def test_FastSwarmModelParams(self):
  """ Test correct behavior in response to setting the fastSwarmModelParams
  swarming option.

  When present in the JSON description it must be forwarded verbatim into
  the permutations script; when absent, the permutations script must not
  define the attribute at all.
  """
  # Form the stream definition
  streamDef = dict(
    version = 1,
    info = "test_NoProviders",
    streams = [
      dict(source="file://%s" % HOTGYM_INPUT,
           info="hotGym.csv",
           columns=["*"]),
      ],
    )
  # The value is opaque to the generator, so any dict will do.
  fastSwarmModelParams = {'this is': 'a test'}
  # Generate the experiment description
  expDesc = {
    "swarmSize": "large",
    "inferenceType":"TemporalNextStep",
    "inferenceArgs":{
      "predictedField":"consumption"
    },
    'environment':OpfEnvironment.Nupic,
    "streamDef": streamDef,
    "includedFields": [
      { "fieldName": "timestamp",
        "fieldType": "datetime"
      },
      { "fieldName": "consumption",
        "fieldType": "float",
        "minValue": 0,
        "maxValue": 200,
      },
    ],
    "resetPeriod": {"days" : 1, "hours" : 12},
    "fastSwarmModelParams": fastSwarmModelParams,
  }
  # --------------------------------------------------------------------
  # Test out using fastSwarmModelParams (comment previously said
  # "fieldFields" -- copy/paste from test_FixedFields)
  (_base, perms) = self.getModules(expDesc)
  self.assertEqual(perms.fastSwarmModelParams, fastSwarmModelParams,
                   msg="got: %s" % perms.fastSwarmModelParams)
  # Should be excluded from permutations script if not part of the JSON
  # description
  expDesc.pop('fastSwarmModelParams')
  (_base, perms) = self.getModules(expDesc)
  self.assertFalse(hasattr(perms, 'fastSwarmModelParams'))
def test_AnomalyParams(self):
  """ Test correct behavior in response to setting the anomalyParams
  experiment description options

  Verifies that anomalyParams from the JSON description land in the
  generated base description's modelParams.
  """
  # Form the stream definition
  streamDef = dict(
    version = 1,
    info = "test_NoProviders",
    streams = [
      dict(source="file://%s" % HOTGYM_INPUT,
           info="hotGym.csv",
           columns=["*"]),
      ],
    )
  # Generate the experiment description
  expDesc = {
    'environment': OpfEnvironment.Nupic,
    "inferenceArgs":{
      "predictedField":"consumption",
      "predictionSteps": [1],
    },
    "inferenceType": "TemporalAnomaly",
    "streamDef": streamDef,
    "includedFields": [
      { "fieldName": "timestamp",
        "fieldType": "datetime"
      },
      { "fieldName": "consumption",
        "fieldType": "float",
      },
    ],
    "iterationCount": -1,
    "anomalyParams": {
      "autoDetectThreshold": 1.1,
      "autoDetectWaitRecords": 0,
      "anomalyCacheRecords": 10
    }
  }
  # --------------------------------------------------------------------
  (base, _perms) = self.getModules(expDesc)
  # Make sure we have the expected info in the base description file
  self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
                   expDesc['inferenceArgs']['predictionSteps'])
  self.assertEqual(base.control['inferenceArgs']['predictedField'],
                   expDesc['inferenceArgs']['predictedField'])
  self.assertEqual(base.config['modelParams']['inferenceType'],
                   expDesc['inferenceType'])
  self.assertEqual(base.config['modelParams']['anomalyParams'],
                   expDesc['anomalyParams'])
  # Only TemporalAnomaly models will have and use anomalyParams
  # NOTE(review): despite the comment above, the assertions below expect
  # anomalyParams to be present in the generated config for a
  # TemporalNextStep model as well (just unused) -- confirm intent.
  expDesc['inferenceType'] = 'TemporalNextStep'
  (base, _perms) = self.getModules(expDesc)
  # Make sure we have the expected info in the base description file
  self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
                   expDesc['inferenceArgs']['predictionSteps'])
  self.assertEqual(base.control['inferenceArgs']['predictedField'],
                   expDesc['inferenceArgs']['predictedField'])
  self.assertEqual(base.config['modelParams']['inferenceType'],
                   expDesc['inferenceType'])
  self.assertEqual(base.config['modelParams']['anomalyParams'],
                   expDesc['anomalyParams'])
def test_NontemporalClassification(self):
  """ Test the we correctly generate a Nontemporal classification experiment

  predictionSteps [0] must resolve to NontemporalClassification with SP
  and TM disabled; explicit 'NontemporalClassification' must produce an
  identical config; and invalid combinations must raise.
  """
  # Form the stream definition
  streamDef = dict(
    version = 1,
    info = "test_NoProviders",
    streams = [
      dict(source="file://%s" % HOTGYM_INPUT,
           info="hotGym.csv",
           columns=["*"],
           last_record=10),
      ],
    aggregation = {
      'years': 0,
      'months': 0,
      'weeks': 0,
      'days': 0,
      'hours': 1,
      'minutes': 0,
      'seconds': 0,
      'milliseconds': 0,
      'microseconds': 0,
      'fields': [('consumption', 'sum'),
                 ('gym', 'first'),
                 ('timestamp', 'first')]
    }
  )
  # Generate the experiment description
  expDesc = {
    'environment': OpfEnvironment.Nupic,
    "inferenceArgs":{
      "predictedField":"consumption",
      "predictionSteps": [0],
    },
    "inferenceType": "TemporalMultiStep",
    "streamDef": streamDef,
    "includedFields": [
      { "fieldName": "timestamp",
        "fieldType": "datetime"
      },
      { "fieldName": "consumption",
        "fieldType": "float",
      },
    ],
    "iterationCount": -1,
    "runBaselines": True,
  }
  # --------------------------------------------------------------------
  (base, perms) = self.getModules(expDesc)
  # Make sure we have the expected info in the base description file
  self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
                   expDesc['inferenceArgs']['predictionSteps'])
  self.assertEqual(base.control['inferenceArgs']['predictedField'],
                   expDesc['inferenceArgs']['predictedField'])
  # predictionSteps [0] forces NontemporalClassification regardless of the
  # requested TemporalMultiStep.
  self.assertEqual(base.config['modelParams']['inferenceType'],
                   InferenceType.NontemporalClassification)
  self.assertEqual(base.config['modelParams']['sensorParams']['encoders']
                   ['_classifierInput']['classifierOnly'], True)
  self.assertEqual(base.config['modelParams']['sensorParams']['encoders']
                   ['_classifierInput']['fieldname'],
                   expDesc['inferenceArgs']['predictedField'])
  self.assertNotIn('consumption',
      base.config['modelParams']['sensorParams']['encoders'].keys())
  # The SP and TM should both be disabled
  self.assertFalse(base.config['modelParams']['spEnable'])
  self.assertFalse(base.config['modelParams']['tmEnable'])
  # Check permutations file
  self.assertNotIn('inferenceType', perms.permutations['modelParams'])
  self.assertEqual(perms.minimize,
                   "multiStepBestPredictions:multiStep:errorMetric='altMAPE':" \
                   + "steps=\\[0\\]:window=1000:field=consumption")
  self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
  # Should have no SP or TM params to permute over
  self.assertEqual(perms.permutations['modelParams']['tmParams'], {})
  self.assertEqual(perms.permutations['modelParams']['spParams'], {})
  # Make sure the right metrics were put in
  metrics = base.control['metrics']
  metricTuples = [(metric.metric, metric.inferenceElement, metric.params) \
                  for metric in metrics]
  self.assertIn(('multiStep',
                 'multiStepBestPredictions',
                 {'window': 1000, 'steps': [0], 'errorMetric': 'aae'}),
                metricTuples)
  # Test running it
  self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
  # --------------------------------------
  # If we specify NonTemporalClassification, we should get the same
  # description and permutations files
  expDesc2 = copy.deepcopy(expDesc)
  expDesc2['inferenceType'] = 'NontemporalClassification'
  (newBase, _newPerms) = self.getModules(expDesc2)
  self.assertEqual(base.config, newBase.config)
  # --------------------------------------
  # If we specify NonTemporalClassification, prediction steps MUST be [0]
  # (was a bare `except:` + gotException flag; use assertRaises for
  # consistency with the rest of this file and to avoid swallowing
  # non-Exception interrupts)
  expDesc2 = copy.deepcopy(expDesc)
  expDesc2['inferenceType'] = 'NontemporalClassification'
  expDesc2['inferenceArgs']['predictionSteps'] = [1]
  with self.assertRaises(Exception):
    self.getModules(expDesc2)
  # --------------------------------------
  # If we specify NonTemporalClassification, inferenceArgs.inputPredictedField
  # can not be 'yes'
  expDesc2 = copy.deepcopy(expDesc)
  expDesc2["inferenceArgs"]["inputPredictedField"] = "yes"
  with self.assertRaises(Exception):
    self.getModules(expDesc2)
  return
def _executeExternalCmdAndReapStdout(args):
  """Run an external command and capture its exit status and output.

  args: Args list as defined for the args parameter in subprocess.Popen()

  Returns: result dictionary:
    {
      'exitStatus':<exit-status-of-external-command>,
      'stdoutData':"string",
      'stderrData':"string"
    }
  """
  _debugOut(("_executeExternalCmdAndReapStdout: Starting...\n<%s>") % (args,))
  # Launch with the current environment, capturing both output streams.
  proc = subprocess.Popen(args,
                          env=os.environ,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
  _debugOut(("Process started for <%s>") % (args,))
  # communicate() reaps the child and drains both pipes to completion.
  stdoutData, stderrData = proc.communicate()
  _debugOut(("Process completed for <%s>: exit status=%s, " +
             "stdoutDataType=%s, stdoutData=<%s>, stderrData=<%s>") % \
                (args, proc.returncode, type(stdoutData), stdoutData,
                 stderrData))
  result = {
    'exitStatus': proc.returncode,
    'stdoutData': stdoutData,
    'stderrData': stderrData,
  }
  _debugOut(("_executeExternalCmdAndReapStdout for <%s>: result=\n%s") % \
               (args, pprint.pformat(result, indent=4)))
  return result
def _debugOut(text):
  """Log *text* via LOGGER.info, but only when the module-level g_debug
  flag is set."""
  if not g_debug:
    return
  LOGGER.info(text)
def _getTestList():
  """ Get the list of tests that can be run from this module.

  Returns a list of 'SuiteName.testName' strings, one per method whose
  name starts with 'test' in each known suite class.
  """
  suiteNames = ['PositiveExperimentTests']
  testNames = []
  for suiteName in suiteNames:
    # Look the suite class up in module globals instead of eval()'ing the
    # name -- equivalent behavior without the eval anti-pattern.
    suiteClass = globals()[suiteName]
    for attrName in dir(suiteClass):
      if attrName.startswith('test'):
        testNames.append('%s.%s' % (suiteName, attrName))
  return testNames
if __name__ == '__main__':
  LOGGER.info("\nCURRENT DIRECTORY: %s", os.getcwd())
  helpString = \
    """%prog [options] [suitename.testname | suitename]...
  Run the Hypersearch unit tests. Available suitename.testnames: """
  # Update help string with the discoverable tests
  allTests = _getTestList()
  for test in allTests:
    helpString += "\n %s" % (test)
  # ============================================================================
  # Process command line arguments
  parser = OptionParser(helpString)
  # Our custom options (that don't get passed to unittest):
  customOptions = ['--installDir', '--verbosity', '--logLevel']
  parser.add_option("--installDir", dest="installDir",
        default=resource_filename("nupic", ""),
        help="Path to the NTA install directory [default: %default].")
  parser.add_option("--verbosity", default=0, type="int",
        help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
  parser.add_option("--logLevel", action="store", type="int",
        default=logging.INFO,
        help="override default log level. Pass in an integer value that "
             "represents the desired logging level (10=logging.DEBUG, "
             "20=logging.INFO, etc.) [default: %default].")
  # The following are put here to document what is accepted by the unittest
  # module - we don't actually use them in this code base.
  # BUGFIX: --verbose previously defaulted to os.environ['NUPIC'], which
  # raised KeyError when the NUPIC env var was unset and was a nonsensical
  # default for a verbosity flag; use None like the other pass-through
  # options.
  parser.add_option("--verbose", dest="verbose", default=None,
        help="Verbose output")
  parser.add_option("--quiet", dest="quiet", default=None,
        help="Minimal output")
  parser.add_option("--failfast", dest="failfast", default=None,
        help="Stop on first failure")
  parser.add_option("--catch", dest="catch", default=None,
        help="Catch control-C and display results")
  parser.add_option("--buffer", dest="buffer", default=None,
        help="Buffer stdout and stderr during test runs")
  (options, args) = parser.parse_args()
  # Setup our environment
  g_myEnv = MyTestEnvironment(options)
  # Remove our private options so unittest.main() doesn't choke on them
  args = sys.argv[:]
  for arg in sys.argv:
    for option in customOptions:
      if arg.startswith(option):
        args.remove(arg)
        break
  # Run the tests
  unittest.main(argv=args)
| 68,101 | Python | .py | 1,639 | 33.728493 | 86 | 0.597707 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,172 | opf_experiment_results_test.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_experiment_results_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file tests specific experiments to see if they are providing the
correct results. These are high level tests of the algorithms themselves.
"""
import os
import shutil
from subprocess import call
import time
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
class OPFExperimentResultsTest(unittest.TestCase):
  """End-to-end regression test: runs a fixed set of OPF experiments in
  subprocesses (via run_opf_experiment.py) and checks that the value in the
  last row of each prediction log falls inside a hand-tuned (min, max) range.
  """

  def testExperimentResults(self):
    """Run specific experiments and verify that they are producing the correct
    results.

    opfDir is the examples/opf directory in the install path
    and is used to find run_opf_experiment.py

    The testdir is the directory that contains the experiments we will be
    running. When running in the auto-build setup, this will be a temporary
    directory that has had this script, as well as the specific experiments
    we will be running, copied into it by the qa/autotest/prediction_results.py
    script.
    When running stand-alone from the command line, this will point to the
    examples/prediction directory in the install tree (same as predictionDir)

    """
    # Locate the nupic source root relative to this test file, then the
    # examples/opf directory which holds the experiment definitions.
    nupic_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "..", "..", "..", "..")
    opfDir = os.path.join(nupic_dir, "examples", "opf")

    testDir = opfDir

    # The testdir is the directory that contains the experiments we will be
    # running. When running in the auto-build setup, this will be a temporary
    # directory that has had this script, as well as the specific experiments
    # we will be running, copied into it by the
    # qa/autotest/prediction_results.py script.
    # When running stand-alone from the command line, we can simply point to the
    # examples/prediction directory in the install tree.
    # NOTE(review): testDir was just assigned opfDir above, so this fallback
    # re-assignment is a no-op — presumably left over from an older layout
    # where testDir could start out different. Confirm intent before removing.
    if not os.path.exists(os.path.join(testDir, "experiments/classification")):
      testDir = opfDir

    # Generate any dynamically generated datasets now
    command = ['python', os.path.join(testDir, 'experiments', 'classification',
                                      'makeDatasets.py')]
    retval = call(command)
    self.assertEqual(retval, 0)

    # Generate any dynamically generated datasets now
    command = ['python', os.path.join(testDir, 'experiments', 'multistep',
                                      'make_datasets.py')]
    retval = call(command)
    self.assertEqual(retval, 0)

    # Generate any dynamically generated datasets now
    command = ['python', os.path.join(testDir, 'experiments',
                                      'spatial_classification',
                                      'make_datasets.py')]
    retval = call(command)
    self.assertEqual(retval, 0)

    # Run from the test directory so that we can find our experiments
    os.chdir(testDir)

    runExperiment = os.path.join(nupic_dir, "scripts", "run_opf_experiment.py")

    # A list of experiments to run.  Valid attributes:
    #   experimentDir - Required, path to the experiment directory containing
    #                       description.py
    #   args          - optional. List of arguments for run_opf_experiment
    #   results       - A dictionary of expected results. The keys are tuples
    #                    containing (predictionLogFileName, columnName). The
    #                    value is a (min, max) expected value from the last row
    #                    in the prediction log.
    multistepTests = [
      # For this one, in theory the error for 1 step should be < 0.20
      { 'experimentDir': 'experiments/multistep/simple_0',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
                    (0.0, 0.20),
        }
      },

      # For this one, in theory the error for 1 step should be < 0.50, but we
      # get slightly higher because our sample size is smaller than ideal
      { 'experimentDir': 'experiments/multistep/simple_0_f2',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
                    (0.0, 0.66),
        }
      },

      # For this one, in theory the error for 1 step should be < 0.20
      { 'experimentDir': 'experiments/multistep/simple_1',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
                    (0.0, 0.20),
        }
      },

      # For this test, we haven't figured out the theoretical error, this
      # error is determined empirically from actual results
      { 'experimentDir': 'experiments/multistep/simple_1_f2',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
                    (0.0, 3.76),
        }
      },

      # For this one, in theory the error for 1 step should be < 0.20, but we
      # get slightly higher because our sample size is smaller than ideal
      { 'experimentDir': 'experiments/multistep/simple_2',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
                    (0.0, 0.31),
        }
      },

      # For this one, in theory the error for 1 step should be < 0.10 and for
      # 3 step < 0.30, but our actual results are better.
      { 'experimentDir': 'experiments/multistep/simple_3',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
                    (0.0, 0.06),
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=3:window=200:field=field1"):
                    (0.0, 0.20),
        }
      },

      # For this test, we haven't figured out the theoretical error, this
      # error is determined empirically from actual results
      { 'experimentDir': 'experiments/multistep/simple_3_f2',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
                    (0.0, 0.6),
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='aae':steps=3:window=200:field=field2"):
                    (0.0, 1.8),
        }
      },

      # Test missing record support.
      # Should have 0 error by the end of the dataset
      { 'experimentDir': 'experiments/missing_record/simple_0',
        'results': {
          ('DefaultTask.NontemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=25:field=field1"):
                    (1.0, 1.0),
        }
      },

    ] # end of multistepTests

    classificationTests = [
      # ----------------------------------------------------------------------
      # Classification Experiments
      { 'experimentDir': 'experiments/classification/category_hub_TP_0',
        'results': {
            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classification:avg_err:window=200'): (0.0, 0.020),
          }
      },

      { 'experimentDir': 'experiments/classification/category_TM_0',
        'results': {
            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classification:avg_err:window=200'): (0.0, 0.045),

            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classConfidences:neg_auc:computeEvery=10:window=200'): (-1.0, -0.98),
          }
      },

      { 'experimentDir': 'experiments/classification/category_TM_1',
        'results': {
            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classification:avg_err:window=200'): (0.0, 0.005),
          }
      },

      { 'experimentDir': 'experiments/classification/scalar_TP_0',
        'results': {
            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classification:avg_err:window=200'): (0.0, 0.155),

            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classConfidences:neg_auc:computeEvery=10:window=200'): (-1.0, -0.900),
          }
      },

      { 'experimentDir': 'experiments/classification/scalar_TP_1',
        'results': {
            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classification:avg_err:window=200'): (0.0, 0.03),
          }
      },

    ] # End of classification tests

    spatialClassificationTests = [
      { 'experimentDir': 'experiments/spatial_classification/category_0',
        'results': {
            ('DefaultTask.NontemporalClassification.predictionLog.csv',
             "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=0:window=100:field=classification"):
                    (0.0, 0.05),
          }
      },

      { 'experimentDir': 'experiments/spatial_classification/category_1',
        'results': {
            ('DefaultTask.NontemporalClassification.predictionLog.csv',
             "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=0:window=100:field=classification"):
                    (0.0, 0.0),
          }
      },

      { 'experimentDir': 'experiments/spatial_classification/scalar_0',
        'results': {
            ('DefaultTask.NontemporalClassification.predictionLog.csv',
             "multiStepBestPredictions:multiStep:errorMetric='aae':steps=0:window=100:field=classification"):
                    (0.0, 0.025),
          }
      },

      { 'experimentDir': 'experiments/spatial_classification/scalar_1',
        'results': {
            ('DefaultTask.NontemporalClassification.predictionLog.csv',
             "multiStepBestPredictions:multiStep:errorMetric='aae':steps=0:window=100:field=classification"):
                    (-1e-10, 0.01),
          }
      },

    ]

    anomalyTests = [
      # ----------------------------------------------------------------------
      # Classification Experiments
      { 'experimentDir': 'experiments/anomaly/temporal/simple',
        'results': {
            ('DefaultTask.TemporalAnomaly.predictionLog.csv',
             'anomalyScore:passThruPrediction:window=1000:field=f'): (0.02,
                                                                      0.04),
          }
      },

    ] # End of anomaly tests

    tests = []
    tests += multistepTests
    tests += classificationTests
    tests += spatialClassificationTests
    tests += anomalyTests

    # Uncomment this to only run a specific experiment(s)
    #tests = tests[7:8]

    # This contains a list of tuples: (expDir, key, results)
    summaryOfResults = []
    startTime = time.time()

    testIdx = -1
    for test in tests:
      testIdx += 1
      expDirectory = test['experimentDir']

      # -------------------------------------------------------------------
      # Remove files/directories generated by previous tests:
      toDelete = []

      # Remove inference results
      path = os.path.join(expDirectory, "inference")
      toDelete.append(path)
      path = os.path.join(expDirectory, "savedmodels")
      toDelete.append(path)

      for path in toDelete:
        if not os.path.exists(path):
          continue
        print "Removing %s ..." % path
        if os.path.isfile(path):
          os.remove(path)
        else:
          shutil.rmtree(path)

      # ------------------------------------------------------------------------
      # Run the test.
      args = test.get('args', [])
      print "Running experiment %s ..." % (expDirectory)
      command = ['python', runExperiment, expDirectory] + args
      retVal = call(command)

      # If retVal is non-zero and this was not a negative test or if retVal is
      # zero and this is a negative test something went wrong.
      if retVal:
        print "Details of failed test: %s" % test
        print("TestIdx %d, OPF experiment '%s' failed with return code %i." %
              (testIdx, expDirectory, retVal))
      self.assertFalse(retVal)

      # -----------------------------------------------------------------------
      # Check the results
      for (key, expValues) in test['results'].items():
        (logFilename, colName) = key

        # Open the prediction log file
        logFile = FileRecordStream(os.path.join(expDirectory, 'inference',
                                                logFilename))
        colNames = [x[0] for x in logFile.getFields()]
        if not colName in colNames:
          print "TestIdx %d: %s not one of the columns in " \
            "prediction log file. Available column names are: %s" % (testIdx,
                    colName, colNames)
        self.assertTrue(colName in colNames)
        colIndex = colNames.index(colName)

        # Read till we get to the last line
        # (result ends up holding the value from the final row)
        while True:
          try:
            row = logFile.next()
          except StopIteration:
            break
        result = row[colIndex]

        # Save summary of results
        summaryOfResults.append((expDirectory, colName, result))

        print "Actual result for %s, %s:" % (expDirectory, colName), result
        print "Expected range:", expValues
        # A None bound means "unbounded" on that side.
        failed = (expValues[0] is not None and result < expValues[0]) \
            or (expValues[1] is not None and result > expValues[1])
        if failed:
          print ("TestIdx %d: Experiment %s failed. \nThe actual result"
             " for %s (%s) was outside the allowed range of %s" % (testIdx,
              expDirectory, colName, result, expValues))
        else:
          print "  Within expected range."
        self.assertFalse(failed)

    # =======================================================================
    # Print summary of results:
    print
    print "Summary of results in all experiments run:"
    print "========================================="

    prevExpDir = None
    for (expDir, key, results) in summaryOfResults:
      if expDir != prevExpDir:
        print
        print expDir
        prevExpDir = expDir
      print "  %s: %s" % (key, results)

    print "\nElapsed time: %.1f seconds" % (time.time() - startTime)
# Script entry point: run the test case above under unittest.
if __name__ == "__main__":
  unittest.main()
| 15,627 | Python | .py | 334 | 37.847305 | 114 | 0.608916 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,173 | opf_experiments_test.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_experiments_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from optparse import OptionParser
import os
import sys
import traceback
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.frameworks.opf.experiment_runner import (
runExperiment, initExperimentPrng)
# Globals
# Experiment directory names to skip; may be cleared/overridden from the CLI.
EXCLUDED_EXPERIMENTS = [] # none for now

# Source root of the nupic checkout, located relative to this test file.
NUPIC_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         "..", "..", "..", "..")

# Directory containing the OPF example experiments to run.
PREDICTION_DIR = os.path.join(NUPIC_DIR, "examples", "opf")

# When True, experiments run their full iteration counts (CLI '--long').
RUN_ALL_ITERATIONS = False
def getAllDirectoriesWithFile(path, filename, excludeDirs):
"""
Returns a list of directories in the <path> with a given <filename>, excluding
<excludeDirs>
"""
directoryList = []
for dirpath, dirnames, filenames in os.walk(path):
for d in dirnames[:]:
if d in excludeDirs:
dirnames.remove(d)
print "EXCLUDING %s..." % (os.path.join(dirpath, d))
# If this directory is UNDER_DEVELOPMENT, exclude it
elif 'UNDER_DEVELOPMENT' in os.listdir(os.path.join(dirpath, d)):
dirnames.remove(d)
print "EXCLUDING %s..." % (os.path.join(dirpath, d))
for f in filenames:
if f==filename:
directoryList.append(dirpath)
return directoryList
def getAllExperimentDirectories(excludedExperiments=None):
  """
  Experiment directories are the directories with a description.py file.

  :param excludedExperiments: optional sequence of additional directory names
      to exclude from the search; defaults to excluding nothing extra.
  :returns: list of directory paths under ./experiments (relative to the
      current working directory) that contain a description.py file.
  """
  # Fix: the original used a mutable default argument (excludedExperiments=[]),
  # a classic Python pitfall; use a None sentinel instead. Behavior for all
  # existing callers is unchanged.
  if excludedExperiments is None:
    excludedExperiments = []
  # Always skip well-known non-experiment directories.
  excludedDirectories = ['exp', 'inference', 'networks', 'legacy']
  excludedDirectories.extend(excludedExperiments)
  return getAllDirectoriesWithFile(
    path="experiments",
    filename="description.py",
    excludeDirs=excludedDirectories)
def runReducedExperiment(path, reduced=True):
  """
  Run the experiment found at <path>; when `reduced` is True, pass the OPF
  '--testMode' flag so the experiment runs a reduced iteration count.
  """
  # Re-seed the experiment PRNG so every run starts from a known state.
  initExperimentPrng()

  # Build the argument list for the OPF experiment runner and execute it.
  cmdLine = [path] + (['--testMode'] if reduced else [])
  runExperiment(cmdLine)
class OPFExperimentsTest(unittest.TestCase):
  """Smoke-tests every discoverable OPF example experiment by running it
  (usually with reduced iterations) and asserting that none of them fail.
  """

  def testExperiments(self):
    # Experiment paths are relative, so run from the examples/opf directory.
    os.chdir(PREDICTION_DIR)
    expDirPathList = getAllExperimentDirectories(EXCLUDED_EXPERIMENTS)
    self.assertTrue(len(expDirPathList) > 0)
    failedExperiments = []
    successExperiments = []
    for expDirPath in expDirPathList:
      # An UNDER_DEVELOPMENT marker file means the experiment isn't ready.
      if os.path.exists(os.path.join(expDirPath, "UNDER_DEVELOPMENT")):
        print "Skipping experiment: %s -- under development" % expDirPath
        continue
      print "Running experiment: %s" % expDirPath
      try:
        if RUN_ALL_ITERATIONS:
          # Full iteration count requested via the '--long' CLI flag.
          runReducedExperiment(expDirPath, False)
        else:
          runReducedExperiment(expDirPath)
      except KeyboardInterrupt:
        print "Keyboard interrupt received. Exiting"
        sys.exit(1)
      # NOTE(review): bare 'except' deliberately records *any* experiment
      # failure and keeps going, but it also swallows SystemExit raised from
      # inside an experiment — consider 'except Exception:' (confirm intent).
      except:
        failedExperiments.append(expDirPath)
        print
        print "Unable to run experiment: %s" % expDirPath
        print "See the trace below-"
        traceback.print_exc()
      else:
        print "Successfully ran experiment: %s" % expDirPath
        successExperiments.append(expDirPath)

    # The suite fails if any individual experiment failed.
    self.assertEqual(len(failedExperiments), 0)
# Script entry point: parse the private CLI options, adjust the module
# globals that testExperiments reads, then hand off to unittest.
if __name__ == "__main__":
  description = \
    "Test all experiments in opf/experiments with reduced iterations.\
 Currently excludes %s in the default mode" % str(EXCLUDED_EXPERIMENTS)
  parser = OptionParser(description=description)
  parser.add_option("-a", "--all", action="store_true",
                    dest="runAllExperiments", default=False,
                    help="Don't exclude any experiments.")
  parser.add_option("-l", "--long", action="store_true",
                    dest="runAllIterations", default=False,
                    help="Don't reduce iterations.")

  (options, args) = parser.parse_args()

  # An optional positional argument overrides the experiments directory.
  if len(args) > 0:
    PREDICTION_DIR = args[0]
  if options.runAllExperiments:
    EXCLUDED_EXPERIMENTS=[]
  RUN_ALL_ITERATIONS = options.runAllIterations

  unittest.main()
| 4,875 | Python | .py | 122 | 34.295082 | 80 | 0.679422 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,174 | opf_region_test.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_region_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This test ensures that SPRegion and TMRegion are working as expected. It runs a
number of tests:
1: testSaveAndReload -- tests that a saved and reloaded network behaves the same
as an unsaved network.
2: testMaxEnabledPhase -- tests that maxEnabledPhase can be set.
The following are unimplemented currently, but should be implemented:
Test N: test that top down compute is working
Test N: test that combined learning and inference is working
Test N: test that all the parameters of an SP region work properly
Test N: test that all the parameters of a TM region work properly
"""
import json
import numpy
import os
import random
import tempfile
import unittest2 as unittest
from nupic.bindings.algorithms import SpatialPooler
from pkg_resources import resource_filename
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
from nupic.data.file_record_stream import FileRecordStream
from nupic.encoders import MultiEncoder
from nupic.engine import Network
from nupic.regions.sp_region import SPRegion
from nupic.regions.tm_region import TMRegion
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
_VERBOSITY = 0         # how chatty the unit tests should be
_SEED = 35             # the random seed used throughout

# Seed the random number generators
rgen = numpy.random.RandomState(_SEED)
random.seed(_SEED)

# Region parameter dicts; populated fresh for each test by _initConfigDicts()
# (called from OPFRegionTest.setUp) because tests mutate them in place.
g_spRegionConfig = None
g_tpRegionConfig = None
def _initConfigDicts():
  """(Re)build the module-level SPRegion/TMRegion parameter dicts.

  Called from setUp so every test starts from pristine configuration;
  tests mutate these dicts (e.g. to fill in 'inputWidth').
  """
  # ============================================================================
  # Config field for SPRegion
  global g_spRegionConfig  # pylint: disable=W0603
  g_spRegionConfig = {
      'spVerbosity': _VERBOSITY,
      'columnCount': 200,
      'inputWidth': 0,
      'numActiveColumnsPerInhArea': 20,
      'spatialImp': 'cpp',
      'seed': _SEED,
  }

  # ============================================================================
  # Config field for TMRegion
  global g_tpRegionConfig  # pylint: disable=W0603
  g_tpRegionConfig = {
      'verbosity': _VERBOSITY,
      'columnCount': 200,
      'cellsPerColumn': 8,
      'inputWidth': 0,
      'seed': _SEED,
      'temporalImp': 'cpp',
      'newSynapseCount': 15,
      'maxSynapsesPerSegment': 32,
      'maxSegmentsPerCell': 128,
      'initialPerm': 0.21,
      'permanenceInc': 0.1,
      'permanenceDec': 0.1,
      'globalDecay': 0.0,
      'maxAge': 0,
      'minThreshold': 12,
      'activationThreshold': 12,
  }
# ==============================================================================
# Utility routines
def _setupTempDirectory(filename):
"""Create a temp directory, and return path to filename in that directory"""
tmpDir = tempfile.mkdtemp()
tmpFileName = os.path.join(tmpDir, os.path.basename(filename))
return tmpDir, tmpFileName
def _createEncoder():
  """Build and return the MultiEncoder used by these tests.

  Encodes the 'timestamp', 'attendeeCount' and 'consumption' fields of the
  gym dataset.
  """
  fieldEncodings = {
      'timestamp': dict(fieldname='timestamp', type='DateEncoder',
                        timeOfDay=(5,5), forced=True),
      'attendeeCount': dict(fieldname='attendeeCount', type='ScalarEncoder',
                            name='attendeeCount', minval=0, maxval=270,
                            clipInput=True, w=5, resolution=10, forced=True),
      'consumption': dict(fieldname='consumption',type='ScalarEncoder',
                          name='consumption', minval=0,maxval=115,
                          clipInput=True, w=5, resolution=5, forced=True),
  }
  multiEncoder = MultiEncoder()
  multiEncoder.addMultipleEncoders(fieldEncodings)
  return multiEncoder
# ==========================================================================
def _createOPFNetwork(addSP = True, addTP = False):
  """Create a 'new-style' network ala OPF and return it.
  If addSP is true, an SPRegion will be added named 'level1SP'.
  If addTP is true, a TMRegion will be added named 'level1TP'
  """

  # ==========================================================================
  # Create the encoder and data source stuff we need to configure the sensor
  sensorParams = dict(verbosity = _VERBOSITY)
  encoder = _createEncoder()
  trainFile = resource_filename("nupic.datafiles", "extra/gym/gym.csv")
  dataSource = FileRecordStream(streamID=trainFile)
  # Auto-rewind lets the tests run more iterations than the file has rows.
  dataSource.setAutoRewind(True)

  # ==========================================================================
  # Now create the network itself
  n = Network()
  n.addRegion("sensor", "py.RecordSensor", json.dumps(sensorParams))

  sensor = n.regions['sensor'].getSelf()
  sensor.encoder = encoder
  sensor.dataSource = dataSource

  # ==========================================================================
  # Add the SP if requested
  # NOTE: this mutates the module-level g_spRegionConfig dict (rebuilt per
  # test by _initConfigDicts), wiring the SP input width to the encoder width.
  if addSP:
    print "Adding SPRegion"
    g_spRegionConfig['inputWidth'] = encoder.getWidth()
    n.addRegion("level1SP", "py.SPRegion", json.dumps(g_spRegionConfig))

    n.link("sensor", "level1SP", "UniformLink", "")
    n.link("sensor", "level1SP", "UniformLink", "",
           srcOutput="resetOut", destInput="resetIn")
    n.link("level1SP", "sensor", "UniformLink", "",
           srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
    n.link("level1SP", "sensor", "UniformLink", "",
           srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")

  # ==========================================================================
  if addTP and addSP:
    # Add the TM on top of SP if requested
    # The input width of the TM is set to the column count of the SP
    print "Adding TMRegion on top of SP"
    g_tpRegionConfig['inputWidth'] = g_spRegionConfig['columnCount']
    n.addRegion("level1TP", "py.TMRegion", json.dumps(g_tpRegionConfig))

    n.link("level1SP", "level1TP", "UniformLink", "")
    n.link("level1TP", "level1SP", "UniformLink", "",
           srcOutput="topDownOut", destInput="topDownIn")
    n.link("sensor", "level1TP", "UniformLink", "",
           srcOutput="resetOut", destInput="resetIn")

  elif addTP:
    # Add a lone TMRegion if requested
    # The input width of the TM is set to the encoder width
    print "Adding TMRegion"
    g_tpRegionConfig['inputWidth'] = encoder.getWidth()
    n.addRegion("level1TP", "py.TMRegion", json.dumps(g_tpRegionConfig))

    n.link("sensor", "level1TP", "UniformLink", "")
    n.link("sensor", "level1TP", "UniformLink", "",
           srcOutput="resetOut", destInput="resetIn")

  return n
class OPFRegionTest(TestCaseBase):
  """Unit tests for the OPF Region Test."""

  def setUp(self):
    # Rebuild the module-level region config dicts so each test starts clean
    # (the network-construction helpers mutate them in place).
    _initConfigDicts()

  # ============================================================================
  def testSaveAndReload(self):
    """
    This function tests saving and loading. It will train a network for 500
    iterations, then save it and reload it as a second network instance. It will
    then run both networks for 100 iterations and ensure they return identical
    results.
    """

    print "Creating network..."

    netOPF = _createOPFNetwork()
    level1OPF = netOPF.regions['level1SP']

    # ==========================================================================
    print "Training network for 500 iterations"
    level1OPF.setParameter('learningMode', 1)
    level1OPF.setParameter('inferenceMode', 0)
    netOPF.run(500)
    level1OPF.setParameter('learningMode', 0)
    level1OPF.setParameter('inferenceMode', 1)

    # ==========================================================================
    # Save network and reload as a second instance. We need to reset the data
    # source for the unsaved network so that both instances start at the same
    # place
    print "Saving and reload network"
    _, tmpNetworkFilename = _setupTempDirectory("trained.nta")
    netOPF.save(tmpNetworkFilename)
    netOPF2 = Network(tmpNetworkFilename)
    level1OPF2 = netOPF2.regions['level1SP']

    sensor = netOPF.regions['sensor'].getSelf()
    trainFile = resource_filename("nupic.datafiles", "extra/gym/gym.csv")
    sensor.dataSource = FileRecordStream(streamID=trainFile)
    sensor.dataSource.setAutoRewind(True)

    # ==========================================================================
    print "Running inference on the two networks for 100 iterations"
    for _ in xrange(100):
      netOPF2.run(1)
      netOPF.run(1)
      l1outputOPF2 = level1OPF2.getOutputData("bottomUpOut")
      l1outputOPF  = level1OPF.getOutputData("bottomUpOut")
      # Compare a cheap hash of the active-bit indices instead of the full
      # output arrays; any divergence changes the sum.
      opfHash2 = l1outputOPF2.nonzero()[0].sum()
      opfHash  = l1outputOPF.nonzero()[0].sum()

      self.assertEqual(opfHash2, opfHash)

  # ============================================================================
  def testMaxEnabledPhase(self):
    """ Test maxEnabledPhase"""

    print "Creating network..."

    netOPF = _createOPFNetwork(addSP = True, addTP = True)
    netOPF.initialize()
    level1SP = netOPF.regions['level1SP']
    level1SP.setParameter('learningMode', 1)
    level1SP.setParameter('inferenceMode', 0)

    tm = netOPF.regions['level1TP']
    tm.setParameter('learningMode', 0)
    tm.setParameter('inferenceMode', 0)

    # With SP in phase 1 and TM in phase 2, the network's max phase is 2.
    print "maxPhase,maxEnabledPhase = ", netOPF.maxPhase, \
            netOPF.getMaxEnabledPhase()
    self.assertEqual(netOPF.maxPhase, 2)
    self.assertEqual(netOPF.getMaxEnabledPhase(), 2)

    print "Setting setMaxEnabledPhase to 1"
    netOPF.setMaxEnabledPhase(1)
    print "maxPhase,maxEnabledPhase = ", netOPF.maxPhase, \
            netOPF.getMaxEnabledPhase()
    self.assertEqual(netOPF.maxPhase, 2)
    self.assertEqual(netOPF.getMaxEnabledPhase(), 1)

    netOPF.run(1)

    print "RUN SUCCEEDED"

    # TODO: The following does not run and is probably flawed.
    """
    print "\nSetting setMaxEnabledPhase to 2"
    netOPF.setMaxEnabledPhase(2)
    print "maxPhase,maxEnabledPhase = ", netOPF.maxPhase, \
          netOPF.getMaxEnabledPhase()
    netOPF.run(1)

    print "RUN SUCCEEDED"

    print "\nSetting setMaxEnabledPhase to 1"
    netOPF.setMaxEnabledPhase(1)
    print "maxPhase,maxEnabledPhase = ", netOPF.maxPhase, \
          netOPF.getMaxEnabledPhase()
    netOPF.run(1)
    print "RUN SUCCEEDED"
    """

  def testGetInputOutputNamesOnRegions(self):
    # Verify the SPRegion's advertised input/output names after one step.
    network = _createOPFNetwork(addSP = True, addTP = True)
    network.run(1)

    spRegion = network.getRegionsByType(SPRegion)[0]

    self.assertEqual(set(spRegion.getInputNames()),
                     set(['sequenceIdIn', 'bottomUpIn', 'resetIn',
                          'topDownIn']))

    self.assertEqual(set(spRegion.getOutputNames()),
                     set(['topDownOut', 'spatialTopDownOut',
                          'temporalTopDownOut', 'bottomUpOut', 'anomalyScore']))

  def testGetAlgorithmOnRegions(self):
    # Verify each region exposes the expected underlying algorithm instance.
    network = _createOPFNetwork(addSP = True, addTP = True)
    network.run(1)

    spRegions = network.getRegionsByType(SPRegion)
    tpRegions = network.getRegionsByType(TMRegion)
    self.assertEqual(len(spRegions), 1)
    self.assertEqual(len(tpRegions), 1)

    spRegion = spRegions[0]
    tpRegion = tpRegions[0]

    sp = spRegion.getSelf().getAlgorithmInstance()
    tm = tpRegion.getSelf().getAlgorithmInstance()
    self.assertEqual(type(sp), SpatialPooler)
    self.assertEqual(type(tm), BacktrackingTMCPP)
# Script entry point: run the test cases above under unittest.
if __name__ == "__main__":
  unittest.main()
| 12,151 | Python | .py | 272 | 39.316176 | 80 | 0.63959 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,175 | opf_checkpoint_stress_test.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_stress_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This is a stress test that saves and loads an OPF checkpoint multiple times,
doing one compute step in between. This test was put in place to catch a crash
bug.
"""
import datetime
import numpy.random
import os
import shutil
import tempfile
import unittest2 as unittest
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
# Model parameters derived from the Hotgym anomaly example. This example was
# used because it uses the most components. Some of the parameters, such
# as columnCount were reduced to make the test run faster.
MODEL_PARAMS = {
'model': "HTMPrediction",
'version': 1,
'aggregationInfo': { 'days': 0,
'fields': [(u'c1', 'sum'), (u'c0', 'first')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
'modelParams': {
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
'verbosity' : 0,
'encoders': {
u'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'maxval': 100.0,
'minval': 0.0,
'n': 50,
'name': u'c1',
'type': 'ScalarEncoder',
'w': 21},},
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
'spVerbosity' : 0,
'globalInhibition': 1,
'spatialImp' : 'cpp',
'columnCount': 512,
'inputWidth': 0,
'numActiveColumnsPerInhArea': 20,
'seed': 1956,
'potentialPct': 0.5,
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.005,
},
'tmEnable' : True,
'tmParams': {
'verbosity': 0,
'columnCount': 512,
'cellsPerColumn': 8,
'inputWidth': 512,
'seed': 1960,
'temporalImp': 'cpp',
'newSynapseCount': 10,
'maxSynapsesPerSegment': 20,
'maxSegmentsPerCell': 32,
'initialPerm': 0.21,
'permanenceInc': 0.1,
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
'minThreshold': 4,
'activationThreshold': 6,
'outputType': 'normal',
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
'verbosity' : 0,
'alpha': 0.005,
'steps': '1,5',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': 2184},
'trainSPNetOnlyIfRequested': False,
},
}
class CheckpointStressTest(TestCaseBase):
  """Stress test: repeatedly checkpoints and reloads an OPF model."""

  def testCheckpoint(self):
    """Alternate tiny training batches with save/load round trips.

    Exercises the crash bug this test was written for: serializing a model,
    deserializing it, and immediately computing with the result.
    """
    checkpointRoot = tempfile.mkdtemp()
    bundlePath = os.path.join(checkpointRoot, "test_checkpoint")

    model = ModelFactory.create(MODEL_PARAMS)
    model.enableInference({'predictedField': 'consumption'})
    fieldNames = ['timestamp', 'consumption']

    # Many small train batches, each followed by a checkpoint round trip.
    for _ in range(20):
      for _ in range(2):
        row = [datetime.datetime(2013, 12, 12), numpy.random.uniform(100)]
        model.run(dict(zip(fieldNames, row)))

      # model.save returns None on success; reload from the bundle and
      # clean it up so the next iteration writes a fresh one.
      self.assertIs(model.save(bundlePath), None, "Save command failed.")
      model = ModelFactory.loadFromCheckpoint(bundlePath)
      shutil.rmtree(bundlePath)
# Script entry point: run the stress test under unittest.
if __name__ == "__main__":
  unittest.main()
| 4,775 | Python | .py | 131 | 28.603053 | 78 | 0.585403 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,176 | htmpredictionmodel_serialization_test.py | numenta_nupic-legacy/tests/integration/nupic/opf/htmpredictionmodel_serialization_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module tests capnp serialization of HTMPredictionModel.
"""
import copy
import datetime
import numpy.random
import numpy.testing
import unittest
try:
# NOTE need to import capnp first to activate the magic necessary for
# PythonDummyRegion_capnp, etc.
import capnp
except ImportError:
capnp = None
else:
from nupic.frameworks.opf.HTMPredictionModelProto_capnp \
import HTMPredictionModelProto
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.htm_prediction_model import HTMPredictionModel
# Model parameters derived from the Hotgym anomaly example. This example was
# used because it uses the most components. Some of the parameters, such
# as columnCount were reduced to make the test run faster.
# Parameters for the model built on the C++ algorithm implementations.
CPP_MODEL_PARAMS = {
    'model': 'HTMPrediction',
    'version': 1,

    'aggregationInfo': {
        'days': 0,
        'fields': [(u'c1', 'sum'), (u'c0', 'first')],
        'hours': 1,
        'microseconds': 0,
        'milliseconds': 0,
        'minutes': 0,
        'months': 0,
        'seconds': 0,
        'weeks': 0,
        'years': 0},

    'predictAheadTime': None,

    'modelParams': {
        # inferenceType choices:
        #
        #   TemporalNextStep, TemporalClassification, NontemporalClassification,
        #   TemporalAnomaly, NontemporalAnomaly, TemporalMultiStep,
        #   NontemporalMultiStep
        #
        'inferenceType': 'TemporalAnomaly',

        'sensorParams': {
            'verbosity': 0,
            'encoders': {
                u'consumption': {'clipInput': True,
                                 'fieldname': u'consumption',
                                 'maxval': 100.0,
                                 'minval': 0.0,
                                 'n': 50,
                                 'name': u'c1',
                                 'type': 'ScalarEncoder',
                                 'w': 21},
            },
            'sensorAutoReset': None,
        },

        'spEnable': True,
        'spParams': {
            'spatialImp': 'cpp',
            'spVerbosity': 0,
            'globalInhibition': 1,
            'columnCount': 512,
            'inputWidth': 0,
            'numActiveColumnsPerInhArea': 20,
            'seed': 1956,
            'potentialPct': 0.5,
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.1,
            'synPermInactiveDec': 0.005,
        },

        'tmEnable': True,
        'tmParams': {
            'temporalImp': 'cpp',
            'verbosity': 0,
            'columnCount': 512,
            'cellsPerColumn': 8,
            'inputWidth': 512,
            'seed': 1960,
            'newSynapseCount': 10,
            'maxSynapsesPerSegment': 20,
            'maxSegmentsPerCell': 32,
            'initialPerm': 0.21,
            'permanenceInc': 0.1,
            'permanenceDec': 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            'minThreshold': 4,
            'activationThreshold': 6,
            'outputType': 'normal',
            'pamLength': 1,
        },

        'clParams': {
            'implementation': 'cpp',
            'regionName': 'SDRClassifierRegion',
            'verbosity': 0,
            'alpha': 0.005,
            'steps': '1,5',
        },

        'anomalyParams': {u'anomalyCacheRecords': None,
                          u'autoDetectThreshold': None,
                          u'autoDetectWaitRecords': 2184},

        'trainSPNetOnlyIfRequested': False,
    },
}


# PY_MODEL_PARAMS is identical to CPP_MODEL_PARAMS except that the spatial
# pooler, temporal memory, and classifier use the pure-Python ('py')
# implementations instead of the C++ ('cpp') ones. Deriving it from
# CPP_MODEL_PARAMS (rather than duplicating the ~85-line literal) keeps the
# two parameter sets from silently diverging.
PY_MODEL_PARAMS = copy.deepcopy(CPP_MODEL_PARAMS)
PY_MODEL_PARAMS['modelParams']['spParams']['spatialImp'] = 'py'
PY_MODEL_PARAMS['modelParams']['tmParams']['temporalImp'] = 'py'
PY_MODEL_PARAMS['modelParams']['clParams']['implementation'] = 'py'
class HTMPredictionModelSerializationTest(unittest.TestCase):
  """Round-trips HTMPredictionModel instances through capnp serialization and
  verifies that the restored model matches the original and produces identical
  inference results.
  """

  def _runModelSerializationDeserializationChecks(self, modelParams):
    """Create a model, run one record, serialize/deserialize it, then run the
    same record through both models and compare all result fields and regions.

    modelParams: OPF model-parameter dict (CPP_MODEL_PARAMS or PY_MODEL_PARAMS)
        used to build the model under test.
    """
    m1 = ModelFactory.create(modelParams)
    m1.enableInference({'predictedField': 'consumption'})
    headers = ['timestamp', 'consumption']
    record = [datetime.datetime(2013, 12, 12), numpy.random.uniform(100)]
    modelInput = dict(zip(headers, record))
    # Prime the model with one record before serializing so that learned
    # state (not just the initial state) is exercised by the round trip.
    m1.run(modelInput)

    # Serialize
    builderProto = HTMPredictionModelProto.new_message()
    m1.write(builderProto)

    # Construct HTMPredictionModelProto reader from populated builder
    readerProto = HTMPredictionModelProto.from_bytes(builderProto.to_bytes())

    # Deserialize
    m2 = HTMPredictionModel.read(readerProto)

    self.assertEqual(m1.getInferenceType(),
                     modelParams['modelParams']['inferenceType'])
    self.assertEqual(m1.getInferenceType(), m2.getInferenceType())

    # Run computes on m1 & m2 and compare results
    record = [datetime.datetime(2013, 12, 14), numpy.random.uniform(100)]
    modelInput = dict(zip(headers, record))
    # Use deepcopy to guarantee no input side-effect between calls
    r1 = m1.run(copy.deepcopy(modelInput))
    r2 = m2.run(copy.deepcopy(modelInput))

    # Compare results field by field; any mismatch means state was lost or
    # corrupted by the serialization round trip.
    self.assertEqual(r2.predictionNumber, r1.predictionNumber)
    self.assertEqual(r2.rawInput, r1.rawInput)

    self.assertEqual(r2.sensorInput.dataRow, r1.sensorInput.dataRow)
    self.assertEqual(r2.sensorInput.dataDict, r1.sensorInput.dataDict)
    numpy.testing.assert_array_equal(r2.sensorInput.dataEncodings,
                                     r1.sensorInput.dataEncodings)
    self.assertEqual(r2.sensorInput.sequenceReset, r1.sensorInput.sequenceReset)
    self.assertEqual(r2.sensorInput.category, r1.sensorInput.category)

    self.assertEqual(r2.inferences, r1.inferences)
    self.assertEqual(r2.metrics, r1.metrics)
    self.assertEqual(r2.predictedFieldIdx, r1.predictedFieldIdx)
    self.assertEqual(r2.predictedFieldName, r1.predictedFieldName)

    numpy.testing.assert_array_equal(r2.classifierInput.dataRow,
                                     r1.classifierInput.dataRow)
    self.assertEqual(r2.classifierInput.bucketIndex,
                     r1.classifierInput.bucketIndex)

    # Compare regions
    self.assertIsNotNone(m2._getSensorRegion())
    self.assertEqual(m2._getSensorRegion(), m1._getSensorRegion())

    self.assertIsNotNone(m2._getClassifierRegion())
    self.assertEqual(m2._getClassifierRegion(), m1._getClassifierRegion())

    self.assertIsNotNone(m2._getTPRegion())
    self.assertEqual(m2._getTPRegion(), m1._getTPRegion())

    self.assertIsNotNone(m2._getSPRegion())
    self.assertEqual(m2._getSPRegion(), m1._getSPRegion())

  @unittest.skipUnless(
      capnp, 'pycapnp is not installed, skipping serialization test.')
  def testPredictedFieldAndInferenceEnabledAreSaved(self):
    """The inference-enabled flag and inference args (predictedField) must
    survive serialization, so a deserialized model runs without a redundant
    enableInference() call."""
    m1 = ModelFactory.create(PY_MODEL_PARAMS)
    m1.enableInference({'predictedField': 'consumption'})
    self.assertTrue(m1.isInferenceEnabled())
    self.assertEqual(m1.getInferenceArgs().get('predictedField'), 'consumption')
    headers = ['timestamp', 'consumption']
    record = [datetime.datetime(2013, 12, 12), numpy.random.uniform(100)]
    modelInput = dict(zip(headers, record))
    m1.run(modelInput)

    # Serialize
    builderProto = HTMPredictionModelProto.new_message()
    m1.write(builderProto)

    # Construct HTMPredictionModelProto reader from populated builder
    readerProto = HTMPredictionModelProto.from_bytes(builderProto.to_bytes())

    # Deserialize
    m2 = HTMPredictionModel.read(readerProto)
    self.assertTrue(m2.isInferenceEnabled())
    self.assertEqual(m2.getInferenceArgs().get('predictedField'), 'consumption')

    # Running the deserialized m2 without redundant enableInference call should
    # work
    record = [datetime.datetime(2013, 12, 14), numpy.random.uniform(100)]
    modelInput = dict(zip(headers, record))
    m2.run(modelInput)

    # Check that disabled inference is saved, too (since constructor defaults to
    # enabled at time of this writing)
    m1.disableInference()
    self.assertFalse(m1.isInferenceEnabled())
    builderProto = HTMPredictionModelProto.new_message()
    m1.write(builderProto)
    readerProto = HTMPredictionModelProto.from_bytes(builderProto.to_bytes())
    m3 = HTMPredictionModel.read(readerProto)
    self.assertFalse(m3.isInferenceEnabled())

  @unittest.skipUnless(
      capnp, 'pycapnp is not installed, skipping serialization test.')
  def testCPPModelSerialization(self):
    # Round-trip a model built on the C++ algorithm implementations.
    self._runModelSerializationDeserializationChecks(CPP_MODEL_PARAMS)

  @unittest.skipUnless(
      capnp, 'pycapnp is not installed, skipping serialization test.')
  def testPYModelSerialization(self):
    # Round-trip a model built on the pure-Python algorithm implementations.
    self._runModelSerializationDeserializationChecks(PY_MODEL_PARAMS)
if __name__ == "__main__":
unittest.main()
| 11,384 | Python | .py | 305 | 30.406557 | 80 | 0.648921 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,177 | opf_description_template_test.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_description_template_test/opf_description_template_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests OPF descriptionTemplate.py-based experiment/sub-experiment pair"""
import os
import pprint
import sys
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.frameworks.opf.helpers import (
loadExperimentDescriptionScriptFromDir,
getExperimentDescriptionInterfaceFromModule
)
from nupic.support.unittesthelpers.testcasebase import (
TestCaseBase as HelperTestCaseBase)
# Our __main__ entry block sets this to an instance of MyTestEnvironment()
g_myEnv = None
g_debug = False
class MyTestEnvironment(object):
  """Resolves the filesystem locations this module's tests depend on: the
  OPF runner script directory and the private experiment directories that
  live next to this test module.
  """

  def __init__(self):
    # The repository root sits five directory levels above this test module.
    thisModuleDir = os.path.dirname(os.path.realpath(__file__))
    nupicRootDir = os.path.join(thisModuleDir, "..", "..", "..", "..", "..")

    missingMsg = "%s is not present in filesystem"

    examplesDir = os.path.join(nupicRootDir, "examples")
    _debugOut("examplesDir=<%s>" % (examplesDir,))
    assert os.path.exists(examplesDir), missingMsg % examplesDir

    # This is where we find OPF binaries (e.g., run_opf_experiment.py, etc.)
    # In the autobuild, it is a read-only directory
    self.__opfBinDir = os.path.join(nupicRootDir, "scripts")
    assert os.path.exists(self.__opfBinDir), missingMsg % self.__opfBinDir
    _debugOut("self.__opfBinDir=<%s>" % self.__opfBinDir)

    # Where this script is running from (our autotest counterpart may have
    # copied it from its original location)
    self.__testRunDir = os.path.abspath(os.path.dirname(__file__))
    _debugOut("self.__testRunDir=<%s>" % self.__testRunDir)

    # Parent directory of our private OPF experiments
    self.__opfExperimentsParentDir = os.path.join(self.__testRunDir,
                                                  "experiments")
    assert os.path.exists(self.__opfExperimentsParentDir), \
        missingMsg % self.__opfExperimentsParentDir
    _debugOut("self.__opfExperimentsParentDir=<%s>"
              % self.__opfExperimentsParentDir)

  def getOpfRunExperimentPyPath(self):
    """Returns: path of the run_opf_experiment.py entry script."""
    return os.path.join(self.__opfBinDir, "run_opf_experiment.py")

  def getOpfExperimentPath(self, experimentName):
    """
    experimentName: e.g., "gym"; this string will be used to form
                    a directory path to the experiment.

    Returns:        absolute path to the experiment directory
    """
    experimentDir = os.path.join(self.__opfExperimentsParentDir,
                                 experimentName)
    assert os.path.isdir(experimentDir), \
        "Experiment path %s doesn't exist or is not a directory" % (
            experimentDir,)
    return experimentDir
class MyTestCaseBase(HelperTestCaseBase):
  """Common fixture/utility base class for the OPF experiment tests in this
  module. Lazily creates the shared MyTestEnvironment and provides a helper
  that runs run_opf_experiment.py as a subprocess and asserts success.
  """

  def setUp(self):
    """ Method called to prepare the test fixture. This is called immediately
    before calling the test method; any exception raised by this method will be
    considered an error rather than a test failure. The default implementation
    does nothing.
    """
    global g_myEnv
    if not g_myEnv:
      # Setup environment lazily so the filesystem asserts in
      # MyTestEnvironment run only when a test actually executes.
      g_myEnv = MyTestEnvironment()

  def tearDown(self):
    """ Method called immediately after the test method has been called and the
    result recorded. This is called even if the test method raised an exception,
    so the implementation in subclasses may need to be particularly careful
    about checking internal state. Any exception raised by this method will be
    considered an error rather than a test failure. This method will only be
    called if the setUp() succeeds, regardless of the outcome of the test
    method. The default implementation does nothing.
    """
    # Reset our log items
    self.resetExtraLogItems()

  def shortDescription(self):
    """ Override to force unittest framework to use test method names instead
    of docstrings in the report.
    """
    return None

  def executePositiveOpfExperiment(self, experimentName, short=False):
    """ Executes a positive OPF RunExperiment test as a subprocess and validates
    its exit status.

    experimentName: e.g., "gym"; this string will be used to form
                    a directory path to the experiment.

    short:          if True, attempt to run the experiment with --testMode
                    flag turned on, which causes all inference and training
                    iteration counts to be overridden with small counts.

    Returns:        result from _executeExternalCmdAndReapOutputs
    """
    opfRunner = g_myEnv.getOpfRunExperimentPyPath()
    opfExpDir = g_myEnv.getOpfExperimentPath(experimentName)

    r = self.__executePositiveRunExperimentTest(runnerPath=opfRunner,
                                                experimentDirPath=opfExpDir,
                                                short=short)
    return r

  def __executePositiveRunExperimentTest(self,
                                         runnerPath,
                                         experimentDirPath,
                                         customOptions=(),
                                         short=False):
    """ Executes a positive RunExperiment.py test and performs
    basic validation

    runnerPath:        experiment running (LPF or OPF RunExperiment.py path)

    experimentDirPath: directory containing the description.py file of interest

    customOptions:     extra command-line options appended to the runner
                       invocation. Immutable default () instead of a mutable
                       [] avoids Python's shared-mutable-default pitfall.

    short:             if True, attempt to run the experiment with --testMode
                       flag turned on, which causes all inference and training
                       iteration counts to be overridden with small counts.
                       NOTE: if the (possibly aggregated) dataset has fewer
                       rows than the count overrides, then an LPF experiment
                       will fail.

    Returns:           result from _executeExternalCmdAndReapOutputs
    """
    #----------------------------------------
    # Set up args
    command = [
        "python",
        runnerPath,
        experimentDirPath,
    ]
    command.extend(customOptions)

    if short:
      command.append("--testMode")

    self.addExtraLogItem({'command': command})

    #----------------------------------------
    # Execute RunExperiment.py as subprocess and collect results
    r = _executeExternalCmdAndReapOutputs(command)
    self.addExtraLogItem({'result': r})

    _debugOut(("_executeExternalCmdAndReapOutputs(%s)=%s") % (command, r))

    #----------------------------------------
    # Check subprocess exit status
    self.assertEqual(r['exitStatus'], 0,
                     ("Expected status = 0 from %s; got: %s") % \
                     (runnerPath, r['exitStatus'],))

    self.resetExtraLogItems()

    return r
class PositiveTests(MyTestCaseBase):
  """End-to-end checks for the gym description-template experiment pair."""

  #========================
  def test_sub_experiment_override(self):
    # Loading experiments/gym/description.py must apply the sub-experiment's
    # override (activationThreshold=12) on top of the base description.
    expDir = g_myEnv.getOpfExperimentPath("gym")
    module = loadExperimentDescriptionScriptFromDir(expDir)
    expIface = getExperimentDescriptionInterfaceFromModule(module)

    modelDesc = expIface.getModelDescription()

    tpActivationThreshold = modelDesc['modelParams'] \
        ['tmParams']['activationThreshold']

    expectedValue = 12
    self.assertEqual(tpActivationThreshold, expectedValue,
                     "Expected tm activationThreshold=%s, but got %s" % (
                         expectedValue, tpActivationThreshold))

  def test_run_sub_experiment(self):
    # Run the gym experiment through run_opf_experiment.py; --testMode
    # (short=True) keeps the iteration counts small.
    self.executePositiveOpfExperiment(experimentName="gym", short=True)
################################################################################
# Support functions
################################################################################
def _executeExternalCmdAndReapOutputs(args):
  """Run an external command to completion and capture its outputs.

  args:    Args list as defined for the args parameter in subprocess.Popen()

  Returns: result dictionary:
            {
              'exitStatus':<exit-status-of-external-command>,
              'stdoutData':"string",
              'stderrData':"string"
            }
  """
  import subprocess

  _debugOut(("Starting...\n<%s>") % (args,))

  childProc = subprocess.Popen(args,
                               env=os.environ,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
  _debugOut(("Process started for <%s>") % (args,))

  # communicate() waits for process exit and reaps both output streams.
  outputData, errorData = childProc.communicate()
  _debugOut(("Process completed for <%s>: exit status=%s, "
             "stdoutDataType=%s, stdoutData=<%s>, stderrData=<%s>") %
            (args, childProc.returncode, type(outputData), outputData,
             errorData))

  result = {
      'exitStatus': childProc.returncode,
      'stdoutData': outputData,
      'stderrData': errorData,
  }

  _debugOut(("args: <%s>: result:\n%s") %
            (args, pprint.pformat(result, indent=4)))

  return result
def _debugOut(msg):
  """Print a diagnostic message tagged with the calling function and line,
  but only when the module-level g_debug flag is True.

  msg: text to emit
  """
  if g_debug:
    callerTraceback = whoisCallersCaller()
    # Parenthesized single-expression print: byte-identical output under
    # Python 2's print statement and valid under Python 3's print function.
    print("OPF TestDescriptionTemplate (f=%s;line=%s): %s" % \
        (callerTraceback.function, callerTraceback.lineno, msg,))
    sys.stdout.flush()
def whoisCallersCaller():
  """
  Returns: Traceback namedtuple for our caller's caller (two frames above
           this function on the call stack)
  """
  import inspect

  # stack()[0] is this function, [1] is our caller, [2] is the caller's caller.
  grandparentFrame = inspect.stack()[2][0]
  return inspect.getframeinfo(grandparentFrame)
if __name__ == "__main__":
g_myEnv = MyTestEnvironment()
unittest.longMessage = True
unittest.main()
| 10,144 | Python | .py | 220 | 38.368182 | 80 | 0.637435 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,178 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_description_template_test/experiments/gym/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This file defines parameters for a prediction experiment."""
import os
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# The sub-experiment configuration: only the keys listed here override the
# corresponding entries in base.py; the empty dicts mean "no overrides" for
# those sections. 'activationThreshold' replaces base.py's float("nan")
# placeholder value.
config = \
{ 'modelParams': { 'clParams': { },
                   'sensorParams': { 'encoders': { }},
                   'spParams': { },
                   'tmParams': { 'activationThreshold': 12}}}

# Load the shared base description with the overrides above applied, then
# re-export every symbol it defines (config, control, descriptionInterface,
# ...) as if they were defined in this module.
mod = importBaseDescription('./base.py', config)
locals().update(mod.__dict__)
| 1,447 | Python | .py | 31 | 43.709677 | 78 | 0.657932 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,179 | base.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_description_template_test/experiments/gym/base.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",

    # Version that specifies the format of the config.
    'version': 1,

    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
                                     ('numericFieldNameB', 'sum'),
                                     ('categoryFieldNameC', 'first')],
                         'hours': 0},
    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalNextStep',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,

            # Example:
            #   dsEncoderSchema = [
            #       DeferredDictLookup('__field_name_encoder'),
            #   ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': {
                'timestamp': dict(fieldname='timestamp', type='DateEncoder',timeOfDay=(5,5)),
                'attendeeCount': dict(fieldname='attendeeCount', type='ScalarEncoder',
                                      name='attendeeCount', minval=0, maxval=270,
                                      clipInput=True, w=5, resolution=10, forced=True),
                'consumption': dict(fieldname='consumption',type='ScalarEncoder',
                                    name='consumption', minval=0,maxval=115,
                                    clipInput=True, w=5, resolution=5, forced=True),
            },

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,

            'globalInhibition': 1,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            'inputWidth': 0,

            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 20,

            'seed': 1956,

            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,

            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,

            'synPermActiveInc': 0.1,

            'synPermInactiveDec': 0.01,
        },

        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,

        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 8,

            'inputWidth': 2048,

            'seed': 1960,

            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',

            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 15,

            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,

            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,

            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,

            # Permanence Increment
            'permanenceInc': 0.1,

            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,

            'globalDecay': 0.0,

            'maxAge': 0,

            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,

            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            # NaN is a deliberate placeholder: a sub-experiment (e.g. the gym
            # description.py) is expected to override this value via
            # updateConfigFromSubConfig().
            'activationThreshold': float("nan"),

            'outputType': 'normal',

            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        'clParams': {
            'regionName' : 'SDRClassifierRegion',

            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,

            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,

            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  # aggregationDivide yields how many aggregation periods fit into the
  # predict-ahead window; the classifier needs at least one step.
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = dict(
  environment = 'opfExperiment',

  # [optional] A sequence of one or more tasks that describe what to do with the
  # model. Each task consists of a task label, an input spec., iteration count,
  # and a task-control spec per opfTaskSchema.json
  #
  # NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
  #       Clients that interact with OPFExperiment directly do not make use of
  #       the tasks specification.
  #
  tasks = [
    {
      # Task label; this label string may be used for diagnostic logging and for
      # constructing filenames or directory pathnames for task-specific files, etc.
      'taskLabel' : "OnlineLearning",

      # Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
      #
      'dataset' : {
        'info': 'test_NoProviders',
        'version': 1,

        'streams': [
          {
            'columns': ['*'],
            'info': 'my gym.csv dataset',
            'source': 'file://extra/gym/gym.csv',
            'first_record': 0,
            'last_record': 4000
          }
        ],

        # TODO: Aggregation is not supported yet by run_opf_experiment.py
        #'aggregation' : config['aggregationInfo']
      },

      # Iteration count: maximum number of iterations. Each iteration corresponds
      # to one record from the (possibly aggregated) dataset. The task is
      # terminated when either number of iterations reaches iterationCount or
      # all records in the (possibly aggregated) database have been processed,
      # whichever occurs first.
      #
      # iterationCount of -1 = iterate over the entire dataset
      'iterationCount' : -1,

      # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
      'taskControl' : {

        # Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
        # instances.
        'iterationCycle' : [
          #IterationPhaseSpecLearnOnly(1000),
          IterationPhaseSpecLearnAndInfer(1000, dict(predictedField="consumption")),
          #IterationPhaseSpecInferOnly(10),
        ],

        'metrics' :[
          MetricSpec(metric='rmse',
                     field="consumption",
                     inferenceElement=InferenceElement.prediction),
        ],

        # Callbacks for experimentation/research (optional)
        'callbacks' : {
          # Callbacks to be called at the beginning of a task, before model iterations.
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          'setup' : [],

          # Callbacks to be called after every learning/inference iteration
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          'postIter' : [],

          # Callbacks to be called when the experiment task is finished
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          'finish' : []
        }
      } # End of taskControl
    }, # End of task
  ]
)

# Entry point consumed by the OPF experiment runner/helpers; bundles the model
# configuration above with the control section.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,612 | Python | .py | 334 | 37.622754 | 108 | 0.635329 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,180 | opf_checkpoint_test.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/opf_checkpoint_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import os
import shutil
from nupic.data.file_record_stream import FileRecordStream
from nupic.frameworks.opf.experiment_runner import runExperiment, getCheckpointParentDir
from nupic.support import initLogging
from nupic.support.unittesthelpers.testcasebase import (
unittest, TestCaseBase as HelperTestCaseBase)
try:
import capnp
except ImportError:
capnp = None
_EXPERIMENT_BASE = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "experiments")
class MyTestCaseBase(HelperTestCaseBase):
  """Shared helper base for the OPF checkpoint tests.

  Provides utilities to run the a / b / a_plus_b experiment trios, compare
  the prediction CSV logs they emit, and load legacy checkpoints.
  """

  def shortDescription(self):
    """ Override to force unittest framework to use test method names instead
    of docstrings in the report.
    """
    return None


  def compareOPFPredictionFiles(self, path1, path2, temporal,
                                maxMismatches=None):
    """ Compare temporal or non-temporal predictions from two OPF prediction
    CSV files produced by an experiment that just finished executing.

    path1:          path to the first OPF prediction CSV file
    path2:          path to the second OPF prediction CSV file
    temporal:       True for temporal predictions; False for non-temporal
    maxMismatches:  Maximum number of row mismatches to report before
                    terminating the comparison; None means: report all
                    mismatches

    Returns:        True if equal; False if different
    """
    experimentLabel = "%s prediction comparison" % \
                      ("Temporal" if temporal else "Non-Temporal")

    print "%s: Performing comparison of OPF prediction CSV files %r and %r" % (
      experimentLabel, path1, path2)

    # Open CSV readers; fail fast if either file is missing.
    #
    self.assertTrue(
      os.path.isfile(path1),
      msg="OPF prediction file path1 %s doesn't exist or is not a file" % (
        path1))
    (opf1CsvReader, opf1FieldNames) = self._openOpfPredictionCsvFile(path1)

    self.assertTrue(
      os.path.isfile(path2),
      msg="OPF prediction file path2 %s doesn't exist or is not a file" % (
        path2))
    (opf2CsvReader, opf2FieldNames) = self._openOpfPredictionCsvFile(path2)

    # Both logs must expose the identical column layout.
    self.assertEqual(len(opf1FieldNames), len(opf2FieldNames),
                     ("%s: Mismatch in number of prediction columns: "
                      "opf1: %s, opf2: %s") % (
                       experimentLabel, len(opf1FieldNames),
                       len(opf2FieldNames)))
    self.assertEqual(opf1FieldNames, opf2FieldNames)

    # Each data row is assumed to be arranged as follows:
    #
    # reset, actual-field1, prediction-field1, actual-field2,
    # prediction-field2, etc.
    #
    # Presently, we only compare the predicted values that need to match.

    opf1EOF = False
    opf2EOF = False

    opf1CurrentDataRowIndex = -1
    opf2CurrentDataRowIndex = -1

    if temporal:
      # Skip the first data rows for temporal tests, since they don't contain
      # prediction values.
      _skipOpf1Row = opf1CsvReader.next()
      opf1CurrentDataRowIndex += 1
      _skipOpf2Row = opf2CsvReader.next()
      opf2CurrentDataRowIndex += 1

    # Columns 2, 4, 6, ... hold the "predicted" values per the row layout
    # described above.
    fieldsIndexesToCompare = tuple(xrange(2, len(opf1FieldNames), 2))
    self.assertGreater(len(fieldsIndexesToCompare), 0)

    print ("%s: Comparing fields at indexes: %s; "
           "opf1Labels: %s; opf2Labels: %s") % (
      experimentLabel,
      fieldsIndexesToCompare,
      [opf1FieldNames[i] for i in fieldsIndexesToCompare],
      [opf2FieldNames[i] for i in fieldsIndexesToCompare])

    for i in fieldsIndexesToCompare:
      self.assertTrue(opf1FieldNames[i].endswith("predicted"),
                      msg="%r doesn't end with 'predicted'" % opf1FieldNames[i])
      self.assertTrue(opf2FieldNames[i].endswith("predicted"),
                      msg="%r doesn't end with 'predicted'" % opf2FieldNames[i])

    mismatchCount = 0

    while True:
      try:
        opf1Row = opf1CsvReader.next()
      except StopIteration:
        opf1EOF = True
      else:
        opf1CurrentDataRowIndex += 1

      try:
        opf2Row = opf2CsvReader.next()
      except StopIteration:
        opf2EOF = True
      else:
        opf2CurrentDataRowIndex += 1

      if opf1EOF != opf2EOF:
        # One file ended before the other: the data row counts differ.
        print ("%s: ERROR: Data row counts mismatch: "
               "opf1EOF: %s, opf1CurrentDataRowIndex: %s; "
               "opf2EOF: %s, opf2CurrentDataRowIndex: %s") % (
          experimentLabel,
          opf1EOF, opf1CurrentDataRowIndex,
          opf2EOF, opf2CurrentDataRowIndex)
        return False

      if opf1EOF and opf2EOF:
        # Done with both prediction datasets
        break

      # Compare the rows
      self.assertEqual(len(opf1Row), len(opf2Row))

      for i in fieldsIndexesToCompare:
        # Compare numerically so that textually-different but numerically
        # equal values (e.g. "1.0" vs "1") are treated as equal.
        opf1FloatValue = float(opf1Row[i])
        opf2FloatValue = float(opf2Row[i])

        if opf1FloatValue != opf2FloatValue:
          mismatchCount += 1

          print ("%s: ERROR: mismatch in "
                 "prediction values: dataRowIndex: %s, fieldIndex: %s (%r); "
                 "opf1FieldValue: <%s>, opf2FieldValue: <%s>; "
                 "opf1FieldValueAsFloat: %s, opf2FieldValueAsFloat: %s; "
                 "opf1Row: %s, opf2Row: %s") % (
            experimentLabel,
            opf1CurrentDataRowIndex,
            i,
            opf1FieldNames[i],
            opf1Row[i],
            opf2Row[i],
            opf1FloatValue,
            opf2FloatValue,
            opf1Row,
            opf2Row)

          # Stop comparison if we exceeded the allowed number of mismatches
          # NOTE(review): this break only exits the inner per-field loop; the
          # outer row loop continues reading rows -- confirm this is intended.
          if maxMismatches is not None and mismatchCount >= maxMismatches:
            break

    if mismatchCount != 0:
      print "%s: ERROR: there were %s mismatches between %r and %r" % (
        experimentLabel, mismatchCount, path1, path2)
      return False

    # A difference here would indicate a logic error in this method
    self.assertEqual(opf1CurrentDataRowIndex, opf2CurrentDataRowIndex)

    print ("%s: Comparison of predictions "
           "completed: OK; number of prediction rows examined: %s; "
           "path1: %r; path2: %r") % \
      (experimentLabel,
       opf1CurrentDataRowIndex + 1,
       path1,
       path2)

    return True


  def _openOpfPredictionCsvFile(self, filepath):
    """ Open an OPF prediction CSV file and advance it to the first data row

    Returns:  the tuple (csvReader, fieldNames), where 'csvReader' is the
              csv reader object, and 'fieldNames' is a sequence of field
              names.
    """
    # Open the OPF prediction file
    csvReader = self._openCsvFile(filepath)

    # Advance it past the three NUPIC header lines
    names = csvReader.next()
    _types = csvReader.next()
    _specials = csvReader.next()

    return (csvReader, names)


  @staticmethod
  def _openCsvFile(filepath):
    """Open *filepath* and return a csv reader over it.

    NOTE(review): the underlying file object is deliberately not closed here;
    the returned reader keeps it alive and it is released on garbage
    collection.
    """
    # We'll be operating on csvs with arbitrarily long fields
    size = 2**27
    csv.field_size_limit(size)

    rawFileObj = open(filepath, 'r')

    csvReader = csv.reader(rawFileObj, dialect='excel')

    return csvReader


  def _createExperimentArgs(self, experimentDir,
                            newSerialization=False,
                            additionalArgs=()):
    """Build the argument list to pass to runExperiment().

    experimentDir:     directory containing the experiment's description.py
    newSerialization:  if True, append the --newSerialization flag
    additionalArgs:    extra command-line arguments to append verbatim

    Returns: list of command-line arguments for runExperiment()
    """
    args = []
    args.append(experimentDir)
    if newSerialization:
      args.append("--newSerialization")
    args += additionalArgs
    return args


  def _testSamePredictions(self, experiment, predSteps, checkpointAt,
                           predictionsFilename, additionalFields=None,
                           newSerialization=False):
    """ Test that we get the same predictions out from the following two
    scenarios:

    a_plus_b: Run the network for 'a' iterations followed by 'b' iterations
    a, followed by b: Run the network for 'a' iterations, save it, load it
                      back in, then run for 'b' iterations.

    Parameters:
    -----------------------------------------------------------------------
    experiment:   base directory of the experiment. This directory should
                    contain the following:
                        base.py
                        a_plus_b/description.py
                        a/description.py
                        b/description.py
                    The sub-directory description files should import the
                    base.py and only change the first and last record used
                    from the data file.

    predSteps:   Number of steps ahead predictions are for

    checkpointAt: Number of iterations that 'a' runs for.
                 IMPORTANT: This must match the number of records that
                 a/description.py runs for - it is NOT dynamically stuffed into
                 the a/description.py.

    predictionsFilename: The name of the predictions file that the OPF
                  generates for this experiment (for example
                  'DefaulTask.NontemporalMultiStep.predictionLog.csv')

    newSerialization: Whether to use new capnproto serialization.
    """

    # Get the 3 sub-experiment directories
    aPlusBExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a_plus_b")
    aExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a")
    bExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "b")

    # Run a+b
    args = self._createExperimentArgs(aPlusBExpDir,
                                      newSerialization=newSerialization)
    _aPlusBExp = runExperiment(args)

    # Run a, the copy the saved checkpoint into the b directory
    args = self._createExperimentArgs(aExpDir,
                                      newSerialization=newSerialization)
    _aExp = runExperiment(args)
    if os.path.exists(os.path.join(bExpDir, 'savedmodels')):
      shutil.rmtree(os.path.join(bExpDir, 'savedmodels'))
    shutil.copytree(src=os.path.join(aExpDir, 'savedmodels'),
                    dst=os.path.join(bExpDir, 'savedmodels'))

    # Run b, resuming from a's checkpoint.
    args = self._createExperimentArgs(bExpDir,
                                      newSerialization=newSerialization,
                                      additionalArgs=['--load=DefaultTask'])
    _bExp = runExperiment(args)

    # Now, compare the predictions at the end of a+b to those in b.
    aPlusBPred = FileRecordStream(os.path.join(aPlusBExpDir, 'inference',
                                               predictionsFilename))
    bPred = FileRecordStream(os.path.join(bExpDir, 'inference',
                                          predictionsFilename))

    colNames = [x[0] for x in aPlusBPred.getFields()]
    actValueColIdx = colNames.index('multiStepPredictions.actual')
    predValueColIdx = colNames.index('multiStepPredictions.%d' % (predSteps))

    # Skip past the 'a' records in aPlusB
    for i in range(checkpointAt):
      aPlusBPred.next()

    # Now, read through the records that don't have predictions yet
    for i in range(predSteps):
      aPlusBPred.next()
      bPred.next()

    # Now, compare predictions in the two files
    # NOTE(review): the "+ 4 - 1" offset appears to account for the CSV header
    # rows so that reported row numbers match the raw file -- confirm before
    # changing.
    rowIdx = checkpointAt + predSteps + 4 - 1
    epsilon = 0.0001
    while True:
      rowIdx += 1
      try:
        rowAPB = aPlusBPred.next()
        rowB = bPred.next()

        # Compare actuals
        self.assertEqual(rowAPB[actValueColIdx], rowB[actValueColIdx],
              "Mismatch in actual values: row %d of a+b has %s and row %d of "
              "b has %s" % (rowIdx, rowAPB[actValueColIdx], rowIdx-checkpointAt,
                            rowB[actValueColIdx]))

        # Compare predictions, within nearest epsilon
        # NOTE(review): eval() parses the serialized predictions dict; input
        # here is trusted test output, not external data.
        predAPB = eval(rowAPB[predValueColIdx])
        predB = eval(rowB[predValueColIdx])

        # Sort with highest probabilities first
        # (each dict maps predicted value -> probability; swap to
        # (probability, value) tuples before sorting)
        predAPB = [(a, b) for b, a in predAPB.items()]
        predB = [(a, b) for b, a in predB.items()]
        predAPB.sort(reverse=True)
        predB.sort(reverse=True)

        if additionalFields is not None:
          for additionalField in additionalFields:
            fieldIdx = colNames.index(additionalField)
            self.assertEqual(rowAPB[fieldIdx], rowB[fieldIdx],
              "Mismatch in field \'%s\' values: row %d of a+b has value: (%s)\n"
              " and row %d of b has value: %s" % \
              (additionalField, rowIdx, rowAPB[fieldIdx],
               rowIdx-checkpointAt, rowB[fieldIdx]))

        self.assertEqual(len(predAPB), len(predB),
              "Mismatch in predicted values: row %d of a+b has %d predictions: "
              "\n (%s) and row %d of b has %d predictions:\n (%s)" % \
              (rowIdx, len(predAPB), predAPB, rowIdx-checkpointAt, len(predB),
               predB))

        for i in range(len(predAPB)):
          (aProb, aValue) = predAPB[i]
          (bProb, bValue) = predB[i]

          self.assertLess(abs(aValue-bValue), epsilon,
              "Mismatch in predicted values: row %d of a+b predicts value %s "
              "and row %d of b predicts %s" % (rowIdx, aValue,
                                               rowIdx-checkpointAt, bValue))

          self.assertLess(abs(aProb-bProb), epsilon,
              "Mismatch in probabilities: row %d of a+b predicts %s with "
              "probability %s and row %d of b predicts %s with probability %s" \
              % (rowIdx, aValue, aProb, rowIdx-checkpointAt, bValue, bProb))

      except StopIteration:
        break

    # clean up model checkpoint directories
    shutil.rmtree(getCheckpointParentDir(aExpDir))
    shutil.rmtree(getCheckpointParentDir(bExpDir))
    shutil.rmtree(getCheckpointParentDir(aPlusBExpDir))

    print "Predictions match!"


  @staticmethod
  def _testBackwardsCompatibility(experiment, checkpointName):
    """ Test that we can load in a checkpoint saved by an earlier version of
    the OPF.

    Parameters:
    -----------------------------------------------------------------------
    experiment:       Directory of the experiment.
    checkpointName:   which checkpoint to verify
    """

    # Get the experiment directories
    expDir = os.path.join(_EXPERIMENT_BASE, experiment)

    # Copy the pertinent checkpoint
    if os.path.exists(os.path.join(expDir, 'savedmodels')):
      shutil.rmtree(os.path.join(expDir, 'savedmodels'))
    shutil.copytree(src=os.path.join(expDir, checkpointName),
                    dst=os.path.join(expDir, 'savedmodels'))

    # Run it from the checkpoint
    _aPlusBExp = runExperiment(args=[expDir, '--load=DefaultTask',
                                     '--noCheckpoint'])
class PositiveTests(MyTestCaseBase):
  """Checkpoint round-trip test cases.

  Each case verifies that a model restored from a saved checkpoint produces
  the same predictions as an identical model that ran without interruption
  (see MyTestCaseBase._testSamePredictions).
  """

  def test_NonTemporalMultiStep(self):
    """Non-temporal multi-step model: a save/load at record 250 must not
    change subsequent predictions.
    """
    kwargs = dict(
        experiment="non_temporal_multi_step",
        predSteps=24,
        checkpointAt=250,
        predictionsFilename="DefaultTask.NontemporalMultiStep.predictionLog.csv")
    self._testSamePredictions(**kwargs)


  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def test_NonTemporalMultiStepNew(self):
    """Same scenario as test_NonTemporalMultiStep, but exercising the new
    capnproto serialization path.
    """
    kwargs = dict(
        experiment="non_temporal_multi_step",
        predSteps=24,
        checkpointAt=250,
        predictionsFilename="DefaultTask.NontemporalMultiStep.predictionLog.csv",
        newSerialization=True)
    self._testSamePredictions(**kwargs)


  @unittest.skip("Currently Fails: NUP-1864")
  def test_TemporalMultiStep(self):
    """Temporal multi-step model: a save/load at record 250 must not change
    subsequent predictions.
    """
    self._testSamePredictions(
        experiment="temporal_multi_step",
        predSteps=24,
        checkpointAt=250,
        predictionsFilename='DefaultTask.TemporalMultiStep.predictionLog.csv')


  @unittest.skip("Currently Fails: NUP-1864")
  def test_TemporalAnomaly(self):
    """Temporal anomaly model: predictions and anomaly scores must match
    across a save/load at record 250.
    """
    self._testSamePredictions(
        experiment="temporal_anomaly",
        predSteps=1,
        checkpointAt=250,
        predictionsFilename='DefaultTask.TemporalAnomaly.predictionLog.csv',
        additionalFields=['anomalyScore'])


  @unittest.skip("We aren't currently supporting serialization backward "
                 "compatibility")
  def test_BackwardsCompatibility(self):
    """A legacy checkpoint (2012-10-05) must load in the current OPF."""
    self._testBackwardsCompatibility(
        os.path.join('backwards_compatibility', 'a'),
        'savedmodels_2012-10-05')
if __name__ == "__main__":
  # Enable verbose NuPIC logging before handing control to the unittest
  # runner, which discovers and executes the test cases above.
  initLogging(verbose=True)

  unittest.main()
| 17,685 | Python | .py | 386 | 36.621762 | 88 | 0.640596 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,181 | base.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_anomaly/base.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2012-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
import os

from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
  updateConfigFromSubConfig,
  applyValueGettersToContainer,
  DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_task_driver import (
  IterationPhaseSpecLearnOnly,
  IterationPhaseSpecInferOnly,
  IterationPhaseSpecLearnAndInfer)
from nupic.frameworks.opf.opf_utils import (InferenceType,
                                            InferenceElement)
from nupic.support import aggregationDivide
# Verbosity level shared by the sensor, SP, TM and classifier sections of the
# model configuration below (0 = silent).
VERBOSITY = 0
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# Base model configuration for the temporal-anomaly checkpoint experiments.
# Sub-experiments (a, b, a_plus_b) override pieces of this dict via
# updateConfigFromSubConfig() below.
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",

    # Version that specifies the format of the config.
    'version': 1,

    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': {
        'days': 0,
        'hours': 0,
        'microseconds': 0,
        'milliseconds': 0,
        'minutes': 0,
        'months': 0,
        'seconds': 0,
        'weeks': 0,
        'years': 0,
        # Aggregate each field by taking the first value in the period.
        'fields': [(u'c1', 'first'), (u'c0', 'first')],
    },

    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalAnomaly',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : VERBOSITY,

            # Example:
            #   dsEncoderSchema = [
            #     DeferredDictLookup('__field_name_encoder'),
            #   ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            # c0 (datetime) is split into three date sub-encoders; c1 (scalar)
            # uses an adaptive scalar encoder.
            'encoders': {
                u'c0_timeOfDay': { 'fieldname': u'c0',
                                   'name': u'c0_timeOfDay',
                                   'timeOfDay': (21, 1),
                                   'type': 'DateEncoder'},
                u'c0_dayOfWeek': { 'dayOfWeek': (21, 1),
                                   'fieldname': u'c0',
                                   'name': u'c0_dayOfWeek',
                                   'type': 'DateEncoder'},
                u'c0_weekend': { 'fieldname': u'c0',
                                 'name': u'c0_weekend',
                                 'type': 'DateEncoder',
                                 'weekend': 21},
                u'c1': { 'clipInput': True,
                         'fieldname': u'c1',
                         'n': 100,
                         'name': u'c1',
                         'type': 'AdaptiveScalarEncoder',
                         'w': 21},
            },

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #  days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : VERBOSITY,

            'globalInhibition': 1,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            'inputWidth': 0,

            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,

            'seed': 1956,

            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,

            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,

            'synPermActiveInc': 0.1,

            'synPermInactiveDec': 0.01,
        },

        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs.  Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,

        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,

            'inputWidth': 2048,

            'seed': 1960,

            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',

            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,

            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,

            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,

            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,

            # Permanence Increment
            'permanenceInc': 0.1,

            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,

            'globalDecay': 0.0,

            'maxAge': 0,

            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,

            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,

            'outputType': 'normal',

            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        'clParams': {
            'regionName' : 'SDRClassifierRegion',

            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : VERBOSITY,

            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,

            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  # str() keeps 'steps' consistent with its string-typed default ('1') above.
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# Absolute path of the experiment's input data file, next to this script.
# NOTE(review): `os` is not imported explicitly in this file; it appears to be
# relied upon via the wildcard htm_prediction_model_callbacks import -- an
# explicit `import os` would be safer.
dataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                        'data.csv'))

control = {
  # The environment that the current model is being run in
  "environment": 'nupic',

  # Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
  #
  'dataset' : { 'aggregation': config['aggregationInfo'],
                u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
                u'streams': [ { u'columns': [u'c0', u'c1'],
                                u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
                                u'source': 'file://%s' % (dataPath),
                                # Record range is supplied by the
                                # sub-experiment (a / b / a_plus_b).
                                u'first_record': config['firstRecord'],
                                u'last_record': config['lastRecord'],
                                u'types': [u'datetime', u'float']}],
                u'timeField': u'c0',
                u'version': 1},

  # Iteration count: maximum number of iterations.  Each iteration corresponds
  # to one record from the (possibly aggregated) dataset.  The task is
  # terminated when either number of iterations reaches iterationCount or
  # all records in the (possibly aggregated) database have been processed,
  # whichever occurs first.
  #
  # iterationCount of -1 = iterate over the entire dataset
  'iterationCount' : -1,

  # A dictionary containing all the supplementary parameters for inference
  "inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [1]},

  # Metrics: A list of MetricSpecs that instantiate the metrics that are
  # computed for this experiment
  'metrics':[
    MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [1], 'errorMetric': 'altMAPE'}),
  ],

  # Logged Metrics: A sequence of regular expressions that specify which of
  # the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regex's correspond to the automatically generated
  # metric labels. This is similar to the way the optimization metric is
  # specified in permutations.py.
  'loggedMetrics': ['.*'],
}

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
| 15,007 | Python | .py | 325 | 37.6 | 158 | 0.634958 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,182 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_anomaly/a_plus_b/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides applied on top of ../base.py: swept model
# parameters plus the record range this variant consumes (records 0-500,
# i.e. phase "a" immediately followed by phase "b" in one uninterrupted run).
config = {
    'modelParams': {
        'sensorParams': {
            'encoders': {
                u'c0_timeOfDay': None,
                u'c0_dayOfWeek': None,
                u'c1': {
                    'name': 'c1',
                    'clipInput': True,
                    'n': 275,
                    'fieldname': 'c1',
                    'w': 21,
                    'type': 'AdaptiveScalarEncoder',
                },
                u'c0_weekend': None,
            },
        },
        'spParams': {'synPermInactiveDec': 0.052500000000000005},
        'tmParams': {
            'minThreshold': 11,
            'activationThreshold': 14,
            'pamLength': 3,
        },
        'clParams': {'alpha': 0.050050000000000004},
    },
    'firstRecord': 0,
    'lastRecord': 500,
}
# Import the shared base description with this sub-experiment's config
# applied, then expose the resulting experiment objects (e.g.
# descriptionInterface) at this module's top level.
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,100 | Python | .py | 36 | 56.972222 | 411 | 0.635391 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,183 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_anomaly/a/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 0,
'lastRecord': 250,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,100 | Python | .py | 36 | 56.972222 | 411 | 0.635391 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,184 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_anomaly/b/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 250,
'lastRecord': 500,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,102 | Python | .py | 36 | 57.027778 | 411 | 0.635746 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,185 | base.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/base.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2012-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
VERBOSITY = 0
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
'fields': [(u'c1', 'first'), (u'c0', 'first')],
},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : VERBOSITY,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'c0_timeOfDay': { 'fieldname': u'c0',
'name': u'c0_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
u'c0_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'c0',
'name': u'c0_dayOfWeek',
'type': 'DateEncoder'},
u'c0_weekend': { 'fieldname': u'c0',
'name': u'c0_weekend',
'type': 'DateEncoder',
'weekend': 21},
u'c1': { 'clipInput': True,
'fieldname': u'c1',
'n': 100,
'name': u'c1',
'type': 'AdaptiveScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : VERBOSITY,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : VERBOSITY,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '24',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
dataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data.csv'))
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { 'aggregation': config['aggregationInfo'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'streams': [ { u'columns': [u'c0', u'c1'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'source': 'file://%s' % (dataPath),
u'first_record': config['firstRecord'],
u'last_record': config['lastRecord'],
u'types': [u'datetime', u'float']}],
u'timeField': u'c0',
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [24]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [24], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 15,012 | Python | .py | 325 | 37.615385 | 159 | 0.635082 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,186 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/a_plus_b/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 0,
'lastRecord': 500,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,100 | Python | .py | 36 | 56.972222 | 411 | 0.635391 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,187 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/a/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 0,
'lastRecord': 250,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,100 | Python | .py | 36 | 56.972222 | 411 | 0.635391 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,188 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/b/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'inferenceType': 'NontemporalMultiStep', 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 250,
'lastRecord': 500,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,143 | Python | .py | 36 | 58.166667 | 452 | 0.639048 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,189 | base.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/base.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2012-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
VERBOSITY = 0
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
'fields': [(u'c1', 'first'), (u'c0', 'first')],
},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : VERBOSITY,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'c0_timeOfDay': { 'fieldname': u'c0',
'name': u'c0_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
u'c0_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'c0',
'name': u'c0_dayOfWeek',
'type': 'DateEncoder'},
u'c0_weekend': { 'fieldname': u'c0',
'name': u'c0_weekend',
'type': 'DateEncoder',
'weekend': 21},
u'c1': { 'clipInput': True,
'fieldname': u'c1',
'n': 100,
'name': u'c1',
'type': 'AdaptiveScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : VERBOSITY,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : VERBOSITY,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '24',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
dataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data.csv'))
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { 'aggregation': config['aggregationInfo'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'streams': [ { u'columns': [u'c0', u'c1'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'source': 'file://%s' % (dataPath),
u'first_record': config['firstRecord'],
u'last_record': config['lastRecord'],
u'types': [u'datetime', u'float']}],
u'timeField': u'c0',
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [24]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [24], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 15,012 | Python | .py | 325 | 37.615385 | 159 | 0.635082 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,190 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/a/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'inferenceType': 'NontemporalMultiStep', 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 0,
'lastRecord': 10,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,140 | Python | .py | 36 | 58.083333 | 452 | 0.638531 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,191 | base.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/non_temporal_multi_step/base.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2012-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
VERBOSITY = 0
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
'fields': [(u'c1', 'first'), (u'c0', 'first')],
},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : VERBOSITY,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'c0_timeOfDay': { 'fieldname': u'c0',
'name': u'c0_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
u'c0_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'c0',
'name': u'c0_dayOfWeek',
'type': 'DateEncoder'},
u'c0_weekend': { 'fieldname': u'c0',
'name': u'c0_weekend',
'type': 'DateEncoder',
'weekend': 21},
u'c1': { 'clipInput': True,
'fieldname': u'c1',
'n': 100,
'name': u'c1',
'type': 'AdaptiveScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : VERBOSITY,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : VERBOSITY,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '24',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
dataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data.csv'))
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { 'aggregation': config['aggregationInfo'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'streams': [ { u'columns': [u'c0', u'c1'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'source': 'file://%s' % (dataPath),
u'first_record': config['firstRecord'],
u'last_record': config['lastRecord'],
u'types': [u'datetime', u'float']}],
u'timeField': u'c0',
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [24]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [24], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| 15,015 | Python | .py | 325 | 37.624615 | 159 | 0.635157 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,192 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/non_temporal_multi_step/a_plus_b/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 0,
'lastRecord': 500,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,100 | Python | .py | 36 | 56.972222 | 411 | 0.635391 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,193 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/non_temporal_multi_step/a/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 0,
'lastRecord': 250,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,100 | Python | .py | 36 | 56.972222 | 411 | 0.635391 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,194 | description.py | numenta_nupic-legacy/tests/integration/nupic/opf/opf_checkpoint_test/experiments/non_temporal_multi_step/b/description.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2011-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
###############################################################################
# IMPORTANT!!!
# This params file is dynamically generated by the RunExperimentPermutations
# script. Any changes made manually will be over-written the next time
# RunExperimentPermutations is run!!!
###############################################################################
from nupic.frameworks.opf.exp_description_helpers import importBaseDescription
# the sub-experiment configuration
config ={
'modelParams' : {'sensorParams': {'encoders': {u'c0_timeOfDay': None, u'c0_dayOfWeek': None, u'c1': {'name': 'c1', 'clipInput': True, 'n': 275, 'fieldname': 'c1', 'w': 21, 'type': 'AdaptiveScalarEncoder'}, u'c0_weekend': None}}, 'spParams': {'synPermInactiveDec': 0.052500000000000005}, 'tmParams': {'minThreshold': 11, 'activationThreshold': 14, 'pamLength': 3}, 'clParams': {'alpha': 0.050050000000000004}},
'firstRecord': 250,
'lastRecord': 500,
}
mod = importBaseDescription('../base.py', config)
locals().update(mod.__dict__)
| 2,102 | Python | .py | 36 | 57.027778 | 411 | 0.635746 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,195 | testfixture_test.py | numenta_nupic-legacy/tests/external/testfixture_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Unit tests for our dependencies in the pytest package; at the time of this
writing, we were using an unreleased version of pytest that added support for
the unittest setUpModule fixture and friends. Some of our tests rely on
setUpModule. Once, there was a conflict with pytest installation in our build
system, and an older version of pytest was installed that didn't support
setUpModule, which resulted in subtle side-effects in some of these tests.
"""
import unittest2 as unittest
g_setUpModuleCalled = False
def setUpModule():
global g_setUpModuleCalled
g_setUpModuleCalled = True
class TestPytest(unittest.TestCase):
def testSetUpModuleCalled(self):
self.assertTrue(g_setUpModuleCalled)
if __name__ == "__main__":
unittest.main()
| 1,744 | Python | .py | 38 | 44.131579 | 77 | 0.724689 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,196 | __init__.py | numenta_nupic-legacy/tests/external/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,197 | asteval_test.py | numenta_nupic-legacy/tests/external/asteval_test.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Test asteval module is installed."""
import unittest2 as unittest
class TestCase(unittest.TestCase):
  """Sanity check that the asteval dependency is installed and recent enough."""

  def testImportAndVersions(self):
    """Import asteval and assert its version is strictly greater than 0.9."""
    import asteval
    from pkg_resources import parse_version

    # Compare parsed version objects rather than raw strings so that
    # e.g. "0.10" correctly sorts above "0.9".
    installed = parse_version(asteval.__version__)
    minimum = parse_version("0.9")
    self.assertGreater(installed, minimum)
# Allow running this test module directly (e.g. `python asteval_test.py`);
# unittest.main() discovers and runs the TestCase defined above.
if __name__ == "__main__":
  unittest.main()
| 1,314 | Python | .py | 29 | 43.413793 | 80 | 0.683922 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,198 | __init__.py | numenta_nupic-legacy/tests/swarming/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |
26,199 | __init__.py | numenta_nupic-legacy/tests/swarming/nupic/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
| 976 | Python | .py | 20 | 47.8 | 72 | 0.665272 | numenta/nupic-legacy | 6,330 | 1,556 | 464 | AGPL-3.0 | 9/5/2024, 5:13:42 PM (Europe/Amsterdam) |