desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Try running a simple permutations'
def testSmartSpeculation(self, onCluster=True, env=None, **kwargs):
self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'smart_speculation_temporal') if (env is None): env = dict() env['NTA_TEST_numIterations'] = '99' env['NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow'] = ('%d' % g_repeatableSwarmMaturityWindow) env['NTA_CONF_PROP_...
'Test that smart speculation does the right thing with spatial classification models. This also applies to temporal models where the predicted field is optional (or excluded) since Hypersearch treats them the same.'
def testSmartSpeculationSpatialClassification(self, onCluster=True, env=None, **kwargs):
self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'smart_speculation_spatial_classification') if (env is None): env = dict() env['NTA_TEST_numIterations'] = '99' env['NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow'] = ('%d' % g_repeatableSwarmMaturityWindow) env['...
'Try running a simple permutations'
def testFieldBranching(self, onCluster=True, env=None, **kwargs):
self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'max_branching_temporal') if (env is None): env = dict() env['NTA_TEST_numIterations'] = '99' env['NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow'] = ('%d' % g_repeatableSwarmMaturityWindow) env['NTA_CONF_PROP_nupi...
def testFieldThreshold(self, onCluster=True, env=None, **kwargs):
    """Test minimum field contribution threshold for a field to be
    included in further sprints.

    Delegates to the OneNodeTests implementation, forcing cluster mode.
    """
    self._printTestHeader()
    delegate = OneNodeTests(self._testMethodName)
    return delegate.testFieldThreshold(onCluster=True)
'Try running a simple permutations'
def testFieldContributions(self, onCluster=True, env=None, **kwargs):
self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'field_contrib_temporal') if (env is None): env = dict() env['NTA_TEST_numIterations'] = '99' env['NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow'] = ('%d' % g_repeatableSwarmMaturityWindow) (jobID, jobInfo, result...
def testHTMPredictionModelV2(self):
    """Try running a simple permutations through a real CLA model.

    Delegates to the OneNodeTests implementation on a cluster, capped
    at 4 models.
    """
    self._printTestHeader()
    delegate = OneNodeTests(self._testMethodName)
    return delegate.testHTMPredictionModelV2(onCluster=True, maxModels=4)
def testCLAMultistepModel(self):
    """Try running a simple permutations through a real CLA model that
    uses multistep.

    Delegates to the OneNodeTests implementation on a cluster, capped
    at 4 models.
    """
    self._printTestHeader()
    delegate = OneNodeTests(self._testMethodName)
    return delegate.testCLAMultistepModel(onCluster=True, maxModels=4)
def testLegacyCLAMultistepModel(self):
    """Try running a simple permutations through a real CLA model that
    uses legacy multistep.

    Delegates to the OneNodeTests implementation on a cluster, capped
    at 4 models.
    """
    self._printTestHeader()
    delegate = OneNodeTests(self._testMethodName)
    return delegate.testLegacyCLAMultistepModel(onCluster=True, maxModels=4)
def testSimpleV2VariableWaits(self):
    """Try running a simple permutations where certain field combinations
    take longer to complete. This lets us test that we successfully kill
    models in bad swarms that are still running.
    """
    self._printTestHeader()
    # Configure variable per-model wait times via the environment.
    testEnv = dict()
    testEnv['NTA_TEST_variableWaits'] = 'True'
    testEnv['NTA_TEST_numIterations'] = '100'
    delegate = OneNodeTests('testSimpleV2')
    return delegate.testSimpleV2(onCluster=True, env=testEnv)
'Run a worker on a model for a while, then have it exit before the model finishes. Then, run another worker, which should detect the orphaned model.'
def testOrphanedModel(self, modelRange=(0, 2)):
self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2') env = dict() env['NTA_TEST_numIterations'] = '99' env['NTA_TEST_sysExitModelRange'] = ('%d,%d' % (modelRange[0], modelRange[1])) env['NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs'] = '1' env['NTA_CONF...
'Test behavior when a worker marks 2 models orphaned at the same time.'
def testTwoOrphanedModels(self, modelRange=(0, 2)):
self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'oneField') env = dict() env['NTA_TEST_numIterations'] = '99' env['NTA_TEST_delayModelRange'] = ('%d,%d' % (modelRange[0], modelRange[1])) env['NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs'] = '1' env['NTA_CONF_P...
def testOrphanedModelGen1(self):
    """Run a worker on a model for a while, then have it exit before the
    model finishes. Then, run another worker, which should detect the
    orphaned model.
    """
    self._printTestHeader()
    delegate = MultiNodeTests(self._testMethodName)
    return delegate.testOrphanedModel(modelRange=(10, 11))
'Test to make sure that the maxModels parameter doesn\'t include orphaned models. Run a test with maxModels set to 2, where one becomes orphaned. At the end, there should be 3 models in the models table, one of which will be the new model that adopted the orphaned model'
def testOrphanedModelMaxModels(self):
self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'dummyV2') numModels = 5 env = dict() env['NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs'] = '3' env['NTA_TEST_max_num_models'] = str(numModels) (jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.run...
'Test for the correct behavior when a model uses a different connection id than what is stored in the db. The correct behavior is for the worker to log this as a warning and move on to a new model'
def testOrphanedModelConnection(self):
self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'dummy_multi_v2') numModels = 2 env = dict() env['NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs'] = '1' (jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(expDir, hsImp='v2', loggingLevel...
def testErredModel(self, modelRange=(6, 7)):
    """Run the erred-model scenario on a cluster.

    Delegates to the OneNodeTests implementation with onCluster=True.
    """
    self._printTestHeader()
    delegate = OneNodeTests(self._testMethodName)
    return delegate.testErredModel(onCluster=True)
def testJobFailModel(self):
    """Run the job-fail scenario on a cluster.

    Delegates to the OneNodeTests implementation with onCluster=True.
    """
    self._printTestHeader()
    delegate = OneNodeTests(self._testMethodName)
    return delegate.testJobFailModel(onCluster=True)
def testTooManyErredModels(self, modelRange=(5, 10)):
    """Run the too-many-erred-models scenario on a cluster.

    Delegates to the OneNodeTests implementation with onCluster=True.
    """
    self._printTestHeader()
    delegate = OneNodeTests(self._testMethodName)
    return delegate.testTooManyErredModels(onCluster=True)
def testSpatialClassification(self):
    """Try running a simple permutations with spatial classification.

    Delegates to the OneNodeTests implementation with onCluster=True.
    """
    self._printTestHeader()
    delegate = OneNodeTests(self._testMethodName)
    return delegate.testSpatialClassification(onCluster=True)
'Test to make sure that the best model continues running even when it has matured. The 2nd model (constant) will be marked as mature first and will continue to run till the end. The 2nd model reaches maturity and should stop before all the records are consumed, and should be the best model because it has a lower error'...
def testMatureInterleaved(self):
self._printTestHeader() self.expDir = os.path.join(g_myEnv.testSrcExpDir, ('dummy_multi_v%d' % 2)) self.env['NTA_TEST_max_num_models'] = '2' (jobID, _, _, _, _) = self.runPermutations(self.expDir, hsImp=self.hsImp, maxModels=2, loggingLevel=g_myEnv.options.logLevel, env=self.env, onCluster=True, dummyMo...
'Sanity check to make sure that when only 1 model is running, it continues to run even when it has reached maturity'
def testConstant(self):
self._printTestHeader() (jobID, _, _, _, _) = self.runPermutations(self.expDir, hsImp=self.hsImp, maxModels=1, loggingLevel=g_myEnv.options.logLevel, env=self.env, dummyModel={'metricFunctions': ['lambda x: 100'], 'iterations': 350, 'experimentDirectory': self.expDir}) cjDB = ClientJobsDAO.get() m...
'Run with one really bad swarm to see if terminator picks it up correctly'
def testSimple(self, useCluster=False):
if (not g_myEnv.options.runInProc): self.skipTest('Skipping One Node test since runInProc is not specified') self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'swarm_v2') (jobID, jobInfo, resultInfos, metricResults, minErrScore) = self.runPermutations(e...
'Parse our command-line args/options and strip them from sys.argv Returns the tuple (parsedOptions, remainingArgs)'
@classmethod def _processArgs(cls):
helpString = '%prog [options...] [-- unittestoptions...] [suitename.testname | suitename]\n Run the Hypersearch unit tests. To see unit test framework options, enter:\n python %prog -- --help\n\n Example usag...
@classmethod
def parseArgs(cls):
    """Returns the test arguments after parsing."""
    # _processArgs() returns (parsedOptions, remainingArgs); keep the options.
    parsed = cls._processArgs()
    return parsed[0]
@classmethod
def consumeArgs(cls):
    """Consumes the test arguments and returns the remaining arguments
    meant for unittest.main.
    """
    # _processArgs() returns (parsedOptions, remainingArgs); keep the rest.
    parsed = cls._processArgs()
    return parsed[1]
'Test a single set of sequences once and check that individual predictions reflect the true relative frequencies. Return a success code as well as the trained TM. Success code is 1 for pass, 0 for fail. The trainingSet is a set of 3 sequences that share the same first 4 elements but differ in the 5th element. After fee...
def _testSequence(self, trainingSet, nSequencePresentations=1, tm=None, testSequences=None, doResets=True, relativeFrequencies=None):
trainingSequences = trainingSet[0] trainingFrequencies = trainingSet[1] allTrainingPatterns = trainingSet[2] trainingCummulativeFrequencies = numpy.cumsum(trainingFrequencies) if (testSequences == None): testSequences = trainingSequences if (VERBOSITY > 1): print '============= ...
'Test with fast learning, make sure PAM allows us to train with fewer repeats of the training data.'
def testFastLearning(self):
numOnBitsPerPattern = 3 baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10, sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern, includeCPP=INCLUDE_CPP_TM, numCols=None, activationThreshold=numOnBitsPerPattern, minThreshold=numOnBitsPerPattern, newSynapseCount=numOnBits...
'Test with slow learning, make sure PAM allows us to train with fewer repeats of the training data.'
def testSlowLearning(self):
numOnBitsPerPattern = 3 baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10, sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern, includeCPP=INCLUDE_CPP_TM, numCols=None, activationThreshold=numOnBitsPerPattern, minThreshold=numOnBitsPerPattern, newSynapseCount=numOnBits...
'Test with slow learning, some overlap in the patterns, and TM thresholds of 80% of newSynapseCount Make sure PAM allows us to train with fewer repeats of the training data.'
def testSlowLearningWithOverlap(self):
if SHORT: self.skipTest('Test skipped by default. Enable with --long.') numOnBitsPerPattern = 5 baseParams = dict(seqFunction=buildOverlappedSequences, numSequences=2, seqLen=10, sharedElements=[2, 3], numOnBitsPerPattern=numOnBitsPerPattern, patternOverlap=2, includeCPP=INCLUDE_CP...
'Test with "Forbes-like" data. A bunch of sequences of lengths between 2 and 10 elements long. We will test with both fast and slow learning. Make sure PAM allows us to train with fewer repeats of the training data.'
def testForbesLikeData(self):
if SHORT: self.skipTest('Test skipped by default. Enable with --long.') numOnBitsPerPattern = 3 baseParams = dict(seqFunction=buildSequencePool, numSequences=20, seqLen=[3, 10], numPatterns=10, numOnBitsPerPattern=numOnBitsPerPattern, patternOverlap=1, includeCPP=INCLUDE_CPP_TM, nu...
'Test the KNN classifier in this module. short can be: 0 (short), 1 (medium), or 2 (long)'
def runTestKNNClassifier(self, short=0):
failures = '' if (short != 2): numpy.random.seed(42) else: seed_value = int(time.time()) numpy.random.seed(seed_value) LOGGER.info('Seed used: %d', seed_value) f = open('seedval', 'a') f.write(str(seed_value)) f.write('\n') f.close() ...
'Basic first order sequences'
def testFirstOrder(self):
self.init() sequence = self.sequenceMachine.generateFromNumbers([0, 1, 2, 3, None]) self.feedTM(sequence) self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3]), 0) self.feedTM(sequence, num=2) self.feedTM(sequence) self.assertEqual(len(self.tm.mmGetTracePredictedActiveColu...
'High order sequences (in order)'
def testHighOrder(self):
self.init() sequenceA = self.sequenceMachine.generateFromNumbers([0, 1, 2, 3, None]) sequenceB = self.sequenceMachine.generateFromNumbers([4, 1, 2, 5, None]) self.feedTM(sequenceA, num=5) self.feedTM(sequenceA, learn=False) self.assertEqual(len(self.tm.mmGetTracePredictedActiveColumns().data[3])...
'High order sequences (alternating)'
def testHighOrderAlternating(self):
self.init() sequence = self.sequenceMachine.generateFromNumbers([0, 1, 2, 3, None]) sequence += self.sequenceMachine.generateFromNumbers([4, 1, 2, 5, None]) self.feedTM(sequence) self.feedTM(sequence, num=10) self.feedTM(sequence, learn=False) self.assertEqual(len(self.tm.mmGetTracePredicted...
def testEndlesslyRepeating(self):
    """Endlessly repeating sequence of 2 elements."""
    self.init({'columnDimensions': [2]})
    repeating = self.sequenceMachine.generateFromNumbers([0, 1])
    for _ in xrange(7):
        self.feedTM(repeating)
    self.feedTM(repeating, num=50)
def testEndlesslyRepeatingWithNoNewSynapses(self):
    """Endlessly repeating sequence of 2 elements with maxNewSynapseCount=1."""
    self.init({'columnDimensions': [2],
               'maxNewSynapseCount': 1,
               'cellsPerColumn': 10})
    repeating = self.sequenceMachine.generateFromNumbers([0, 1])
    for _ in xrange(7):
        self.feedTM(repeating)
    self.feedTM(repeating, num=100)
def testLongRepeatingWithNovelEnding(self):
    """Long repeating sequence with novel pattern at the end."""
    self.init({'columnDimensions': [3]})
    sequence = self.sequenceMachine.generateFromNumbers([0, 1])
    # Repeat the base pair 10 times, then append one novel pattern.
    sequence *= 10
    sequence += [self.patternMachine.get(2), None]
    for _ in xrange(4):
        self.feedTM(sequence)
    self.feedTM(sequence, num=10)
def testSingleEndlesslyRepeating(self):
    """A single endlessly repeating pattern."""
    self.init({'columnDimensions': [1]})
    sequence = [self.patternMachine.get(0)]
    for _ in xrange(4):
        self.feedTM(sequence)
    for _ in xrange(2):
        self.feedTM(sequence, num=10)
'Print a single vector succinctly.'
def _printOneTrainingVector(self, x):
print ''.join((('1' if (k != 0) else '.') for k in x))
'Print all vectors'
def _printAllTrainingSequences(self, trainingSequences):
for (i, trainingSequence) in enumerate(trainingSequences): print '============= Sequence', i, '=================' for pattern in trainingSequence: self._printOneTrainingVector(pattern)
def _setVerbosity(self, verbosity, tm, tmPy):
    """Set verbosity level on both TM implementations."""
    # The C++ backed TM also needs its inner cells4 object updated.
    tm.cells4.setVerbosity(verbosity)
    for instance in (tm, tmPy):
        instance.verbosity = verbosity
'Create an instance of the appropriate temporal memory. We isolate all parameters as constants specified here.'
def _createTMs(self, numCols, fixedResources=False, checkSynapseConsistency=True):
minThreshold = 4 activationThreshold = 8 newSynapseCount = 15 initialPerm = 0.3 connectedPerm = 0.5 permanenceInc = 0.1 permanenceDec = 0.05 if fixedResources: permanenceDec = 0.1 maxSegmentsPerCell = 5 maxSynapsesPerSegment = 15 globalDecay = 0 ma...
def _getSimplePatterns(self, numOnes, numPatterns):
    """Very simple patterns. Each pattern has numOnes consecutive bits on.

    There are numPatterns*numOnes bits in the vector. These patterns are
    used as elements of sequences when building up a training set.
    """
    totalCols = numOnes * numPatterns
    patterns = []
    for idx in xrange(numPatterns):
        vec = numpy.zeros(totalCols, dtype='float32')
        vec[idx * numOnes:(idx + 1) * numOnes] = 1
        patterns.append(vec)
    return patterns
'A simple sequence of 5 patterns. The left half of the vector contains the pattern elements, each with numOnes consecutive bits. The right half contains numOnes random bits. The function returns a pair: trainingSequences: A list containing numRepetitions instances of the above sequence testSequence: A single...
def _buildSegmentLearningTrainingSet(self, numOnes=10, numRepetitions=10):
numPatterns = 5 numCols = ((2 * numPatterns) * numOnes) halfCols = (numPatterns * numOnes) numNoiseBits = numOnes p = self._getSimplePatterns(numOnes, numPatterns) trainingSequences = [] for i in xrange(numRepetitions): sequence = [] for j in xrange(numPatterns): ...
'Three simple sequences, composed of the same 5 static patterns. The left half of the vector contains the pattern elements, each with numOnes consecutive bits. The right half contains numOnes random bits. Sequence 1 is: p0, p1, p2, p3, p4 Sequence 2 is: p4, p3, p2, p1, p0 Sequence 3 is: p2, p0, p4, p1, p3 The function ...
def _buildSL2TrainingSet(self, numOnes=10, numRepetitions=10):
numPatterns = 5 numCols = ((2 * numPatterns) * numOnes) halfCols = (numPatterns * numOnes) numNoiseBits = numOnes p = self._getSimplePatterns(numOnes, numPatterns) numSequences = 3 indices = [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0], [2, 0, 4, 1, 3]] trainingSequences = [] for i in xrange((n...
'Train the given TM once on the entire training set. on the Test a single set of sequences once and check that individual predictions reflect the true relative frequencies. Return a success code. Success code is 1 for pass, 0 for fail.'
def _testSegmentLearningSequence(self, tms, trainingSequences, testSequences, doResets=True):
if (testSequences == None): testSequences = trainingSequences (cppTM, pyTM) = (tms[0], tms[1]) if (cppTM is not None): assert (fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True) if (g_options.verbosity > 0): print '============= Training =================' ...
'Test segment learning'
def _testSL1(self, numOnes=10, numRepetitions=6, fixedResources=False, checkSynapseConsistency=True):
if fixedResources: testName = 'TestSL1_FS' else: testName = 'TestSL1' print ('\nRunning %s...' % testName) (trainingSet, testSet) = self._buildSegmentLearningTrainingSet(numOnes, numRepetitions) numCols = len(trainingSet[0][0]) tms = self._createTMs(numCols=numCols, fixedResou...
'Test segment learning'
def _testSL2(self, numOnes=10, numRepetitions=10, fixedResources=False, checkSynapseConsistency=True):
if fixedResources: testName = 'TestSL2_FS' else: testName = 'TestSL2' print ('\nRunning %s...' % testName) (trainingSet, testSet) = self._buildSL2TrainingSet(numOnes, numRepetitions) numCols = len(trainingSet[0][0]) tms = self._createTMs(numCols=numCols, fixedResources=fixedRe...
def test_SL1NoFixedResources(self):
    """Test segment learning without fixed resources."""
    self._testSL1(fixedResources=False,
                  checkSynapseConsistency=g_options.long)
def test_SL1WithFixedResources(self):
    """Test segment learning with fixed resources."""
    # Only meaningful when the long test suite is enabled.
    if not g_options.long:
        print ('Test %s only enabled with the --long option' % self._testMethodName)
        return
    self._testSL1(fixedResources=True,
                  checkSynapseConsistency=g_options.long)
def test_SL2NoFixedResources(self):
    """Test segment learning without fixed resources."""
    # Only meaningful when the long test suite is enabled.
    if not g_options.long:
        print ('Test %s only enabled with the --long option' % self._testMethodName)
        return
    self._testSL2(fixedResources=False,
                  checkSynapseConsistency=g_options.long)
def test_SL2WithFixedResources(self):
    """Test segment learning with fixed resources."""
    # Only meaningful when the long test suite is enabled.
    if not g_options.long:
        print ('Test %s only enabled with the --long option' % self._testMethodName)
        return
    self._testSL2(fixedResources=True,
                  checkSynapseConsistency=g_options.long)
def testB1(self):
    """Basic sequence learner. M=1, N=100, P=1."""
    self.init()
    seq = self.sequenceMachine.generateFromNumbers(
        self.sequenceMachine.generateNumbers(1, 100))
    self.feedTM(seq)
    self._testTM(seq)
    self.assertAllActiveWerePredicted()
    self.assertAllInactiveWereUnpredicted()
def testB3(self):
    """N=300, M=1, P=1. (See how high we can go with N)"""
    self.init()
    seq = self.sequenceMachine.generateFromNumbers(
        self.sequenceMachine.generateNumbers(1, 300))
    self.feedTM(seq)
    self._testTM(seq)
    self.assertAllActiveWerePredicted()
    self.assertAllInactiveWereUnpredicted()
def testB4(self):
    """N=100, M=3, P=1. (See how high we can go with N*M)"""
    self.init()
    seq = self.sequenceMachine.generateFromNumbers(
        self.sequenceMachine.generateNumbers(3, 100))
    self.feedTM(seq)
    self._testTM(seq)
    # NOTE: unlike B1/B3, only the active-column check is made here.
    self.assertAllActiveWerePredicted()
def testB5(self):
    """Like B1 but with cellsPerColumn = 4.

    First order sequences should still work just fine.
    """
    self.init({'cellsPerColumn': 4})
    seq = self.sequenceMachine.generateFromNumbers(
        self.sequenceMachine.generateNumbers(1, 100))
    self.feedTM(seq)
    self._testTM(seq)
    self.assertAllActiveWerePredicted()
    self.assertAllInactiveWereUnpredicted()
def testB6(self):
    """Like B4 but with cellsPerColumn = 4.

    First order sequences should still work just fine.
    """
    self.init({'cellsPerColumn': 4})
    seq = self.sequenceMachine.generateFromNumbers(
        self.sequenceMachine.generateNumbers(3, 100))
    self.feedTM(seq)
    self._testTM(seq)
    self.assertAllActiveWerePredicted()
    self.assertAllInactiveWereUnpredicted()
'Like B1 but with slower learning. Set the following parameters differently: initialPermanence = 0.2 connectedPermanence = 0.7 permanenceIncrement = 0.2 Now we train the TM with the B1 sequence 4 times (P=4). This will increment the permanences to be above 0.8 and at that point the inference will be correct. This test ...
def testB7(self):
self.init({'initialPermanence': 0.2, 'connectedPermanence': 0.7, 'permanenceIncrement': 0.2}) numbers = self.sequenceMachine.generateNumbers(1, 100) sequence = self.sequenceMachine.generateFromNumbers(numbers) for _ in xrange(4): self.feedTM(sequence) self._testTM(sequence) self.assertAl...
'Like B7 but with 4 cells per column. Should still work.'
def testB8(self):
self.init({'initialPermanence': 0.2, 'connectedPermanence': 0.7, 'permanenceIncrement': 0.2, 'cellsPerColumn': 4}) numbers = self.sequenceMachine.generateNumbers(1, 100) sequence = self.sequenceMachine.generateFromNumbers(numbers) for _ in xrange(4): self.feedTM(sequence) self._testTM(sequen...
'Like B7 but present the sequence less than 4 times. The inference should be incorrect.'
def testB9(self):
self.init({'initialPermanence': 0.2, 'connectedPermanence': 0.7, 'permanenceIncrement': 0.2}) numbers = self.sequenceMachine.generateNumbers(1, 100) sequence = self.sequenceMachine.generateFromNumbers(numbers) for _ in xrange(3): self.feedTM(sequence) self._testTM(sequence) self.assertAl...
'Like B5, but with activationThreshold = 8 and with each pattern corrupted by a small amount of spatial noise (X = 0.05).'
def testB11(self):
self.init({'cellsPerColumn': 4, 'activationThreshold': 8, 'minThreshold': 8}) numbers = self.sequenceMachine.generateNumbers(1, 100) sequence = self.sequenceMachine.generateFromNumbers(numbers) self.feedTM(sequence) sequence = self.sequenceMachine.addSpatialNoise(sequence, 0.05) self._testTM(seq...
'Learn two sequences with a short shared pattern. Parameters should be the same as B1. Since cellsPerColumn == 1, it should make more predictions than necessary.'
def testH1(self):
self.init() numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15)) sequence = self.sequenceMachine.generateFromNumbers(numbers) self.feedTM(sequence) self._testTM(sequence) self.assertAllActiveWerePredicted() predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(self.tm.mmGe...
'Same as H1, but with cellsPerColumn == 4, and train multiple times. It should make just the right number of predictions.'
def testH2(self):
self.init({'cellsPerColumn': 4}) numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15)) sequence = self.sequenceMachine.generateFromNumbers(numbers) for _ in xrange(10): self.feedTM(sequence) self._testTM(sequence) self.assertAllActiveWerePredicted() predictedInactiveColumns...
'Like H2, except the shared subsequence is in the beginning. (e.g. "ABCDEF" and "ABCGHIJ") At the point where the shared subsequence ends, all possible next patterns should be predicted. As soon as you see the first unique pattern, the predictions should collapse to be a perfect prediction.'
def testH3(self):
self.init({'cellsPerColumn': 4}) numbers = self.sequenceMachine.generateNumbers(2, 20, (0, 5)) sequence = self.sequenceMachine.generateFromNumbers(numbers) self.feedTM(sequence) self._testTM(sequence) self.assertAllActiveWerePredicted() predictedInactiveColumnsMetric = self.tm.mmGetMetricFro...
'Shared patterns. Similar to H2 except that patterns are shared between sequences. All sequences are different shufflings of the same set of N patterns (there is no shared subsequence).'
def testH4(self):
self.init({'cellsPerColumn': 4}) numbers = [] for _ in xrange(2): numbers += self.sequenceMachine.generateNumbers(1, 20) sequence = self.sequenceMachine.generateFromNumbers(numbers) for _ in xrange(20): self.feedTM(sequence) self._testTM(sequence) self.assertAllActiveWerePred...
'Combination of H4) and H2). Shared patterns in different sequences, with a shared subsequence.'
def testH5(self):
self.init({'cellsPerColumn': 4}) numbers = [] shared = self.sequenceMachine.generateNumbers(1, 5)[:(-1)] for _ in xrange(2): sublist = self.sequenceMachine.generateNumbers(1, 20) sublist = [x for x in sublist if (x not in xrange(5))] numbers += ((sublist[0:10] + shared) + sublist...
'Sensitivity to small amounts of spatial noise during inference (X = 0.05). Parameters the same as B11, and sequences like H2.'
def testH9(self):
self.init({'cellsPerColumn': 4, 'activationThreshold': 8, 'minThreshold': 8}) numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15)) sequence = self.sequenceMachine.generateFromNumbers(numbers) for _ in xrange(10): self.feedTM(sequence) sequence = self.sequenceMachine.addSpatialNois...
'Orphan Decay mechanism reduce predicted inactive cells (extra predictions). Test feeds in noisy sequences (X = 0.05) to TM with and without orphan decay. TM with orphan decay should has many fewer predicted inactive columns. Parameters the same as B11, and sequences like H9.'
def testH10(self):
self.init({'cellsPerColumn': 4, 'activationThreshold': 8, 'minThreshold': 8}) numbers = self.sequenceMachine.generateNumbers(2, 20, (10, 15)) sequence = self.sequenceMachine.generateFromNumbers(numbers) sequenceNoisy = dict() for i in xrange(10): sequenceNoisy[i] = self.sequenceMachine.addSp...
def getOpfExperimentPath(self, experimentName):
    """Resolve the directory path of a named OPF experiment.

    experimentName: e.g., "gym"; this string will be used to form a
        directory path to the experiment.

    Returns: absolute path to the experiment directory.
    """
    path = os.path.join(self.__opfExperimentsParentDir, experimentName)
    assert os.path.isdir(path), (
        "Experiment path %s doesn't exist or is not a directory" % (path,))
    return path
def setUp(self):
    """Method called to prepare the test fixture.

    Lazily creates the shared test environment on first use.
    """
    global g_myEnv
    if g_myEnv:
        return
    g_myEnv = MyTestEnvironment()
def tearDown(self):
    """Method called immediately after the test method has been called and
    the result recorded. Clears any extra log items accumulated by the test.
    """
    self.resetExtraLogItems()
def shortDescription(self):
    """Override to force unittest framework to use test method names
    instead of docstrings in the report.
    """
    return None
'Executes a positive OPF RunExperiment test as a subprocess and validates its exit status. experimentName: e.g., "gym"; this string will be used to form a directory path to the experiment. short: if True, attempt to run the experiment with --testMode flag turned on, which causes all inference and train...
def executePositiveOpfExperiment(self, experimentName, short=False):
opfRunner = g_myEnv.getOpfRunExperimentPyPath() opfExpDir = g_myEnv.getOpfExperimentPath(experimentName) r = self.__executePositiveRunExperimentTest(runnerPath=opfRunner, experimentDirPath=opfExpDir, short=short) return r
'Executes a positive RunExperiment.py test and performs basic validation runnerPath: experiment running (LPF or OPF RunExperiment.py path) experimentDirPath: directory containing the description.py file of interest short: if True, attempt to run the experiment with --testMode flag turned on, which...
def __executePositiveRunExperimentTest(self, runnerPath, experimentDirPath, customOptions=[], short=False):
command = ['python', runnerPath, experimentDirPath] command.extend(customOptions) if short: command.append('--testMode') self.addExtraLogItem({'command': command}) r = _executeExternalCmdAndReapOutputs(command) self.addExtraLogItem({'result': r}) _debugOut(('_executeExternalCmdAndRea...
def setUp(self):
    """Method called to prepare the test fixture.

    Lazily creates the shared test environment, pointing it at the
    installed nupic package directory.
    """
    global g_myEnv
    if g_myEnv:
        return
    # Build a minimal anonymous options object carrying only installDir.
    params = type('obj', (object,), {
        'installDir': resource_filename('nupic', ''),
    })
    g_myEnv = MyTestEnvironment(params)
def tearDown(self):
    """Method called immediately after the test method has been called and
    the result recorded. Clears extra log items and cleans up the shared
    test environment.
    """
    self.resetExtraLogItems()
    g_myEnv.cleanUp()
def shortDescription(self):
    """Override to force unittest framework to use test method names
    instead of docstrings in the report.
    """
    return None
'This does the following: 1.) Calls ExpGenerator to generate a base description file and permutations file from expDescription. 2.) Verifies that description.py and permutations.py are valid python modules that can be loaded 3.) Returns the loaded base description module and permutations module Parameters: expDesc: ...
def getModules(self, expDesc, hsVersion='v2'):
shutil.rmtree(g_myEnv.testOutDir, ignore_errors=True) args = [('--description=%s' % json.dumps(expDesc)), ('--outDir=%s' % g_myEnv.testOutDir), ('--version=%s' % hsVersion)] self.addExtraLogItem({'args': args}) experiment_generator.expGenerator(args) descriptionPyPath = os.path.join(g_myEnv.testOutD...
'This does the following: 1.) Calls ExpGenerator to generate a base description file and permutations file from expDescription. 2.) Verifies that description.py and permutations.py are valid python modules that can be loaded 3.) Runs the base description.py as an experiment using OPF RunExperiment. 4.) Runs a Hypersear...
def runBaseDescriptionAndPermutations(self, expDesc, hsVersion, maxModels=2):
self.getModules(expDesc, hsVersion=hsVersion) permutationsPyPath = os.path.join(g_myEnv.testOutDir, 'permutations.py') args = [g_myEnv.testOutDir] from nupic.frameworks.opf.experiment_runner import runExperiment LOGGER.info('') LOGGER.info('=======================================================...
'Test that the set of aggregations produced for a swarm are correct Parameters: expDesc: JSON experiment description expectedAttempts: list of (minAggregationMultiple, predictionSteps) pairs that we expect to find in the aggregation choices.'
def assertValidSwarmingAggregations(self, expDesc, expectedAttempts):
minAggregation = dict(expDesc['streamDef']['aggregation']) minAggregation.pop('fields') (base, perms) = self.getModules(expDesc) predictionSteps = expDesc['inferenceArgs']['predictionSteps'][0] self.assertEqual(base.control['inferenceArgs']['predictionSteps'], expDesc['inferenceArgs']['predictionSte...
def test_ShowSchema(self):
    """Test showing the schema."""
    cmdArgs = ['--showSchema']
    self.addExtraLogItem({'args': cmdArgs})
    experiment_generator.expGenerator(cmdArgs)
    return
'Test correct behavior in response to different settings in the prediction element'
def test_PredictionElement(self):
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])]) expDesc = {'inferenceType': 'MultiStep', 'inferenceArgs': {'predictedField': 'consumption', 'predictionSteps': [1]}, 'environment': OpfEnvironment.Experiment, 'streamDef': ...
'Test to make sure that the correct metrics are generated'
def test_Metrics(self):
streamDef = dict(version=1, info='test_category_predicted_field', streams=[dict(source='file://dummy', info='dummy.csv', columns=['*'])]) expDesc = {'inferenceType': 'MultiStep', 'inferenceArgs': {'predictedField': 'playType', 'predictionSteps': [1]}, 'streamDef': streamDef, 'includedFields': [{'fieldName': 'ti...
'Test correct behavior in response to different settings in the includedFields element'
def test_IncludedFields(self):
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])]) expDesc = {'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Experiment, 'streamDef': streamDef, 'inclu...
'Test that aggregation gets pulled out of the streamDef as it should'
def test_Aggregation(self):
streamDef = dict(version=1, info='TestAggregation', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])], aggregation={'years': 1, 'months': 2, 'weeks': 3, 'days': 4, 'hours': 5, 'minutes': 6, 'seconds': 7, 'milliseconds': 8, 'microseconds': 9, 'fields': [('consumption', 'sum'), ('g...
'Test that reset period gets handled correctly'
def test_ResetPeriod(self):
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])]) expDesc = {'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Experiment, 'streamDef': streamDef, 'inclu...
'Try running a basic Hypersearch V2 experiment and permutations'
def test_RunningExperimentHSv2(self):
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])]) expDesc = {'inferenceType': 'TemporalMultiStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Nupic, 'streamDef': streamDef, 'includedF...
'Test the we correctly generate a multi-step prediction experiment'
def test_MultiStep(self):
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'], last_record=20)], aggregation={'years': 0, 'months': 0, 'weeks': 0, 'days': 0, 'hours': 1, 'minutes': 0, 'seconds': 0, 'milliseconds': 0, 'microseconds': 0, 'fields': [('consumpt...
'Test the we correctly generate a multi-step prediction experiment that uses aggregation swarming'
def test_AggregationSwarming(self):
minAggregation = {'years': 0, 'months': 0, 'weeks': 0, 'days': 0, 'hours': 0, 'minutes': 15, 'seconds': 0, 'milliseconds': 0, 'microseconds': 0} streamAggregation = dict(minAggregation) streamAggregation.update({'fields': [('consumption', 'sum'), ('gym', 'first'), ('timestamp', 'first')]}) streamDef = d...
'Test correct behavior in response to different settings in the swarmSize element'
def test_SwarmSize(self):
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])]) expDesc = {'swarmSize': 'large', 'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Nupic, 'streamDef': ...
'Test correct behavior in response to setting the fixedFields swarming option.'
def test_FixedFields(self):
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])]) expDesc = {'swarmSize': 'large', 'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumption'}, 'environment': OpfEnvironment.Nupic, 'streamDef': ...
'Test correct behavior in response to setting the fastSwarmModelParams swarming option.'
def test_FastSwarmModelParams(self):
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])]) fastSwarmModelParams = {'this is': 'a test'} expDesc = {'swarmSize': 'large', 'inferenceType': 'TemporalNextStep', 'inferenceArgs': {'predictedField': 'consumptio...
'Test correct behavior in response to setting the anomalyParams experiment description options'
def test_AnomalyParams(self):
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'])]) expDesc = {'environment': OpfEnvironment.Nupic, 'inferenceArgs': {'predictedField': 'consumption', 'predictionSteps': [1]}, 'inferenceType': 'TemporalAnomaly', 'streamDef':...
'Test the we correctly generate a Nontemporal classification experiment'
def test_NontemporalClassification(self):
streamDef = dict(version=1, info='test_NoProviders', streams=[dict(source=('file://%s' % HOTGYM_INPUT), info='hotGym.csv', columns=['*'], last_record=10)], aggregation={'years': 0, 'months': 0, 'weeks': 0, 'days': 0, 'hours': 1, 'minutes': 0, 'seconds': 0, 'milliseconds': 0, 'microseconds': 0, 'fields': [('consumpt...
'This function tests saving and loading. It will train a network for 500 iterations, then save it and reload it as a second network instance. It will then run both networks for 100 iterations and ensure they return identical results.'
def testSaveAndReload(self):
print 'Creating network...' netOPF = _createOPFNetwork() level1OPF = netOPF.regions['level1SP'] print 'Training network for 500 iterations' level1OPF.setParameter('learningMode', 1) level1OPF.setParameter('inferenceMode', 0) netOPF.run(500) level1OPF.setParameter('learning...
'Test maxEnabledPhase'
def testMaxEnabledPhase(self):
print 'Creating network...' netOPF = _createOPFNetwork(addSP=True, addTP=True) netOPF.initialize() level1SP = netOPF.regions['level1SP'] level1SP.setParameter('learningMode', 1) level1SP.setParameter('inferenceMode', 0) tm = netOPF.regions['level1TP'] tm.setParameter('learningMode', 0...
'Run specific experiments and verify that they are producing the correct results. opfDir is the examples/opf directory in the install path and is used to find run_opf_experiment.py The testdir is the directory that contains the experiments we will be running. When running in the auto-build setup, this will be a tempora...
def testExperimentResults(self):
nupic_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..', '..') opfDir = os.path.join(nupic_dir, 'examples', 'opf') testDir = opfDir if (not os.path.exists(os.path.join(testDir, 'experiments/classification'))): testDir = opfDir command = ['python', os.path.join(...
def shortDescription(self):
  """Override unittest.TestCase.shortDescription.

  Returning None forces the unittest framework to report test method
  names instead of the first line of each test's docstring.
  """
  return None
'Compare temporal or non-temporal predictions for the given experiment that just finished executing experimentName: e.g., "gym"; this string will be used to form a directory path to the experiments. maxMismatches: Maximum number of row mismatches to report before terminating the comparison; None means: report ...
def compareOPFPredictionFiles(self, path1, path2, temporal, maxMismatches=None):
experimentLabel = ('%s prediction comparison' % ('Temporal' if temporal else 'Non-Temporal')) print ('%s: Performing comparison of OPF prediction CSV files %r and %r' % (experimentLabel, path1, path2)) self.assertTrue(os.path.isfile(path1), msg=("OPF prediction file...
def _openOpfPredictionCsvFile(self, filepath):
  """Open an OPF prediction CSV file and advance it past the header rows.

  OPF prediction CSVs carry three header rows (field names, field types,
  special-field markers); this skips all three so the returned reader is
  positioned at the first data row.

  Parameters:
    filepath: path of the OPF prediction CSV file to open.

  Returns: tuple (csvReader, fieldNames) where csvReader is the csv
  reader object and fieldNames is the sequence of names from the first
  header row.
  """
  reader = self._openCsvFile(filepath)
  fieldNames = reader.next()
  reader.next()  # field-types header row: read and discarded
  reader.next()  # special-fields header row: read and discarded
  return (reader, fieldNames)
'Test that we get the same predictions out from the following two scenarios: a_plus_b: Run the network for \'a\' iterations followed by \'b\' iterations a, followed by b: Run the network for \'a\' iterations, save it, load it back in, then run for \'b\' iterations. Parameters: experiment: base directory of the experi...
def _testSamePredictions(self, experiment, predSteps, checkpointAt, predictionsFilename, additionalFields=None, newSerialization=False):
aPlusBExpDir = os.path.join(_EXPERIMENT_BASE, experiment, 'a_plus_b') aExpDir = os.path.join(_EXPERIMENT_BASE, experiment, 'a') bExpDir = os.path.join(_EXPERIMENT_BASE, experiment, 'b') args = self._createExperimentArgs(aPlusBExpDir, newSerialization=newSerialization) _aPlusBExp = runExperiment(args...