desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Test the distribution of predictions with overlapping input SDRs
Here, we intend the classifier to learn the associations:
SDR1 => bucketIdx 0 (30%)
=> bucketIdx 1 (30%)
=> bucketIdx 2 (40%)
SDR2 => bucketIdx 1 (50%)
=> bucketIdx 3 (50%)
SDR1 and SDR2 has 10% overlaps (2 bits out of 20)
The classifier should get... | def testPredictionDistributionOverlap(self):
| c = self._classifier([0], 0.0005, 0.1, 0)
recordNum = 0
SDR1 = numpy.arange(0, 39, step=2)
SDR2 = numpy.arange(1, 40, step=2)
SDR2[3] = SDR1[5]
SDR2[5] = SDR1[11]
random.seed(42)
for _ in xrange(5000):
randomNumber = random.random()
if (randomNumber < 0.3):
bu... |
'Test the distribution of predictions.
Here, we intend the classifier to learn the associations:
[1,3,5] => bucketIdx 0 & 1
[2,4,6] => bucketIdx 2 & 3
The classifier should get the distribution almost right given enough
repetitions and a small learning rate'
| def testPredictionMultipleCategories(self):
| c = self._classifier([0], 0.001, 0.1, 0)
SDR1 = [1, 3, 5]
SDR2 = [2, 4, 6]
recordNum = 0
random.seed(42)
for _ in xrange(5000):
c.compute(recordNum=recordNum, patternNZ=SDR1, classification={'bucketIdx': [0, 1], 'actValue': [0, 1]}, learn=True, infer=False)
recordNum += 1
... |
'Test continuous learning
First, we intend the classifier to learn the associations:
SDR1 => bucketIdx 0 (30%)
=> bucketIdx 1 (30%)
=> bucketIdx 2 (40%)
SDR2 => bucketIdx 1 (50%)
=> bucketIdx 3 (50%)
After 20000 iterations, we change the association to
SDR1 => bucketIdx 0 (30%)
=> bucketIdx 1 (20%)
=> bucketId... | def testPredictionDistributionContinuousLearning(self):
| c = self._classifier([0], 0.001, 0.1, 0)
recordNum = 0
SDR1 = [1, 3, 5]
SDR2 = [2, 4, 6]
random.seed(42)
for _ in xrange(10000):
randomNumber = random.random()
if (randomNumber < 0.3):
bucketIdx = 0
elif (randomNumber < 0.6):
bucketIdx = 1
... |
'Test multi-step predictions
We train the 0-step and the 1-step classifiers simultaneously on
data stream
(SDR1, bucketIdx0)
(SDR2, bucketIdx1)
(SDR1, bucketIdx0)
(SDR2, bucketIdx1)
We intend the 0-step classifier to learn the associations:
SDR1 => bucketIdx 0
SDR2 => bucketIdx 1
and the 1-step classifier to lear... | def testMultiStepPredictions(self):
| c = self._classifier([0, 1], 1.0, 0.1, 0)
SDR1 = [1, 3, 5]
SDR2 = [2, 4, 6]
recordNum = 0
for _ in xrange(100):
c.compute(recordNum=recordNum, patternNZ=SDR1, classification={'bucketIdx': 0, 'actValue': 0}, learn=True, infer=False)
recordNum += 1
c.compute(recordNum=recordNum... |
'Test creation, pickling, and basic run of learning and inference.'
| def _basicTest(self, tm=None):
| trainingSet = _getSimplePatterns(10, 10)
for _ in range(2):
for seq in trainingSet[0:5]:
for _ in range(10):
tm.learn(seq)
tm.reset()
print 'Learning completed'
print 'Running inference'
tm.collectStats = True
for seq in trainingSet[0:5]:
... |
'Test cumulative anomaly scores.'
| def testAnomalyCumulative(self):
| anomalyComputer = anomaly.Anomaly(slidingWindowSize=3)
predicted = (array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]))
actual = (array([1, 2, 6]), array([1, 2, 6]), array([1, 4, 6]), array([10, 11,... |
'serialization using pickle'
| def testSerialization(self):
| aDef = Anomaly()
aLike = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
aWeig = Anomaly(mode=Anomaly.MODE_WEIGHTED)
aAll = Anomaly(mode=Anomaly.MODE_LIKELIHOOD, slidingWindowSize=5)
inst = [aDef, aLike, aWeig, aAll]
for a in inst:
stored = pickle.dumps(a)
restored = pickle.loads(stored)
... |
'Feed in some vectors and retrieve outputs. Ensure the right number of
columns win, that we always get binary outputs, and that nothing crashes.'
| def basicComputeLoop(self, imp, params, inputSize, columnDimensions, seed=None):
| sp = CreateSP(imp, params)
numRecords = 100
randomState = getNumpyRandomGenerator(seed)
inputMatrix = (randomState.rand(numRecords, inputSize) > 0.8).astype(uintType)
y = numpy.zeros(columnDimensions, dtype=uintType)
dutyCycles = numpy.zeros(columnDimensions, dtype=uintType)
for v in inputMa... |
'Run basicComputeLoop with mostly default parameters'
| def testBasicCompute1(self):
| inputSize = 30
columnDimensions = 50
params = {'inputDimensions': [inputSize], 'columnDimensions': [columnDimensions], 'potentialRadius': inputSize, 'globalInhibition': True, 'seed': int(((time.time() % 10000) * 10))}
print 'testBasicCompute1, SP seed set to:', params['seed']
self.basicC... |
'Run basicComputeLoop with learning turned off.'
| def testBasicCompute2(self):
| inputSize = 100
columnDimensions = 100
params = {'inputDimensions': [inputSize], 'columnDimensions': [columnDimensions], 'potentialRadius': inputSize, 'globalInhibition': True, 'synPermActiveInc': 0.0, 'synPermInactiveDec': 0.0, 'seed': int(((time.time() % 10000) * 10))}
print 'testBasicCompute2, SP ... |
'Checks that feeding in the same input vector leads to polarized
permanence values: either zeros or ones, but no fractions'
| def testCompute1(self):
| sp = SpatialPooler(inputDimensions=[9], columnDimensions=[5], potentialRadius=3, potentialPct=0.5, globalInhibition=False, localAreaDensity=(-1.0), numActiveColumnsPerInhArea=3, stimulusThreshold=1, synPermInactiveDec=0.1, synPermActiveInc=0.1, synPermConnected=0.1, minPctOverlapDutyCycle=0.1, dutyCyclePeriod=10, b... |
'Checks that columns only change the permanence values for
inputs that are within their potential pool'
| def testCompute2(self):
| sp = SpatialPooler(inputDimensions=[10], columnDimensions=[5], potentialRadius=3, potentialPct=0.5, globalInhibition=False, localAreaDensity=(-1.0), numActiveColumnsPerInhArea=3, stimulusThreshold=1, synPermInactiveDec=0.01, synPermActiveInc=0.1, synPermConnected=0.1, minPctOverlapDutyCycle=0.1, dutyCyclePeriod=10,... |
'When stimulusThreshold is 0, allow columns without any overlap to become
active. This test focuses on the global inhibition code path.'
| def testZeroOverlap_NoStimulusThreshold_GlobalInhibition(self):
| inputSize = 10
nColumns = 20
sp = SpatialPooler(inputDimensions=[inputSize], columnDimensions=[nColumns], potentialRadius=10, globalInhibition=True, numActiveColumnsPerInhArea=3, stimulusThreshold=0, seed=getSeed())
inputVector = numpy.zeros(inputSize)
activeArray = numpy.zeros(nColumns)
sp.comp... |
'When stimulusThreshold is > 0, don\'t allow columns without any overlap to
become active. This test focuses on the global inhibition code path.'
| def testZeroOverlap_StimulusThreshold_GlobalInhibition(self):
| inputSize = 10
nColumns = 20
sp = SpatialPooler(inputDimensions=[inputSize], columnDimensions=[nColumns], potentialRadius=10, globalInhibition=True, numActiveColumnsPerInhArea=3, stimulusThreshold=1, seed=getSeed())
inputVector = numpy.zeros(inputSize)
activeArray = numpy.zeros(nColumns)
sp.comp... |
'When stimulusThreshold is 0, allow columns without any overlap to become
active. This test focuses on the local inhibition code path.'
| def testZeroOverlap_NoStimulusThreshold_LocalInhibition(self):
| inputSize = 10
nColumns = 20
sp = SpatialPooler(inputDimensions=[inputSize], columnDimensions=[nColumns], potentialRadius=5, globalInhibition=False, numActiveColumnsPerInhArea=1, stimulusThreshold=0, seed=getSeed())
sp.setInhibitionRadius(2)
inputVector = numpy.zeros(inputSize)
activeArray = num... |
'When stimulusThreshold is > 0, don\'t allow columns without any overlap to
become active. This test focuses on the local inhibition code path.'
| def testZeroOverlap_StimulusThreshold_LocalInhibition(self):
| inputSize = 10
nColumns = 20
sp = SpatialPooler(inputDimensions=[inputSize], columnDimensions=[nColumns], potentialRadius=10, globalInhibition=False, numActiveColumnsPerInhArea=3, stimulusThreshold=1, seed=getSeed())
inputVector = numpy.zeros(inputSize)
activeArray = numpy.zeros(nColumns)
sp.com... |
'Checks that overlaps and boostedOverlaps are correctly returned'
| def testOverlapsOutput(self):
| sp = SpatialPooler(inputDimensions=[5], columnDimensions=[3], potentialRadius=5, numActiveColumnsPerInhArea=5, globalInhibition=True, seed=1, synPermActiveInc=0.1, synPermInactiveDec=0.1)
inputVector = numpy.ones(5)
activeArray = numpy.zeros(3)
expOutput = numpy.array([2, 0, 0], dtype=realDType)
boo... |
'Given a specific input and initialization params the SP should return this
exact output.
Previously output varied between platforms (OSX/Linux etc)'
| def testExactOutput(self):
| expectedOutput = [57, 80, 135, 215, 281, 350, 431, 534, 556, 565, 574, 595, 663, 759, 777, 823, 932, 933, 1031, 1126, 1184, 1262, 1468, 1479, 1516, 1531, 1585, 1672, 1793, 1807, 1906, 1927, 1936, 1939, 1940, 1944, 1957, 1978, 2040, 2047]
sp = SpatialPooler(inputDimensions=[1, 188], columnDimensions=[2048, 1], p... |
'Test that column computes overlap and percent overlap correctly.'
| def testCalculateOverlap(self):
| sp = SpatialPooler(inputDimensions=[10], columnDimensions=[5])
sp._connectedSynapses = BinaryCorticalColumns([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])
sp._connectedCounts = numpy.array([10... |
'test initial permanence generation. ensure that
a correct amount of synapses are initialized in
a connected state, with permanence values drawn from
the correct ranges'
| def testInitPermanence1(self):
| sp = self._sp
sp._inputDimensions = numpy.array([10])
sp._numInputs = 10
sp._raisePermanenceToThreshold = Mock()
sp._potentialRadius = 2
connectedPct = 1
mask = numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 1, 1])
perm = sp._initPermanence(mask, connectedPct)
connected = (perm >= sp._synPermC... |
'Test initial permanence generation. ensure that permanence values
are only assigned to bits within a column\'s potential pool.'
| def testInitPermanence2(self):
| sp = self._sp
sp._raisePermanenceToThreshold = Mock()
sp._numInputs = 10
connectedPct = 1
mask = numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
perm = sp._initPermanence(mask, connectedPct)
connected = list((perm > 0).astype(int))
trueConnected = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
self.asse... |
'Tests that duty cycles are updated properly according
to the mathematical formula. also check the effects of
supplying a maxPeriod to the function.'
| def testUpdateDutyCycleHelper(self):
| dc = numpy.zeros(5)
dc = numpy.array([1000.0, 1000.0, 1000.0, 1000.0, 1000.0])
period = 1000
newvals = numpy.zeros(5)
newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
trueNewDc = [999, 999, 999, 999, 999]
self.assertListEqual(list(newDc), trueNewDc)
dc = numpy.array([10... |
'Tests that global inhibition correctly picks the
correct top number of overlap scores as winning columns.'
| def testInhibitColumnsGlobal(self):
| sp = self._sp
density = 0.3
sp._numColumns = 10
overlaps = numpy.array([1, 2, 1, 4, 8, 3, 12, 5, 4, 1], dtype=realDType)
active = list(sp._inhibitColumnsGlobal(overlaps, density))
trueActive = numpy.zeros(sp._numColumns)
trueActive = [4, 6, 7]
self.assertListEqual(list(trueActive), sorte... |
'After feeding in a record the number of active columns should
always be equal to numActivePerInhArea'
| @unittest.skip('Ported from the removed FlatSpatialPooler but fails. See: https://github.com/numenta/nupic/issues/1897')
def testActiveColumnsEqualNumActive(self):
| for i in [1, 10, 50]:
numActive = i
inputShape = 10
sp = SpatialPooler(inputDimensions=[inputShape], columnDimensions=[100], numActiveColumnsPerInhArea=numActive)
inputArray = (numpy.random.rand(inputShape) > 0.5).astype(uintDType)
inputArray2 = (numpy.random.rand(inputShape)... |
'Creates a segment, destroys it, and makes sure it got destroyed along
with all of its synapses.'
| def testDestroySegment(self):
| connections = Connections(1024)
connections.createSegment(10)
segment2 = connections.createSegment(20)
connections.createSegment(30)
connections.createSegment(40)
connections.createSynapse(segment2, 80, 0.85)
connections.createSynapse(segment2, 81, 0.85)
connections.createSynapse(segment... |
'Creates a segment, creates a number of synapses on it, destroys a
synapse, and makes sure it got destroyed.'
| def testDestroySynapse(self):
| connections = Connections(1024)
segment = connections.createSegment(20)
synapse1 = connections.createSynapse(segment, 80, 0.85)
synapse2 = connections.createSynapse(segment, 81, 0.85)
synapse3 = connections.createSynapse(segment, 82, 0.15)
self.assertEqual(3, connections.numSynapses())
conne... |
'Creates segments and synapses, then destroys segments and synapses on
either side of them and verifies that existing Segment and Synapse
instances still point to the same segment / synapse as before.'
| def testPathsNotInvalidatedByOtherDestroys(self):
| connections = Connections(1024)
segment1 = connections.createSegment(11)
connections.createSegment(12)
segment3 = connections.createSegment(13)
connections.createSegment(14)
segment5 = connections.createSegment(15)
synapse1 = connections.createSynapse(segment3, 201, 0.85)
synapse2 = conn... |
'Destroy a segment that has a destroyed synapse and a non-destroyed
synapse. Make sure nothing gets double-destroyed.'
| def testDestroySegmentWithDestroyedSynapses(self):
| connections = Connections(1024)
segment1 = connections.createSegment(11)
segment2 = connections.createSegment(12)
connections.createSynapse(segment1, 101, 0.85)
synapse2a = connections.createSynapse(segment2, 201, 0.85)
connections.createSynapse(segment2, 202, 0.85)
self.assertEqual(3, conne... |
'Destroy a segment that has a destroyed synapse and a non-destroyed
synapse. Create a new segment in the same place. Make sure its synapse
count is correct.'
| def testReuseSegmentWithDestroyedSynapses(self):
| connections = Connections(1024)
segment = connections.createSegment(11)
synapse1 = connections.createSynapse(segment, 201, 0.85)
connections.createSynapse(segment, 202, 0.85)
connections.destroySynapse(synapse1)
self.assertEqual(1, connections.numSynapses(segment))
connections.destroySegment... |
'Creates a synapse and updates its permanence, and makes sure that its
data was correctly updated.'
| def testUpdateSynapsePermanence(self):
| connections = Connections(1024)
segment = connections.createSegment(10)
synapse = connections.createSynapse(segment, 50, 0.34)
connections.updateSynapsePermanence(synapse, 0.21)
synapseData = connections.dataForSynapse(synapse)
self.assertAlmostEqual(synapseData.permanence, 0.21)
|
'Creates a sample set of connections, and makes sure that computing the
activity for a collection of cells with no activity returns the right
activity data.'
| def testComputeActivity(self):
| connections = Connections(1024)
segment1a = connections.createSegment(10)
connections.createSynapse(segment1a, 150, 0.85)
connections.createSynapse(segment1a, 151, 0.15)
segment2a = connections.createSegment(20)
connections.createSynapse(segment2a, 80, 0.85)
connections.createSynapse(segment... |
'Run the PY and CPP implementations side by side on random inputs.
If seed is None a random seed will be chosen based on time, otherwise
the fixed seed will be used.
If learnMode is None learning will be randomly turned on and off.
If it is False or True then set it accordingly.
If convertEveryIteration is True, the CP... | def runSideBySide(self, params, seed=None, learnMode=None, convertEveryIteration=False):
| randomState = getNumpyRandomGenerator(seed)
cppSp = CreateSP('cpp', params)
pySp = CreateSP('py', params)
self.compare(pySp, cppSp)
numColumns = pySp.getNumColumns()
numInputs = pySp.getNumInputs()
threshold = 0.8
inputMatrix = (randomState.rand(numRecords, numInputs) > threshold).astype... |
'Check SP implementations have same behavior with 1D input.'
| @unittest.skip('Currently fails due to non-fixed randomness in C++ SP.')
def testCompatibilityCppPyDirectCall1D(self):
| pySp = PySpatialPooler(inputDimensions=[121], columnDimensions=[300])
cppSp = CPPSpatialPooler(inputDimensions=[121], columnDimensions=[300])
data = numpy.zeros([121], dtype=uintType)
for i in xrange(21):
data[i] = 1
nCols = 300
d1 = numpy.zeros(nCols, dtype=uintType)
d2 = numpy.zero... |
'Check SP implementations have same behavior with 2D input.'
| @unittest.skip('Currently fails due to non-fixed randomness in C++ SP.')
def testCompatibilityCppPyDirectCall2D(self):
| pySp = PySpatialPooler(inputDimensions=[121, 1], columnDimensions=[30, 30])
cppSp = CPPSpatialPooler(inputDimensions=[121, 1], columnDimensions=[30, 30])
data = numpy.zeros([121, 1], dtype=uintType)
for i in xrange(21):
data[i][0] = 1
nCols = 900
d1 = numpy.zeros(nCols, dtype=uintType)
... |
'Tests standard learning case for raw overlap'
| def testOverlapDistanceMethodStandard(self):
| params = {'distanceMethod': 'rawOverlap'}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
sel... |
'Tests overlap distance with min sparsity'
| def testMinSparsity(self):
| params = {'distanceMethod': 'rawOverlap', 'minSparsity': 0.2}
classifier = KNNClassifier(**params)
dimensionality = 30
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
b = np.array([2, 4, 8, 12, 14, 18, 20, 21, 28], dtype=np.int32)
c = np.array([2, 3, 8, 11, 14, 18], dtype=np.int3... |
'Tests that paritionId properly excludes training data points during
inference'
| def testPartitionIdExcluded(self):
| params = {'distanceMethod': 'rawOverlap'}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
denseA = np.zeros(dimensionality)
denseA[a] = 1.0
denseB = np... |
'Test a sequence of calls to KNN to ensure we can retrieve partition Id:
- We first learn on some patterns (including one pattern with no
partitionId in the middle) and test that we can retrieve Ids.
- We then invoke inference and then check partitionId again.
- We check incorrect indices to ensure we get an exception.... | def testGetPartitionId(self):
| params = {'distanceMethod': 'rawOverlap'}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
c = np.array([1, 2, 3, 14, 16, 19, 22, 24, 33], dtype=np.int32)
d... |
'Tests that we can correctly retrieve partition Id even if the first few
vectors do not have Ids'
| def testGetPartitionIdWithNoIdsAtFirst(self):
| params = {'distanceMethod': 'rawOverlap'}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
c = np.array([1, 2, 3, 14, 16, 19, 22, 24, 33], dtype=np.int32)
d... |
'Sparsity (input dimensionality) less than input array'
| @unittest.skipUnless(__debug__, 'Only applicable when asserts are enabled')
def testOverlapDistanceMethodBadSparsity(self):
| params = {'distanceMethod': 'rawOverlap'}
classifier = KNNClassifier(**params)
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
with self.assertRaises(AssertionError):
classifier.learn(a, 0, isSparse=20)
|
'Inconsistent sparsity (input dimensionality)'
| def testOverlapDistanceMethodInconsistentDimensionality(self):
| params = {'distanceMethod': 'rawOverlap'}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
numPatterns = classifier.learn(a, 0, isSparse=31)
self.assertEquals(numPatterns, 1)
denseA = np.zeros(dimensionality)
denseA[... |
'If sparse representation indices are unsorted expect error.'
| @unittest.skipUnless(__debug__, 'Only applicable when asserts are enabled')
def testOverlapDistanceMethodStandardUnsorted(self):
| params = {'distanceMethod': 'rawOverlap'}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([29, 3, 7, 11, 13, 17, 19, 23, 1], dtype=np.int32)
b = np.array([2, 4, 20, 12, 14, 18, 8, 28, 30], dtype=np.int32)
with self.assertRaises(AssertionError):
classifier.learn(a, 0... |
'Tests case where pattern has no ON bits'
| def testOverlapDistanceMethodEmptyArray(self):
| params = {'distanceMethod': 'rawOverlap'}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([], dtype=np.int32)
numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
self.assertEquals(numPatterns, 1)
denseA = np.zeros(dimensionality)
denseA[a] = 1.0
(cat, ... |
'More complex test of checkpointing in the middle of a sequence.'
| @unittest.skipUnless(capnp, 'pycapnp not installed')
def testSerializationMiddleOfSequence2(self):
| tm1 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32, 128, 32, 'normal')
tm2 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32, 128, 32, 'normal')
with open(resour... |
'More complex test of checkpointing in the middle of a sequence.'
| def testCheckpointMiddleOfSequence2(self):
| tm1 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32, 128, 32, 'normal')
tm2 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32, 128, 32, 'normal')
with open(resour... |
'Asserts that two TM instances are the same.
This is temporarily disabled since it does not work with the C++
implementation of the TM.'
| def assertTMsEqual(self, tm1, tm2):
| self.assertEqual(tm1, tm2, tm1.diff(tm2))
self.assertTrue(fdrutilities.tmDiff2(tm1, tm2, 1, False))
|
'Generates a sequence of n patterns.'
| @staticmethod
def generateSequence(n=10, numCols=100, minOnes=21, maxOnes=25):
| return ([None] + [BacktrackingTMTest.generatePattern(numCols, minOnes, maxOnes) for _ in xrange(n)])
|
'Generate a single test pattern with given parameters.
Parameters:
numCols: Number of columns in each pattern.
minOnes: The minimum number of 1\'s in each pattern.
maxOnes: The maximum number of 1\'s in each pattern.'
| @staticmethod
def generatePattern(numCols=100, minOnes=21, maxOnes=25):
| assert (minOnes < maxOnes)
assert (maxOnes < numCols)
nOnes = random.randint(minOnes, maxOnes)
ind = random.sample(xrange(numCols), nOnes)
x = numpy.zeros(numCols, dtype='float32')
x[ind] = 1
return x
|
'Set various constants. Create the input patterns and the spatial pooler'
| def setUp(self):
| self.inputSize = 90
self.columnDimensions = 600
self.x = numpy.zeros((5, self.inputSize), dtype=uintType)
self.x[0, 0:20] = 1
self.x[1, 10:30] = 1
self.x[2, 30:50] = 1
self.x[3, 50:70] = 1
self.x[4, 70:90] = 1
self.winningIteration = numpy.zeros(self.columnDimensions)
self.lastSD... |
'Helpful debug print statements while debugging this test.'
| def debugPrint(self):
| activeDutyCycle = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
self.sp.getActiveDutyCycles(activeDutyCycle)
boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
self.sp.getBoostFactors(boost)
print '\n--------- ITERATION', self.sp.getIterationNum(), '-----------------------'
... |
'Verify that all SDRs have the properties desired for this test.
The bounds for checking overlap are set fairly loosely here since there is
some variance due to randomness and the artificial parameters used in this
test.'
| def verifySDRProperties(self):
| self.assertTrue(_areAllSDRsUnique(self.lastSDR), "All SDR's are not unique")
self.assertGreater(_computeOverlap(self.lastSDR[0], self.lastSDR[1]), 9, "First two SDR's don't overlap much")
for i in [2, 3, 4]:
for j in range(5):
if (i != j):
self.... |
'Main test loop.'
| def boostTestLoop(self, imp):
| self.sp = CreateSP(imp, self.params)
self.spImplementation = imp
self.winningIteration.fill(0)
self.lastSDR = {}
self.boostTestPhase1()
self.boostTestPhase2()
self.boostTestPhase3()
self.boostTestPhase4()
|
'Test if the firing number of coincidences after inhibition
equals spatial pooler numActiveColumnsPerInhArea.'
| @unittest.skip("Currently fails due to switch from FDRCSpatial2 to SpatialPooler.The new SP doesn't have explicit methods to get inhibition.")
def testInhibition(self):
| n = 100
w = 15
inputLen = 300
columnDimensions = 2048
numActiveColumnsPerInhArea = 40
stimulusThreshold = 0
spSeed = 1956
stimulusThresholdInh = 1e-05
kDutyCycleFactor = 0.01
spVerbosity = 0
testIter = 100
spTest = SpatialPooler(columnDimensions=(columnDimensions, 1), inp... |
'Basic test (creation, pickling, basic run of learning and inference)'
| def basicTest(self):
| tm = BacktrackingTMCPP(numberOfCols=10, cellsPerColumn=3, initialPerm=0.2, connectedPerm=0.8, minThreshold=2, newSynapseCount=5, permanenceInc=0.1, permanenceDec=0.05, permanenceMax=1, globalDecay=0.05, activationThreshold=4, doPooling=False, segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY)
tm.retrieve... |
'Basic test (basic run of learning and inference)'
| def basicTest2(self, tm, numPatterns=100, numRepetitions=3, activity=15, testTrimming=False, testRebuild=False):
| tmPy = BacktrackingTM(numberOfCols=tm.numberOfCols, cellsPerColumn=tm.cellsPerColumn, initialPerm=tm.initialPerm, connectedPerm=tm.connectedPerm, minThreshold=tm.minThreshold, newSynapseCount=tm.newSynapseCount, permanenceInc=tm.permanenceInc, permanenceDec=tm.permanenceDec, permanenceMax=tm.permanenceMax, globalDe... |
'Call basicTest2 with multiple parameter settings and ensure the C++ and
PY versions are identical throughout.'
| def testTMs(self, short=True):
| if (short == True):
print 'Testing short version'
else:
print 'Testing long version'
if short:
print '\nTesting with fixed resource CLA - test max segment and synapses'
tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5, initial... |
'When a segment becomes active, grow synapses to previous winner cells.
The number of grown synapses is calculated from the "matching segment"
overlap, not the "active segment" overlap.'
| def testActiveSegmentGrowSynapsesAccordingToPotentialOverlap(self):
| tm = TemporalMemory(columnDimensions=[32], cellsPerColumn=1, activationThreshold=2, initialPermanence=0.21, connectedPermanence=0.5, minThreshold=1, maxNewSynapseCount=4, permanenceIncrement=0.1, permanenceDecrement=0.1, predictedSegmentDecrement=0.0, seed=42)
previousActiveColumns = [0, 1, 2, 3, 4]
prevWin... |
'Destroy some segments then verify that the maxSegmentsPerCell is still
correctly applied.'
| def testDestroySegmentsThenReachLimit(self):
| tm = TemporalMemory(columnDimensions=[32], cellsPerColumn=1, activationThreshold=3, initialPermanence=0.5, connectedPermanence=0.5, minThreshold=2, maxNewSynapseCount=3, permanenceIncrement=0.02, permanenceDecrement=0.02, predictedSegmentDecrement=0.0, seed=42, maxSegmentsPerCell=2)
segment1 = tm.createSegment(... |
'Hit the maxSegmentsPerCell threshold multiple times. Make sure it
works more than once.'
| def testReachSegmentLimitMultipleTimes(self):
| tm = TemporalMemory(columnDimensions=[32], cellsPerColumn=1, activationThreshold=3, initialPermanence=0.5, connectedPermanence=0.5, minThreshold=2, maxNewSynapseCount=3, permanenceIncrement=0.02, permanenceDecrement=0.02, predictedSegmentDecrement=0.0, seed=42, maxSegmentsPerCell=2)
tm.createSegment(10)
sel... |
'ensure historicWindowSize is greater than estimationSamples'
| def testParamterError(self):
| try:
anomalyLikelihoodRegion = AnomalyLikelihoodRegion(estimationSamples=100, historicWindowSize=99)
self.assertEqual(False, True, 'Should have failed with ValueError')
except ValueError:
pass
|
'test to see if the region keeps track of state correctly and produces
the same likelihoods as the AnomalyLikelihood module'
| def testLikelihoodValues(self):
| anomalyLikelihoodRegion = AnomalyLikelihoodRegion()
anomalyLikelihood = AnomalyLikelihood()
inputs = AnomalyLikelihoodRegion.getSpec()['inputs']
outputs = AnomalyLikelihoodRegion.getSpec()['outputs']
with open(_INPUT_DATA_FILE) as f:
reader = csv.reader(f)
reader.next()
for r... |
'test to ensure serialization preserves the state of the region
correctly.'
| @unittest.skipUnless(capnp, 'pycapnp is not installed, skipping serialization test.')
def testSerialization(self):
| anomalyLikelihoodRegion1 = AnomalyLikelihoodRegion()
inputs = AnomalyLikelihoodRegion.getSpec()['inputs']
outputs = AnomalyLikelihoodRegion.getSpec()['outputs']
for _ in xrange(0, 6):
inputs['rawAnomalyScore'] = numpy.array([random.random()])
inputs['metricValue'] = numpy.array([random.r... |
'This test ensures that records in classifier are removed when they are no
longer being used when the trainRecords is set.'
| @patch.object(KNNAnomalyClassifierRegion, '_constructClassificationRecord')
def testSetGetWaitRecordsRecalculate(self, getRecord):
| self.helper.cacheSize = 5
self.helper.anomalyThreshold = 0.8
self.helper._anomalyVectorLength = 20
records = [Mock(ROWID=10, anomalyLabel=['Test'], anomalyScore=1, setByUser=False, anomalyVector=numpy.array([1, 4])), Mock(ROWID=11, anomalyLabel=['Test'], anomalyScore=0, setByUser=False, anomalyVector=nu... |
'Testing ScalarEncoder...'
| def testScalarEncoder(self):
| mv = ScalarEncoder(name='mv', n=14, w=3, minval=1, maxval=8, periodic=False, forced=True)
empty = mv.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
self.assertEqual(empty.sum(), 0)
|
'test NaNs'
| def testNaNs(self):
| mv = ScalarEncoder(name='mv', n=14, w=3, minval=1, maxval=8, periodic=False, forced=True)
empty = mv.encode(float('nan'))
self.assertEqual(empty.sum(), 0)
|
'Test bottom-up encoding for a Periodic encoder'
| def testBottomUpEncodingPeriodicEncoder(self):
| l = ScalarEncoder(n=14, w=3, minval=1, maxval=8, periodic=True, forced=True)
self.assertEqual(l.getDescription(), [('[1:8]', 0)])
l = ScalarEncoder(name='scalar', n=14, w=3, minval=1, maxval=8, periodic=True, forced=True)
self.assertEqual(l.getDescription(), [('scalar', 0)])
self.assertTrue(numpy.ar... |
'Test that we get the same encoder when we construct it using resolution
instead of n'
| def testCreateResolution(self):
| l = self._l
d = l.__dict__
l = ScalarEncoder(name='scalar', resolution=0.5, w=3, minval=1, maxval=8, periodic=True, forced=True)
self.assertEqual(l.__dict__, d)
l = ScalarEncoder(name='scalar', radius=1.5, w=3, minval=1, maxval=8, periodic=True, forced=True)
self.assertEqual(l.__dict__, d)
|
'Test the input description generation, top-down compute, and bucket
support on a periodic encoder'
| def testDecodeAndResolution(self):
| l = self._l
v = l.minval
while (v < l.maxval):
output = l.encode(v)
decoded = l.decode(output)
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
self.assertEqual(len(fieldNames), 1)
self.assertEqual(fieldNames, fieldsDict.keys())
... |
'Test closenessScores for a periodic encoder'
| def testCloseness(self):
| encoder = ScalarEncoder(w=7, minval=0, maxval=7, radius=1, periodic=True, name='day of week', forced=True)
scores = encoder.closenessScores((2, 4, 7), (4, 2, 1), fractional=False)
for (actual, score) in itertools.izip((2, 2, 1), scores):
self.assertEqual(actual, score)
|
'Test Non-periodic encoder bottom-up'
| def testNonPeriodicBottomUp(self):
| l = ScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=10, periodic=False, forced=True)
self.assertTrue(numpy.array_equal(l.encode(1), numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=defaultDtype)))
self.assertTrue(numpy.array_equal(l.encode(2), numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0,... |
'Ensures that passing resolution as an int doesn\'t truncate values.'
| def testGetBucketInfoIntResolution(self):
| encoder = ScalarEncoder(w=3, resolution=1, minval=1, maxval=8, periodic=True, forced=True)
self.assertEqual(4.5, encoder.topDownCompute(encoder.encode(4.5))[0].scalar)
|
'Test ScalarEncoder Cap\'n Proto serialization implementation.'
| @unittest.skipUnless(capnp, 'pycapnp is not installed, skipping serialization test.')
def testReadWrite(self):
| originalValue = self._l.encode(1)
proto1 = ScalarEncoderProto.new_message()
self._l.write(proto1)
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = ScalarEncoderProto.read(f)
encoder = ScalarEncoder.read(proto2)
self.assertIsInstance(encoder, ScalarEn... |
def testSettingNWithMaxvalMinvalNone(self):
    """Setting n when maxval/minval = None creates instance."""
    enc = ScalarEncoder(3, None, None, name='scalar', n=14, radius=0,
                        resolution=0, forced=True)
    self.assertIsInstance(enc, ScalarEncoder)
def testSettingScalarAndResolution(self):
    """Setting both scalar and resolution not allowed."""
    # Constructing with n=0 plus an explicit resolution must be rejected.
    self.assertRaises(ValueError, ScalarEncoder, 3, None, None,
                      name='scalar', n=0, radius=None, resolution=0.5,
                      forced=True)
def testSettingRadiusWithMaxvalMinvalNone(self):
    """Setting radius when maxval/minval = None creates instance."""
    enc = ScalarEncoder(3, None, None, name='scalar', n=0, radius=1.5,
                        resolution=0, forced=True)
    self.assertIsInstance(enc, ScalarEncoder)
def testEncodeArray(self):
    """Send bitmap as array."""
    encoder = self._encoder(self.n, name=self.name)
    inputBitmap = [0, 0, 0, 1, 0, 0, 0, 0, 0]
    encoded = encoder.encode(inputBitmap)
    # The encoding must preserve the number of on bits.
    self.assertEqual(encoded.sum(), sum(inputBitmap))
    fieldsDict = encoder.decode(encoded)[0]
    self.assertIsInstance(fieldsDict, dict)
    self.assertTrue(self.name in fieldsDict)
def testEncodeBitArray(self):
    """Send bitmap as numpy bit array."""
    encoder = self._encoder(self.n, name=self.name)
    inputBitmap = numpy.zeros(self.n, dtype=numpy.uint8)
    inputBitmap[3] = 1
    inputBitmap[5] = 1
    encoded = encoder.encode(inputBitmap)
    # On-bit count must survive the encoding round trip.
    self.assertEqual(encoded.sum(), sum(inputBitmap))
'Compare two bitmaps for closeness'
| def testClosenessScores(self):
| e = self._encoder(self.n, name=self.name)
'Identical => 1'
bitmap1 = [0, 0, 0, 1, 1, 1, 0, 0, 0]
bitmap2 = [0, 0, 0, 1, 1, 1, 0, 0, 0]
out1 = e.encode(bitmap1)
out2 = e.encode(bitmap2)
c = e.closenessScores(out1, out2)
self.assertEqual(c[0], 1.0)
'No overlap => 0'
... |
'testing auto-grow'
| def testAutogrow(self):
| fieldWidth = 100
bitsOn = 10
s = SDRCategoryEncoder(n=fieldWidth, w=bitsOn, name='foo', verbosity=2, forced=True)
encoded = numpy.zeros(fieldWidth)
self.assertEqual(s.topDownCompute(encoded).value, '<UNKNOWN>')
s.encodeIntoArray('catA', encoded)
self.assertEqual(encoded.sum(), bitsOn)
se... |
'Test basic encoding functionality. Create encodings without crashing and
check they contain the correct number of on and off bits. Check some
encodings for expected overlap. Test that encodings for old values don\'t
change once we generate new buckets.'
| def testEncoding(self):
| encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0, w=23, n=500, offset=0.0)
e0 = encoder.encode((-0.1))
self.assertEqual(e0.sum(), 23, 'Number of on bits is incorrect')
self.assertEqual(e0.size, 500, 'Width of the vector is incorrect')
self.assertE... |
def testMissingValues(self):
    """Test that missing values and NaN return all zero's."""
    encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0)
    # Both the sentinel and NaN must yield an all-zero encoding.
    for missingInput in (SENTINEL_VALUE_FOR_MISSING_DATA, float('nan')):
        self.assertEqual(encoder.encode(missingInput).sum(), 0)
'Test that numbers within the same resolution return the same encoding.
Numbers outside the resolution should return different encodings.'
| def testResolution(self):
| encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0)
e23 = encoder.encode(23.0)
e23p1 = encoder.encode(23.1)
e22p9 = encoder.encode(22.9)
e24 = encoder.encode(24.0)
self.assertEqual(e23.sum(), encoder.w)
self.assertEqual((e23 == e23p1).sum(), encoder.getWidth(), "Numbers ... |
'Test that mapBucketIndexToNonZeroBits works and that max buckets and
clipping are handled properly.'
| def testMapBucketIndexToNonZeroBits(self):
| encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150)
encoder._initializeBucketMap(10, None)
encoder.encode(0.0)
encoder.encode((-7.0))
encoder.encode(7.0)
self.assertEqual(len(encoder.bucketMap), encoder._maxBuckets, '_maxBuckets exceeded')
self.assertTrue(numpy.array_equ... |
'Test that some bad construction parameters get handled.'
| def testParameterChecks(self):
| with self.assertRaises(ValueError):
RandomDistributedScalarEncoder(name='mv', resolution=1.0, n=int((5.9 * 21)))
with self.assertRaises(ValueError):
RandomDistributedScalarEncoder(name='mv', resolution=1.0, n=(5.9 * 21))
with self.assertRaises(ValueError):
RandomDistributedScalarEnco... |
def testOverlapStatistics(self):
    """Check that the overlaps for the encodings are within the expected
    range. Here we ask the encoder to create a bunch of representations
    under somewhat stressful conditions, and then verify they are correct.
    We rely on the fact that the _overlapOK and _countOverlapIndices
    methods are working correctly."""
    encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150,
                                             seed=getSeed())
    # Populate buckets across a wide value range before validating.
    for value in (0.0, -300.0, 300.0):
        encoder.encode(value)
    self.assertTrue(validateEncoder(encoder, subsampling=3),
                    'Illegal overlap encountered in encoder')
'Test that the getWidth, getDescription, and getDecoderOutputFieldTypes
methods work.'
| def testGetMethods(self):
| encoder = RandomDistributedScalarEncoder(name='theName', resolution=1.0, n=500)
self.assertEqual(encoder.getWidth(), 500, "getWidth doesn't return the correct result")
self.assertEqual(encoder.getDescription(), [('theName', 0)], "getDescription doesn't return the correct result... |
'Test that offset is working properly'
| def testOffset(self):
| encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0)
encoder.encode(23.0)
self.assertEqual(encoder._offset, 23.0, 'Offset not specified and not initialized to first input')
encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0, offset=25.0)
... |
'Test that initializing twice with the same seed returns identical encodings
and different when not specified'
| def testSeed(self):
| encoder1 = RandomDistributedScalarEncoder(name='encoder1', resolution=1.0, seed=42)
encoder2 = RandomDistributedScalarEncoder(name='encoder2', resolution=1.0, seed=42)
encoder3 = RandomDistributedScalarEncoder(name='encoder3', resolution=1.0, seed=(-1))
encoder4 = RandomDistributedScalarEncoder(name='en... |
'Test that the internal method _countOverlapIndices works as expected.'
| def testCountOverlapIndices(self):
| encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0, w=5, n=(5 * 20))
midIdx = (encoder._maxBuckets / 2)
encoder.bucketMap[(midIdx - 2)] = numpy.array(range(3, 8))
encoder.bucketMap[(midIdx - 1)] = numpy.array(range(4, 9))
encoder.bucketMap[midIdx] = numpy.array(range(5, 10))
... |
'Test that the internal method _overlapOK works as expected.'
| def testOverlapOK(self):
| encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0, w=5, n=(5 * 20))
midIdx = (encoder._maxBuckets / 2)
encoder.bucketMap[(midIdx - 3)] = numpy.array(range(4, 9))
encoder.bucketMap[(midIdx - 2)] = numpy.array(range(3, 8))
encoder.bucketMap[(midIdx - 1)] = numpy.array(range(4, 9)... |
'Test that the internal method _countOverlap works as expected.'
| def testCountOverlap(self):
| encoder = RandomDistributedScalarEncoder(name='encoder', resolution=1.0, n=500)
r1 = numpy.array([1, 2, 3, 4, 5, 6])
r2 = numpy.array([1, 2, 3, 4, 5, 6])
self.assertEqual(encoder._countOverlap(r1, r2), 6, '_countOverlap result is incorrect')
r1 = numpy.array([1, 2, 3, 4, 5, 6])
r2 = num... |
'Test that nothing is printed out when verbosity=0'
| def testVerbosity(self):
| _stdout = sys.stdout
sys.stdout = _stringio = StringIO()
encoder = RandomDistributedScalarEncoder(name='mv', resolution=1.0, verbosity=0)
output = numpy.zeros(encoder.getWidth(), dtype=defaultDtype)
encoder.encodeIntoArray(23.0, output)
encoder.getBucketIndices(23.0)
sys.stdout = _stdout
... |
def testMissingValues(self):
    """Missing values must encode to an all-zero output."""
    adaptiveEncoder = AdaptiveScalarEncoder(name='mv', n=14, w=3, minval=1,
                                            maxval=8, periodic=False,
                                            forced=True)
    encoded = adaptiveEncoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
    self.assertEqual(encoded.sum(), 0)
'Non-periodic encoder, min and max specified'
| def testNonPeriodicEncoderMinMaxSpec(self):
| self.assertTrue(numpy.array_equal(self._l.encode(1), numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=defaultDtype)))
self.assertTrue(numpy.array_equal(self._l.encode(2), numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], dtype=defaultDtype)))
self.assertTrue(numpy.array_equal(self._l.enco... |
'Test the input description generation and topDown decoding'
| def testTopDownDecode(self):
| l = self._l
v = l.minval
while (v < l.maxval):
output = l.encode(v)
decoded = l.decode(output)
(fieldsDict, _) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, _) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
(rangeMin, rangeMax) = r... |
'Make sure we can fill in holes'
| def testFillHoles(self):
| l = self._l
decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]))
(fieldsDict, _) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, _) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
self.assertSequenceEqual(ranges[0], [10, 10])
decoded = l.decode(nu... |
'Non-periodic encoder, min and max not specified'
| def testNonPeriodicEncoderMinMaxNotSpec(self):
| l = AdaptiveScalarEncoder(name='scalar', n=14, w=5, minval=None, maxval=None, periodic=False, forced=True)
def _verify(v, encoded, expV=None):
if (expV is None):
expV = v
self.assertTrue(numpy.array_equal(l.encode(v), numpy.array(encoded, dtype=defaultDtype)))
self.assertLess... |
'Test setting the min and max using setFieldStats'
| def testSetFieldStats(self):
| def _dumpParams(enc):
return (enc.n, enc.w, enc.minval, enc.maxval, enc.resolution, enc._learningEnabled, enc.recordNum, enc.radius, enc.rangeInternal, enc.padding, enc.nInternal)
sfs = AdaptiveScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=10, periodic=False, forced=True)
reg = AdaptiveSc... |
def testRadiusForSpeedInt(self):
    """Test that radius will round to the nearest integer."""
    scale, timestep, speed = 30, 62, 25
    encoder = GeospatialCoordinateEncoder(scale, timestep)
    # radiusForSpeed should produce a rounded integer radius.
    self.assertEqual(encoder.radiusForSpeed(speed), 38)
'Testing MultiEncoder...'
| def testMultiEncoder(self):
| e = MultiEncoder()
e.addEncoder('dow', ScalarEncoder(w=3, resolution=1, minval=1, maxval=8, periodic=True, name='day of week', forced=True))
e.addEncoder('myval', ScalarEncoder(w=5, resolution=1, minval=1, maxval=10, periodic=False, name='aux', forced=True))
self.assertEqual(e.getWidth(), 21)
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.