idx (int64): 0 to 63k
question (string): lengths 61 to 4.03k
target (string): lengths 6 to 1.23k
9,800
def getUnionTemporalPoolerInput(self):
  activeCells = numpy.zeros(self.tm.numberOfCells()).astype(realDType)
  activeCells[list(self.tm.activeCellsIndices())] = 1
  predictedActiveCells = numpy.zeros(self.tm.numberOfCells()).astype(realDType)
  predictedActiveCells[list(self.tm.predictedActiveCellsIndices())] = 1
  burstingColumns = numpy.zeros(self.tm.numberOfColumns()).astype(realDType)
  burstingColumns[list(self.tm.unpredictedActiveColumns)] = 1
  return activeCells, predictedActiveCells, burstingColumns
Gets the Union Temporal Pooler input from the Temporal Memory
9,801
def compute(self, inputVector, learn, activeArray):
  x = inputVector
  y = self.encode(x)
  active_units = np.where(y == 1.)[0]
  if learn:
    self.update_statistics([y])
    self.update_weights([x], [y])
  activeArray[active_units] = 1.
  return active_units
This method resembles the primary public method of the SpatialPooler class. It takes an input vector and outputs the indices of the active columns. If learn is set to True, this method also performs weight updates and updates to the activity statistics according to the respective methods implemented below.
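A minimal usage sketch, assuming a pooler instance named pooler with input_size and output_size attributes (all three names are hypothetical; the constructor is not shown in this entry):
import numpy as np

inputVector = np.random.randint(0, 2, pooler.input_size)   # binary input
activeArray = np.zeros(pooler.output_size)                 # filled by compute
active_units = pooler.compute(inputVector, learn=True, activeArray=activeArray)
# activeArray now holds 1s exactly at the returned active_units indices.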
9,802
def encode_batch(self, inputBatch):
  X = inputBatch
  encode = self.encode
  Y = np.array([encode(x) for x in X])
  return Y
Encodes a whole batch of input arrays without learning.
9,803
def learn(self, x):
  y = self.encode(x)
  self.update_statistics([y])
  self.update_weights([x], [y])
  return y
Encodes an input array and performs weight updates and updates to the activity statistics according to the respective methods implemented below.
9,804
def learn_batch(self, inputBatch):
  X = inputBatch
  Y = self.encode_batch(X)
  self.update_statistics(Y)
  self.update_weights(X, Y)
  return Y
Encodes a whole batch of input arrays and performs weight updates and updates to the activity statistics according to the respective methods implemented below.
9,805
def update_statistics(self, activityVectors):
  Y = activityVectors
  n = self.output_size
  A = np.zeros((n, n))
  batchSize = len(Y)
  for y in Y:
    active_units = np.where(y == 1)[0]
    for i in active_units:
      for j in active_units:
        A[i, j] += 1.
  A = A / batchSize
  self.average_activity = self.exponential_moving_average(
      self.average_activity, A, self.smoothing_period)
Updates the variable that maintains exponential moving averages of individual and pairwise unit activity.
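The exponential_moving_average helper is referenced but not defined in this entry; a minimal sketch consistent with the call above, treating smoothing_period as the effective averaging window, might be:
def exponential_moving_average(self, average, newValue, period):
  # Standard EMA update: the running average decays by (period - 1) / period
  # and the new batch statistic contributes 1 / period.
  return (average * (period - 1.) + newValue) / period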
9,806
def getSparseWeights(weightSparsity, inputSize, outputSize):
  w = torch.Tensor(outputSize, inputSize)
  stdv = 1. / math.sqrt(w.size(1))
  w.data.uniform_(-stdv, stdv)
  numZeros = int(round((1.0 - weightSparsity) * inputSize))
  outputIndices = np.arange(outputSize)
  inputIndices = np.array([np.random.permutation(inputSize)[:numZeros]
                           for _ in outputIndices], dtype=np.long)
  zeroIndices = np.empty((outputSize, numZeros, 2), dtype=np.long)
  zeroIndices[:, :, 0] = outputIndices[:, None]
  zeroIndices[:, :, 1] = inputIndices
  zeroIndices = torch.LongTensor(zeroIndices.reshape(-1, 2))
  zeroWts = (zeroIndices[:, 0], zeroIndices[:, 1])
  w.data[zeroWts] = 0.0
  return w
Return a randomly initialized weight matrix of size outputSize x inputSize, in which a fraction weightSparsity of each row's weights is nonzero.
9,807
def plotOverlapHistogram(v, w, title, base="random"):
  overlaps = v.matmul(w.t())
  bins = np.linspace(float(overlaps.min()), float(overlaps.max()), 28)
  plt.hist(overlaps, bins, alpha=0.5, label='All cols')
  plt.legend(loc='upper right')
  plt.xlabel("Overlap scores")
  plt.ylabel("Frequency")
  plt.title(title)
  plt.savefig(base + "_1")
  plt.close()
  return overlaps
Given a vector v, compute the overlap with the weight matrix w and save the histogram of overlaps.
9,808
def plotOverlaps(vList, w, base="random", k=20):
  for i, v in enumerate(vList):
    if i == 0:
      col = "m"
      label = "Random vector"
    else:
      col = "c"
      label = ""
    if i == 1:
      label = "Test images"
    overlaps = v.matmul(w.t())
    sortedOverlaps = overlaps.sort()[0].tolist()[0][::-1]
    plt.plot(sortedOverlaps, col, label=label)
  plt.axvspan(0, k, facecolor="g", alpha=0.3, label="Active units")
  plt.legend(loc="upper right")
  plt.xlabel("Units")
  plt.ylabel("Overlap scores")
  plt.title("Sorted unit overlaps of a sparse net.")
  plt.savefig(base + "_2")
  plt.close()
Given a list of vectors, compute the overlap of each with the weight matrix w and plot the overlap curves.
9,809
def random_mini_batches(X, Y, minibatch_size, seed=None):
  d = X.shape[1]
  size = minibatch_size
  minibatches = []
  if Y is None:
    Y = np.zeros((1, d))
  np.random.seed(seed)
  perm = np.random.permutation(d)
  for t in range(0, d, size):
    subset = perm[t:t + size]
    minibatches.append((X[:, subset], Y[:, subset]))
  return minibatches
Compute a list of minibatches from inputs X and targets Y. A datapoint is expected to be represented as a column in the data matrices X and Y.
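A small usage example for the function above:
import numpy as np

X = np.random.rand(5, 12)   # 5 features, 12 datapoints as columns
Y = np.random.rand(1, 12)
batches = random_mini_batches(X, Y, minibatch_size=4, seed=0)
# 12 columns split into 3 minibatches of 4 columns each.
assert len(batches) == 3
assert batches[0][0].shape == (5, 4) and batches[0][1].shape == (1, 4)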
9,810
def generateRandomSDR(numSDR, numDims, numActiveInputBits, seed=42):
  randomSDRs = np.zeros((numSDR, numDims), dtype=uintType)
  indices = np.array(range(numDims))
  np.random.seed(seed)
  for i in range(numSDR):
    randomIndices = np.random.permutation(indices)
    activeBits = randomIndices[:numActiveInputBits]
    randomSDRs[i, activeBits] = 1
  return randomSDRs
Generate a set of random SDRs.
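Example usage, assuming uintType is a numpy-compatible unsigned dtype such as "uint32" (the actual module-level definition is not shown in this entry):
uintType = "uint32"  # assumed dtype; defined elsewhere in the original module

sdrs = generateRandomSDR(numSDR=10, numDims=1024, numActiveInputBits=20)
assert sdrs.shape == (10, 1024)
assert (sdrs.sum(axis=1) == 20).all()  # every SDR has exactly 20 active bits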
9,811
def percentOverlap(x1, x2):
  nonZeroX1 = np.count_nonzero(x1)
  nonZeroX2 = np.count_nonzero(x2)
  percentOverlap = 0
  minX1X2 = min(nonZeroX1, nonZeroX2)
  if minX1X2 > 0:
    overlap = float(np.dot(x1.T, x2))
    percentOverlap = overlap / minX1X2
  return percentOverlap
Computes the percentage of overlap between vectors x1 and x2.
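A worked example: the dot product counts shared active bits, and the score normalizes by the smaller of the two active-bit counts.
import numpy as np

x1 = np.array([1, 1, 1, 0, 0])
x2 = np.array([1, 1, 0, 1, 0])
# Shared active bits: 2; min(3, 3) = 3 active bits -> overlap = 2 / 3.
print(percentOverlap(x1, x2))  # 0.666...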
9,812
def addNoiseToVector(inputVector, noiseLevel, vectorType):
  if vectorType == 'sparse':
    corruptSparseVector(inputVector, noiseLevel)
  elif vectorType == 'dense':
    corruptDenseVector(inputVector, noiseLevel)
  else:
    raise ValueError("vectorType must be 'sparse' or 'dense' ")
Add noise to SDRs
9,813
def corruptDenseVector(vector, noiseLevel):
  size = len(vector)
  for i in range(size):
    rnd = random.random()
    if rnd < noiseLevel:
      if vector[i] == 1:
        vector[i] = 0
      else:
        vector[i] = 1
Corrupts a binary vector by flipping each of its bits with probability noiseLevel.
9,814
def corruptSparseVector(sdr, noiseLevel):
  numNoiseBits = int(noiseLevel * np.sum(sdr))
  if numNoiseBits <= 0:
    return sdr
  activeBits = np.where(sdr > 0)[0]
  inActiveBits = np.where(sdr == 0)[0]
  turnOffBits = np.random.permutation(activeBits)
  turnOnBits = np.random.permutation(inActiveBits)
  turnOffBits = turnOffBits[:numNoiseBits]
  turnOnBits = turnOnBits[:numNoiseBits]
  sdr[turnOffBits] = 0
  sdr[turnOnBits] = 1
Add noise to sdr by turning off numNoiseBits active bits and turning on numNoiseBits inactive bits.
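Note that the corruption is symmetric, so the total number of active bits is preserved:
import numpy as np
np.random.seed(0)

sdr = np.zeros(100, dtype="uint32")
sdr[:10] = 1                   # 10 active bits
corruptSparseVector(sdr, 0.5)  # turns off and on int(0.5 * 10) = 5 bits each
assert sdr.sum() == 10         # sparsity is unchanged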
9,815
def calculateOverlapCurve(sp, inputVectors):
  columnNumber = np.prod(sp.getColumnDimensions())
  numInputVector, inputSize = inputVectors.shape
  outputColumns = np.zeros((numInputVector, columnNumber), dtype=uintType)
  outputColumnsCorrupted = np.zeros((numInputVector, columnNumber),
                                    dtype=uintType)
  noiseLevelList = np.linspace(0, 1.0, 21)
  inputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))
  outputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))
  for i in range(numInputVector):
    for j in range(len(noiseLevelList)):
      inputVectorCorrupted = copy.deepcopy(inputVectors[i][:])
      corruptSparseVector(inputVectorCorrupted, noiseLevelList[j])
      sp.compute(inputVectors[i][:], False, outputColumns[i][:])
      sp.compute(inputVectorCorrupted, False, outputColumnsCorrupted[i][:])
      inputOverlapScore[i][j] = percentOverlap(inputVectors[i][:],
                                               inputVectorCorrupted)
      outputOverlapScore[i][j] = percentOverlap(outputColumns[i][:],
                                                outputColumnsCorrupted[i][:])
  return noiseLevelList, inputOverlapScore, outputOverlapScore
Evaluate the noise robustness of the SP for a given set of SDRs.
9,816
def classifySPoutput(targetOutputColumns, outputColumns):
  numTargets, numDims = targetOutputColumns.shape
  overlap = np.zeros((numTargets,))
  for i in range(numTargets):
    overlap[i] = percentOverlap(outputColumns, targetOutputColumns[i, :])
  classLabel = np.argmax(overlap)
  return classLabel
Classify the SP output
9,817
def classificationAccuracyVsNoise(sp, inputVectors, noiseLevelList):
  numInputVector, inputSize = inputVectors.shape
  if sp is None:
    targetOutputColumns = copy.deepcopy(inputVectors)
  else:
    columnNumber = np.prod(sp.getColumnDimensions())
    targetOutputColumns = np.zeros((numInputVector, columnNumber),
                                   dtype=uintType)
    for i in range(numInputVector):
      sp.compute(inputVectors[i][:], False, targetOutputColumns[i][:])
  outcomes = np.zeros((len(noiseLevelList), numInputVector))
  for i in range(len(noiseLevelList)):
    for j in range(numInputVector):
      corruptedInputVector = copy.deepcopy(inputVectors[j][:])
      corruptSparseVector(corruptedInputVector, noiseLevelList[i])
      if sp is None:
        outputColumns = copy.deepcopy(corruptedInputVector)
      else:
        outputColumns = np.zeros((columnNumber,), dtype=uintType)
        sp.compute(corruptedInputVector, False, outputColumns)
      predictedClassLabel = classifySPoutput(targetOutputColumns,
                                             outputColumns)
      outcomes[i][j] = predictedClassLabel == j
  predictionAccuracy = np.mean(outcomes, 1)
  return predictionAccuracy
Evaluate whether the SP output is classifiable with varying amounts of noise.
9,818
def plotExampleInputOutput(sp, inputVectors, saveFigPrefix=None):
  numInputVector, inputSize = inputVectors.shape
  numColumns = np.prod(sp.getColumnDimensions())
  outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
  inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
  connectedCounts = np.zeros((numColumns,), dtype=uintType)
  sp.getConnectedCounts(connectedCounts)
  winnerInputOverlap = np.zeros(numInputVector)
  for i in range(numInputVector):
    sp.compute(inputVectors[i][:], False, outputColumns[i][:])
    inputOverlap[i][:] = sp.getOverlaps()
    activeColumns = np.where(outputColumns[i][:] > 0)[0]
    if len(activeColumns) > 0:
      winnerInputOverlap[i] = np.mean(
          inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
  fig, axs = plt.subplots(2, 1)
  axs[0].imshow(inputVectors[:, :200], cmap='gray', interpolation="nearest")
  axs[0].set_ylabel('input #')
  axs[0].set_title('input vectors')
  axs[1].imshow(outputColumns[:, :200], cmap='gray', interpolation="nearest")
  axs[1].set_ylabel('input #')
  axs[1].set_title('output vectors')
  if saveFigPrefix is not None:
    plt.savefig('figures/{}_example_input_output.pdf'.format(saveFigPrefix))
  inputDensity = np.sum(inputVectors, 1) / float(inputSize)
  outputDensity = np.sum(outputColumns, 1) / float(numColumns)
  fig, axs = plt.subplots(2, 1)
  axs[0].plot(inputDensity)
  axs[0].set_xlabel('input #')
  axs[0].set_ylim([0, 0.2])
  axs[1].plot(outputDensity)
  axs[1].set_xlabel('input #')
  axs[1].set_ylim([0, 0.05])
  if saveFigPrefix is not None:
    plt.savefig('figures/{}_example_input_output_density.pdf'.format(
        saveFigPrefix))
Plot example input & output
9,819
def inspectSpatialPoolerStats(sp, inputVectors, saveFigPrefix=None):
  numInputVector, inputSize = inputVectors.shape
  numColumns = np.prod(sp.getColumnDimensions())
  outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
  inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
  connectedCounts = np.zeros((numColumns,), dtype=uintType)
  sp.getConnectedCounts(connectedCounts)
  winnerInputOverlap = np.zeros(numInputVector)
  for i in range(numInputVector):
    sp.compute(inputVectors[i][:], False, outputColumns[i][:])
    inputOverlap[i][:] = sp.getOverlaps()
    activeColumns = np.where(outputColumns[i][:] > 0)[0]
    if len(activeColumns) > 0:
      winnerInputOverlap[i] = np.mean(
          inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
  avgInputOverlap = np.mean(inputOverlap, 0)
  entropy = calculateEntropy(outputColumns)
  activationProb = np.mean(outputColumns.astype(realDType), 0)
  dutyCycleDist, binEdge = np.histogram(activationProb, bins=10,
                                        range=[-0.005, 0.095])
  dutyCycleDist = dutyCycleDist.astype('float32') / np.sum(dutyCycleDist)
  binCenter = (binEdge[1:] + binEdge[:-1]) / 2
  fig, axs = plt.subplots(2, 2)
  axs[0, 0].hist(connectedCounts)
  axs[0, 0].set_xlabel('# Connected Synapse')
  axs[0, 1].hist(winnerInputOverlap)
  axs[0, 1].set_xlabel('# winner input overlap')
  axs[1, 0].bar(binEdge[:-1] + 0.001, dutyCycleDist, width=.008)
  axs[1, 0].set_xlim([-0.005, .1])
  axs[1, 0].set_xlabel('Activation Frequency')
  axs[1, 0].set_title('Entropy: {}'.format(entropy))
  axs[1, 1].plot(connectedCounts, activationProb, '.')
  axs[1, 1].set_xlabel('connection #')
  axs[1, 1].set_ylabel('activation freq')
  plt.tight_layout()
  if saveFigPrefix is not None:
    plt.savefig('figures/{}_network_stats.pdf'.format(saveFigPrefix))
  return fig
Inspect the statistics of a spatial pooler given a set of input vectors
9,820
def calculateEntropy(activeColumns, type='binary'):
  activationProb = np.mean(activeColumns, 0)
  if type == 'binary':
    totalEntropy = np.sum(binaryEntropyVectorized(activationProb))
  elif type == 'renyi':
    totalEntropy = np.sum(renyiEntropyVectorized(activationProb))
  else:
    raise ValueError('unknown entropy type')
  numberOfColumns = activeColumns.shape[1]
  return totalEntropy / numberOfColumns
Calculate the mean entropy given the activation history.
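binaryEntropyVectorized is referenced but not defined in this entry; a sketch consistent with the elementwise binary entropy H(p) = -p*log2(p) - (1-p)*log2(1-p) would be:
import numpy as np

def binaryEntropyVectorized(p):
  # Elementwise binary entropy in bits, with 0 * log(0) taken as 0
  # (enforced here by clipping away from exactly 0 and 1).
  p = np.clip(p, 1e-12, 1. - 1e-12)
  return -p * np.log2(p) - (1. - p) * np.log2(1. - p)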
9,821
def meanMutualInformation(sp, activeColumnsCurrentEpoch,
                          columnsUnderInvestigation=[]):
  if len(columnsUnderInvestigation) == 0:
    columns = range(np.prod(sp.getColumnDimensions()))
  else:
    columns = columnsUnderInvestigation
  numCols = len(columns)
  sumMutualInfo = 0
  normalizingConst = numCols * (numCols - 1) / 2
  for i in range(numCols):
    for j in range(i + 1, numCols):
      sumMutualInfo += mutualInformation(sp, activeColumnsCurrentEpoch,
                                         columns[i], columns[j])
  return sumMutualInfo / normalizingConst
Computes the mean of the mutual information of pairs taken from a list of columns.
9,822
def learnL6Pattern(self, l6Pattern, cellsToLearnOn):
  cellIndices = [self.trnCellIndex(x) for x in cellsToLearnOn]
  newSegments = self.trnConnections.createSegments(cellIndices)
  self.trnConnections.growSynapses(newSegments, l6Pattern, 1.0)
Learn the given l6Pattern on TRN cell dendrites. The TRN cells to learn on are given in cellsToLearnOn. Each of these cells will learn this pattern on a single dendritic segment.
9,823
def computeFeedForwardActivity(self, feedForwardInput):
  ff = feedForwardInput.copy()
  for x in range(self.relayWidth):
    for y in range(self.relayHeight):
      inputCells = self._preSynapticFFCells(x, y)
      for idx in inputCells:
        if feedForwardInput[idx] != 0:
          ff[x, y] = 1.0
          continue
  ff2 = ff * 0.4 + self.burstReadyCells * ff
  return ff2
Activate trnCells according to the l6Input. These in turn will impact bursting mode in relay cells that are connected to these trnCells. Given the feedForwardInput, compute which cells will be silent, tonic, or bursting.
9,824
def reset(self):
  self.trnOverlaps = []
  self.activeTRNSegments = []
  self.activeTRNCellIndices = []
  self.relayOverlaps = []
  self.activeRelaySegments = []
  self.burstReadyCellIndices = []
  self.burstReadyCells = np.zeros((self.relayWidth, self.relayHeight))
Set everything back to zero
9,825
def _initializeTRNToRelayCellConnections(self):
  for x in range(self.relayWidth):
    for y in range(self.relayHeight):
      relayCellIndex = self.relayCellIndex((x, y))
      trnCells = self._preSynapticTRNCells(x, y)
      for trnCell in trnCells:
        newSegment = self.relayConnections.createSegments([relayCellIndex])
        self.relayConnections.growSynapses(
            newSegment, [self.trnCellIndex(trnCell)], 1.0)
Initialize TRN to relay cell connectivity. For each relay cell, create a dendritic segment for each TRN cell it connects to.
9,826
def countWordOverlapFrequencies(filename="goodOverlapPairs.pkl"):
  with open(filename, "rb") as f:
    goodOverlapPairs = pickle.load(f)
  with open("word_bitmaps_40_bits_minimum.pkl", "rb") as f:
    bitmaps = pickle.load(f)
  wordFrequencies = {}
  for w1, w2, overlap in goodOverlapPairs:
    wordFrequencies[w1] = wordFrequencies.get(w1, 0) + 1
  printTemplate = PrettyTable(["Num High Overlaps", "Word", "On Bits"],
                              sortby="Num High Overlaps", reversesort=True)
  for word in wordFrequencies.iterkeys():
    printTemplate.add_row([wordFrequencies[word], word, len(bitmaps[word])])
  print printTemplate
Count how many high overlaps each word has and print it out
9,827
def activateCells(self, activeColumns, basalReinforceCandidates,
                  apicalReinforceCandidates, basalGrowthCandidates,
                  apicalGrowthCandidates, learn=True):
  (correctPredictedCells,
   burstingColumns) = np2.setCompare(self.predictedCells, activeColumns,
                                     self.predictedCells / self.cellsPerColumn,
                                     rightMinusLeft=True)
  newActiveCells = np.concatenate(
      (correctPredictedCells,
       np2.getAllCellsInColumns(burstingColumns, self.cellsPerColumn)))
  (learningActiveBasalSegments,
   learningActiveApicalSegments,
   learningMatchingBasalSegments,
   learningMatchingApicalSegments,
   basalSegmentsToPunish,
   apicalSegmentsToPunish,
   newSegmentCells,
   learningCells) = self._calculateLearning(activeColumns, burstingColumns,
                                            correctPredictedCells,
                                            self.activeBasalSegments,
                                            self.activeApicalSegments,
                                            self.matchingBasalSegments,
                                            self.matchingApicalSegments,
                                            self.basalPotentialOverlaps,
                                            self.apicalPotentialOverlaps)
  if learn:
    for learningSegments in (learningActiveBasalSegments,
                             learningMatchingBasalSegments):
      self._learn(self.basalConnections, self.rng, learningSegments,
                  basalReinforceCandidates, basalGrowthCandidates,
                  self.basalPotentialOverlaps, self.initialPermanence,
                  self.sampleSize, self.permanenceIncrement,
                  self.permanenceDecrement, self.maxSynapsesPerSegment)
    for learningSegments in (learningActiveApicalSegments,
                             learningMatchingApicalSegments):
      self._learn(self.apicalConnections, self.rng, learningSegments,
                  apicalReinforceCandidates, apicalGrowthCandidates,
                  self.apicalPotentialOverlaps, self.initialPermanence,
                  self.sampleSize, self.permanenceIncrement,
                  self.permanenceDecrement, self.maxSynapsesPerSegment)
    if self.basalPredictedSegmentDecrement != 0.0:
      self.basalConnections.adjustActiveSynapses(
          basalSegmentsToPunish, basalReinforceCandidates,
          -self.basalPredictedSegmentDecrement)
    if self.apicalPredictedSegmentDecrement != 0.0:
      self.apicalConnections.adjustActiveSynapses(
          apicalSegmentsToPunish, apicalReinforceCandidates,
          -self.apicalPredictedSegmentDecrement)
    if len(basalGrowthCandidates) > 0 and len(apicalGrowthCandidates) > 0:
      self._learnOnNewSegments(self.basalConnections, self.rng,
                               newSegmentCells, basalGrowthCandidates,
                               self.initialPermanence, self.sampleSize,
                               self.maxSynapsesPerSegment)
      self._learnOnNewSegments(self.apicalConnections, self.rng,
                               newSegmentCells, apicalGrowthCandidates,
                               self.initialPermanence, self.sampleSize,
                               self.maxSynapsesPerSegment)
  newActiveCells.sort()
  learningCells.sort()
  self.activeCells = newActiveCells
  self.winnerCells = learningCells
  self.predictedActiveCells = correctPredictedCells
Activate cells in the specified columns, using the result of the previous depolarizeCells as predictions. Then learn.
9,828
def _calculateLearning(self, activeColumns, burstingColumns,
                       correctPredictedCells, activeBasalSegments,
                       activeApicalSegments, matchingBasalSegments,
                       matchingApicalSegments, basalPotentialOverlaps,
                       apicalPotentialOverlaps):
  learningActiveBasalSegments = self.basalConnections.filterSegmentsByCell(
      activeBasalSegments, correctPredictedCells)
  learningActiveApicalSegments = self.apicalConnections.filterSegmentsByCell(
      activeApicalSegments, correctPredictedCells)
  cellsForMatchingBasal = self.basalConnections.mapSegmentsToCells(
      matchingBasalSegments)
  cellsForMatchingApical = self.apicalConnections.mapSegmentsToCells(
      matchingApicalSegments)
  matchingCells = np.intersect1d(cellsForMatchingBasal,
                                 cellsForMatchingApical)
  (matchingCellsInBurstingColumns,
   burstingColumnsWithNoMatch) = np2.setCompare(
       matchingCells, burstingColumns,
       matchingCells / self.cellsPerColumn, rightMinusLeft=True)
  (learningMatchingBasalSegments,
   learningMatchingApicalSegments) = self._chooseBestSegmentPairPerColumn(
       matchingCellsInBurstingColumns, matchingBasalSegments,
       matchingApicalSegments, basalPotentialOverlaps,
       apicalPotentialOverlaps)
  newSegmentCells = self._getCellsWithFewestSegments(
      burstingColumnsWithNoMatch)
  if self.basalPredictedSegmentDecrement > 0.0:
    correctMatchingBasalMask = np.in1d(
        cellsForMatchingBasal / self.cellsPerColumn, activeColumns)
    basalSegmentsToPunish = matchingBasalSegments[~correctMatchingBasalMask]
  else:
    basalSegmentsToPunish = ()
  if self.apicalPredictedSegmentDecrement > 0.0:
    correctMatchingApicalMask = np.in1d(
        cellsForMatchingApical / self.cellsPerColumn, activeColumns)
    apicalSegmentsToPunish = matchingApicalSegments[
        ~correctMatchingApicalMask]
  else:
    apicalSegmentsToPunish = ()
  learningCells = np.concatenate(
      (correctPredictedCells,
       self.basalConnections.mapSegmentsToCells(
           learningMatchingBasalSegments),
       newSegmentCells))
  return (learningActiveBasalSegments, learningActiveApicalSegments,
          learningMatchingBasalSegments, learningMatchingApicalSegments,
          basalSegmentsToPunish, apicalSegmentsToPunish, newSegmentCells,
          learningCells)
Learning occurs on pairs of segments. Correctly predicted cells always have active basal and apical segments, and we learn on these segments. In bursting columns, we either learn on an existing segment pair or grow a new pair of segments.
9,829
def _learnOnNewSegments(connections, rng, newSegmentCells, growthCandidates,
                        initialPermanence, sampleSize, maxSynapsesPerSegment):
  numNewSynapses = len(growthCandidates)
  if sampleSize != -1:
    numNewSynapses = min(numNewSynapses, sampleSize)
  if maxSynapsesPerSegment != -1:
    numNewSynapses = min(numNewSynapses, maxSynapsesPerSegment)
  newSegments = connections.createSegments(newSegmentCells)
  connections.growSynapsesToSample(newSegments, growthCandidates,
                                   numNewSynapses, initialPermanence, rng)
Create new segments and grow synapses on them.
9,830
def _chooseBestSegmentPairPerColumn(self, matchingCellsInBurstingColumns,
                                    matchingBasalSegments,
                                    matchingApicalSegments,
                                    basalPotentialOverlaps,
                                    apicalPotentialOverlaps):
  basalCandidateSegments = self.basalConnections.filterSegmentsByCell(
      matchingBasalSegments, matchingCellsInBurstingColumns)
  apicalCandidateSegments = self.apicalConnections.filterSegmentsByCell(
      matchingApicalSegments, matchingCellsInBurstingColumns)
  self.basalConnections.sortSegmentsByCell(basalCandidateSegments)
  self.apicalConnections.sortSegmentsByCell(apicalCandidateSegments)
  oneBasalPerCellFilter = np2.argmaxMulti(
      basalPotentialOverlaps[basalCandidateSegments],
      self.basalConnections.mapSegmentsToCells(basalCandidateSegments),
      assumeSorted=True)
  basalCandidateSegments = basalCandidateSegments[oneBasalPerCellFilter]
  oneApicalPerCellFilter = np2.argmaxMulti(
      apicalPotentialOverlaps[apicalCandidateSegments],
      self.apicalConnections.mapSegmentsToCells(apicalCandidateSegments),
      assumeSorted=True)
  apicalCandidateSegments = apicalCandidateSegments[oneApicalPerCellFilter]
  cellScores = (basalPotentialOverlaps[basalCandidateSegments] +
                apicalPotentialOverlaps[apicalCandidateSegments])
  columnsForCandidates = (
      self.basalConnections.mapSegmentsToCells(basalCandidateSegments) /
      self.cellsPerColumn)
  onePerColumnFilter = np2.argmaxMulti(cellScores, columnsForCandidates,
                                       assumeSorted=True)
  learningBasalSegments = basalCandidateSegments[onePerColumnFilter]
  learningApicalSegments = apicalCandidateSegments[onePerColumnFilter]
  return (learningBasalSegments, learningApicalSegments)
Choose the best pair of matching segments - one basal and one apical - for each column. Pairs are ranked by the sum of their potential overlaps. When there's a tie, the first pair wins.
9,831
def compute(self, activeColumns, basalInput, apicalInput=(),
            basalGrowthCandidates=None, apicalGrowthCandidates=None,
            learn=True):
  activeColumns = np.asarray(activeColumns)
  basalInput = np.asarray(basalInput)
  apicalInput = np.asarray(apicalInput)
  if basalGrowthCandidates is None:
    basalGrowthCandidates = basalInput
  basalGrowthCandidates = np.asarray(basalGrowthCandidates)
  if apicalGrowthCandidates is None:
    apicalGrowthCandidates = apicalInput
  apicalGrowthCandidates = np.asarray(apicalGrowthCandidates)
  self.depolarizeCells(basalInput, apicalInput, learn)
  self.activateCells(activeColumns, basalInput, apicalInput,
                     basalGrowthCandidates, apicalGrowthCandidates, learn)
Perform one timestep. Use the basal and apical input to form a set of predictions, then activate the specified columns, then learn.
9,832
def compute(self, activeColumns, apicalInput=(), apicalGrowthCandidates=None,
            learn=True):
  activeColumns = np.asarray(activeColumns)
  apicalInput = np.asarray(apicalInput)
  if apicalGrowthCandidates is None:
    apicalGrowthCandidates = apicalInput
  apicalGrowthCandidates = np.asarray(apicalGrowthCandidates)
  self.prevPredictedCells = self.predictedCells
  self.activateCells(activeColumns, self.activeCells, self.prevApicalInput,
                     self.winnerCells, self.prevApicalGrowthCandidates,
                     learn)
  self.depolarizeCells(self.activeCells, apicalInput, learn)
  self.prevApicalInput = apicalInput.copy()
  self.prevApicalGrowthCandidates = apicalGrowthCandidates.copy()
Perform one timestep. Activate the specified columns using the predictions from the previous timestep, then learn. Then form a new set of predictions using the new active cells and the apicalInput.
9,833
def createLocationEncoder(t, w=15):
  encoder = CoordinateEncoder(name="positionEncoder", n=t.l6CellCount, w=w)
  return encoder
A default coordinate encoder for encoding locations into sparse distributed representations.
9,834
def getUnionLocations(encoder, x, y, r, step=1):
  output = np.zeros(encoder.getWidth(), dtype=defaultDtype)
  locations = set()
  for dx in range(-r, r + 1, step):
    for dy in range(-r, r + 1, step):
      if dx * dx + dy * dy <= r * r:
        e = encodeLocation(encoder, x + dx, y + dy, output)
        locations = locations.union(set(e))
  return locations
Return the union of the location encodings of all locations within the specified circle.
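For example, with r = 1 and step = 1 the circle test dx*dx + dy*dy <= r*r keeps only the centre and the four axis neighbours:
r, step = 1, 1
offsets = [(dx, dy)
           for dx in range(-r, r + 1, step)
           for dy in range(-r, r + 1, step)
           if dx * dx + dy * dy <= r * r]
print(offsets)  # [(-1, 0), (0, -1), (0, 0), (0, 1), (1, 0)]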
9,835
def _calculateBasalLearning(self, activeColumns, burstingColumns,
                            correctPredictedCells, activeBasalSegments,
                            matchingBasalSegments, basalPotentialOverlaps):
  learningActiveBasalSegments = self.basalConnections.filterSegmentsByCell(
      activeBasalSegments, correctPredictedCells)
  cellsForMatchingBasal = self.basalConnections.mapSegmentsToCells(
      matchingBasalSegments)
  matchingCells = np.unique(cellsForMatchingBasal)
  (matchingCellsInBurstingColumns,
   burstingColumnsWithNoMatch) = np2.setCompare(
       matchingCells, burstingColumns,
       matchingCells / self.cellsPerColumn, rightMinusLeft=True)
  learningMatchingBasalSegments = self._chooseBestSegmentPerColumn(
      self.basalConnections, matchingCellsInBurstingColumns,
      matchingBasalSegments, basalPotentialOverlaps, self.cellsPerColumn)
  newBasalSegmentCells = self._getCellsWithFewestSegments(
      self.basalConnections, self.rng, burstingColumnsWithNoMatch,
      self.cellsPerColumn)
  learningCells = np.concatenate(
      (correctPredictedCells,
       self.basalConnections.mapSegmentsToCells(
           learningMatchingBasalSegments),
       newBasalSegmentCells))
  correctMatchingBasalMask = np.in1d(
      cellsForMatchingBasal / self.cellsPerColumn, activeColumns)
  basalSegmentsToPunish = matchingBasalSegments[~correctMatchingBasalMask]
  return (learningActiveBasalSegments, learningMatchingBasalSegments,
          basalSegmentsToPunish, newBasalSegmentCells, learningCells)
Basic Temporal Memory learning. Correctly predicted cells always have active basal segments, and we learn on these segments. In bursting columns, we either learn on an existing basal segment or grow a new one.
9,836
def _calculateApicalLearning(self, learningCells, activeColumns,
                             activeApicalSegments, matchingApicalSegments,
                             apicalPotentialOverlaps):
  learningActiveApicalSegments = self.apicalConnections.filterSegmentsByCell(
      activeApicalSegments, learningCells)
  learningCellsWithoutActiveApical = np.setdiff1d(
      learningCells,
      self.apicalConnections.mapSegmentsToCells(learningActiveApicalSegments))
  cellsForMatchingApical = self.apicalConnections.mapSegmentsToCells(
      matchingApicalSegments)
  learningCellsWithMatchingApical = np.intersect1d(
      learningCellsWithoutActiveApical, cellsForMatchingApical)
  learningMatchingApicalSegments = self._chooseBestSegmentPerCell(
      self.apicalConnections, learningCellsWithMatchingApical,
      matchingApicalSegments, apicalPotentialOverlaps)
  newApicalSegmentCells = np.setdiff1d(learningCellsWithoutActiveApical,
                                       learningCellsWithMatchingApical)
  correctMatchingApicalMask = np.in1d(
      cellsForMatchingApical / self.cellsPerColumn, activeColumns)
  apicalSegmentsToPunish = matchingApicalSegments[~correctMatchingApicalMask]
  return (learningActiveApicalSegments, learningMatchingApicalSegments,
          apicalSegmentsToPunish, newApicalSegmentCells)
Calculate apical learning for each learning cell.
9,837
def _calculateApicalSegmentActivity(connections, activeInput,
                                    connectedPermanence, activationThreshold,
                                    minThreshold):
  overlaps = connections.computeActivity(activeInput, connectedPermanence)
  activeSegments = np.flatnonzero(overlaps >= activationThreshold)
  potentialOverlaps = connections.computeActivity(activeInput)
  matchingSegments = np.flatnonzero(potentialOverlaps >= minThreshold)
  return (activeSegments, matchingSegments, potentialOverlaps)
Calculate the active and matching apical segments for this timestep.
9,838
def _calculatePredictedCells(self, activeBasalSegments, activeApicalSegments):
  cellsForBasalSegments = self.basalConnections.mapSegmentsToCells(
      activeBasalSegments)
  cellsForApicalSegments = self.apicalConnections.mapSegmentsToCells(
      activeApicalSegments)
  fullyDepolarizedCells = np.intersect1d(cellsForBasalSegments,
                                         cellsForApicalSegments)
  partlyDepolarizedCells = np.setdiff1d(cellsForBasalSegments,
                                        fullyDepolarizedCells)
  inhibitedMask = np.in1d(partlyDepolarizedCells / self.cellsPerColumn,
                          fullyDepolarizedCells / self.cellsPerColumn)
  predictedCells = np.append(fullyDepolarizedCells,
                             partlyDepolarizedCells[~inhibitedMask])
  if self.useApicalTiebreak == False:
    predictedCells = cellsForBasalSegments
  return predictedCells
Calculate the predicted cells given the set of active segments.
9,839
def _chooseBestSegmentPerCell(cls, connections, cells, allMatchingSegments,
                              potentialOverlaps):
  candidateSegments = connections.filterSegmentsByCell(allMatchingSegments,
                                                       cells)
  onePerCellFilter = np2.argmaxMulti(
      potentialOverlaps[candidateSegments],
      connections.mapSegmentsToCells(candidateSegments))
  learningSegments = candidateSegments[onePerCellFilter]
  return learningSegments
For each specified cell, choose its matching segment with the largest number of active potential synapses. When there's a tie, the first segment wins.
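np2.argmaxMulti is used throughout these methods but is not shown in this section; a minimal sketch of the idea (ignoring the assumeSorted fast path) selects, for each group key, the index of that group's maximum value, with ties going to the earliest index:
import numpy as np

def argmaxMulti(values, groupKeys):
  # Sketch only: for each distinct group key, return the index (into values)
  # of that group's maximum. np.argmax returns the first maximum, which
  # matches the "first segment wins" tie-breaking described above.
  values = np.asarray(values)
  groupKeys = np.asarray(groupKeys)
  result = []
  for key in np.unique(groupKeys):
    members = np.flatnonzero(groupKeys == key)          # ascending indices
    result.append(members[np.argmax(values[members])])  # first max wins
  return np.array(result, dtype=int)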
9,840
def _chooseBestSegmentPerColumn(cls, connections, matchingCells,
                                allMatchingSegments, potentialOverlaps,
                                cellsPerColumn):
  candidateSegments = connections.filterSegmentsByCell(allMatchingSegments,
                                                       matchingCells)
  cellScores = potentialOverlaps[candidateSegments]
  columnsForCandidates = (connections.mapSegmentsToCells(candidateSegments) /
                          cellsPerColumn)
  onePerColumnFilter = np2.argmaxMulti(cellScores, columnsForCandidates)
  learningSegments = candidateSegments[onePerColumnFilter]
  return learningSegments
For all the columns covered by matchingCells, choose the column's matching segment with the largest number of active potential synapses. When there's a tie, the first segment wins.
9,841
def infer(self, sensationList, reset=True, objectName=None):
  self._unsetLearningMode()
  statistics = collections.defaultdict(list)
  for sensations in sensationList:
    for col in xrange(self.numColumns):
      location, feature = sensations[col]
      self.sensorInputs[col].addDataToQueue(list(feature), 0, 0)
      self.externalInputs[col].addDataToQueue(list(location), 0, 0)
    self.network.run(1)
    self._updateInferenceStats(statistics, objectName)
  if reset:
    self._sendReset()
  statistics["numSteps"] = len(sensationList)
  statistics["object"] = objectName if objectName is not None else "Unknown"
  self.statistics.append(statistics)
Infer on the given sensations.
9,842
def _saveL2Representation(self, objectName):
  self.objectL2Representations[objectName] = self.getL2Representations()
  try:
    objectIndex = self.objectNameToIndex[objectName]
  except KeyError:
    if self.objectNamesAreIndices:
      objectIndex = objectName
      if objectIndex >= self.objectL2RepresentationsMatrices[0].nRows():
        for matrix in self.objectL2RepresentationsMatrices:
          matrix.resize(objectIndex + 1, matrix.nCols())
    else:
      objectIndex = self.objectL2RepresentationsMatrices[0].nRows()
      for matrix in self.objectL2RepresentationsMatrices:
        matrix.resize(matrix.nRows() + 1, matrix.nCols())
    self.objectNameToIndex[objectName] = objectIndex
  for colIdx, matrix in enumerate(self.objectL2RepresentationsMatrices):
    activeCells = self.L2Columns[colIdx]._pooler.getActiveCells()
    matrix.setRowFromSparse(objectIndex, activeCells,
                            np.ones(len(activeCells), dtype="float32"))
Record the current active L2 cells as the representation for objectName.
9,843
def plotInferenceStats(self, fields, plotDir="plots", experimentID=0,
                       onePlot=True):
  if not os.path.exists(plotDir):
    os.makedirs(plotDir)
  plt.figure()
  stats = self.statistics[experimentID]
  objectName = stats["object"]
  for i in xrange(self.numColumns):
    if not onePlot:
      plt.figure()
    for field in fields:
      fieldKey = field + " C" + str(i)
      plt.plot(stats[fieldKey], marker='+', label=fieldKey)
    plt.legend(loc="upper right")
    plt.xlabel("Sensation #")
    plt.xticks(range(stats["numSteps"]))
    plt.ylabel("Number of active bits")
    plt.ylim(plt.ylim()[0] - 5, plt.ylim()[1] + 5)
    plt.title("Object inference for object {}".format(objectName))
    if not onePlot:
      relPath = "{}_exp_{}_C{}.png".format(self.name, experimentID, i)
      path = os.path.join(plotDir, relPath)
      plt.savefig(path)
      plt.close()
  if onePlot:
    relPath = "{}_exp_{}.png".format(self.name, experimentID)
    path = os.path.join(plotDir, relPath)
    plt.savefig(path)
    plt.close()
Plots and saves the desired inference statistics.
9,844
def averageConvergencePoint(self, prefix, minOverlap, maxOverlap,
                            settlingTime=1, firstStat=0, lastStat=None):
  convergenceSum = 0.0
  numCorrect = 0.0
  inferenceLength = 1000000
  for stats in self.statistics[firstStat:lastStat]:
    convergencePoint = 0.0
    for key in stats.iterkeys():
      if prefix in key:
        inferenceLength = len(stats[key])
        columnConvergence = L4L2Experiment._locateConvergencePoint(
            stats[key], minOverlap, maxOverlap)
        convergencePoint = max(convergencePoint, columnConvergence)
    convergenceSum += ceil(float(convergencePoint) / settlingTime)
    if ceil(float(convergencePoint) / settlingTime) <= inferenceLength:
      numCorrect += 1
  if len(self.statistics[firstStat:lastStat]) == 0:
    return 10000.0, 0.0
  return (convergenceSum / len(self.statistics[firstStat:lastStat]),
          numCorrect / len(self.statistics[firstStat:lastStat]))
For each object, compute the convergence time: the first point at which all L2 columns have converged.
9,845
def getAlgorithmInstance(self, layer="L2", column=0):
  assert ((column >= 0) and (column < self.numColumns)), ("Column number not "
                                                          "in valid range")
  if layer == "L2":
    return self.L2Columns[column].getAlgorithmInstance()
  elif layer == "L4":
    return self.L4Columns[column].getAlgorithmInstance()
  else:
    raise Exception("Invalid layer. Must be 'L4' or 'L2'")
Returns an instance of the underlying algorithm. For example, layer="L2" and column=1 would return the ColumnPooler instance responsible for column 1.
9,846
def getCurrentObjectOverlaps(self):
  overlaps = np.zeros((self.numColumns, len(self.objectL2Representations)),
                      dtype="uint32")
  for i, representations in enumerate(self.objectL2RepresentationsMatrices):
    activeCells = self.L2Columns[i]._pooler.getActiveCells()
    overlaps[i, :] = representations.rightVecSumAtNZSparse(activeCells)
  return overlaps
Get every L2's current overlap with each L2 object representation that has been learned.
9,847
def isObjectClassified(self, objectName, minOverlap=None, maxL2Size=None):
  L2Representation = self.getL2Representations()
  objectRepresentation = self.objectL2Representations[objectName]
  sdrSize = self.config["L2Params"]["sdrSize"]
  if minOverlap is None:
    minOverlap = sdrSize / 2
  if maxL2Size is None:
    maxL2Size = 1.5 * sdrSize
  numCorrectClassifications = 0
  for col in xrange(self.numColumns):
    overlapWithObject = len(objectRepresentation[col] & L2Representation[col])
    if (overlapWithObject >= minOverlap and
        len(L2Representation[col]) <= maxL2Size):
      numCorrectClassifications += 1
  return numCorrectClassifications == self.numColumns
Return True if objectName is currently unambiguously classified by every L2 column. Classification is correct and unambiguous if the current L2 overlap with the true object is at least minOverlap and the size of the L2 representation is no more than maxL2Size.
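With the default L2 parameters listed below (sdrSize = 40), the fallback thresholds work out to:
sdrSize = 40
minOverlap = sdrSize / 2    # 20: at least half of the object SDR must match
maxL2Size = 1.5 * sdrSize   # 60: the active representation may be at most
                            # 50% larger than one learned object SDR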
9,848
def getDefaultL4Params(self, inputSize, numInputBits):
  sampleSize = int(1.5 * numInputBits)
  if numInputBits == 20:
    activationThreshold = 13
    minThreshold = 13
  elif numInputBits == 10:
    activationThreshold = 8
    minThreshold = 8
  else:
    activationThreshold = int(numInputBits * .6)
    minThreshold = activationThreshold
  return {
    "columnCount": inputSize,
    "cellsPerColumn": 16,
    "learn": True,
    "initialPermanence": 0.51,
    "connectedPermanence": 0.6,
    "permanenceIncrement": 0.1,
    "permanenceDecrement": 0.02,
    "minThreshold": minThreshold,
    "basalPredictedSegmentDecrement": 0.0,
    "apicalPredictedSegmentDecrement": 0.0,
    "activationThreshold": activationThreshold,
    "reducedBasalThreshold": int(activationThreshold * 0.6),
    "sampleSize": sampleSize,
    "implementation": "ApicalTiebreak",
    "seed": self.seed
  }
Returns a good default set of parameters to use in the L4 region.
9,849
def getDefaultL2Params(self, inputSize, numInputBits):
  if numInputBits == 20:
    sampleSizeProximal = 10
    minThresholdProximal = 5
  elif numInputBits == 10:
    sampleSizeProximal = 6
    minThresholdProximal = 3
  else:
    sampleSizeProximal = int(numInputBits * .6)
    minThresholdProximal = int(sampleSizeProximal * .6)
  return {
    "inputWidth": inputSize * 16,
    "cellCount": 4096,
    "sdrSize": 40,
    "synPermProximalInc": 0.1,
    "synPermProximalDec": 0.001,
    "initialProximalPermanence": 0.6,
    "minThresholdProximal": minThresholdProximal,
    "sampleSizeProximal": sampleSizeProximal,
    "connectedPermanenceProximal": 0.5,
    "synPermDistalInc": 0.1,
    "synPermDistalDec": 0.001,
    "initialDistalPermanence": 0.41,
    "activationThresholdDistal": 13,
    "sampleSizeDistal": 20,
    "connectedPermanenceDistal": 0.5,
    "seed": self.seed,
    "learningMode": True,
  }
Returns a good default set of parameters to use in the L2 region.
9,850
def generateFeatures(numFeatures):
  candidates = ([chr(i + 65) for i in xrange(26)] +
                [chr(i + 97) for i in xrange(26)] +
                [chr(i + 48) for i in xrange(10)])
  if numFeatures > len(candidates):
    candidates = ["F{}".format(i) for i in xrange(numFeatures)]
    return candidates
  return candidates[:numFeatures]
Return string features.
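Example usage of the function above:
print(generateFeatures(5))        # ['A', 'B', 'C', 'D', 'E']
print(generateFeatures(100)[:3])  # ['F0', 'F1', 'F2'] -- more than the
                                  # 62 alphanumeric candidates requested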
9,851
def addMonitor(self, monitor):
  token = self.nextMonitorToken
  self.nextMonitorToken += 1
  self.monitors[token] = monitor
  return token
Subscribe to SingleLayer2DExperiment events.
9,852
def doTimestep(self, locationSDR, transitionSDR, featureSDR,
               egocentricLocation, learn):
  for monitor in self.monitors.values():
    monitor.beforeTimestep(locationSDR, transitionSDR, featureSDR,
                           egocentricLocation, learn)
  params = {
    "newLocation": locationSDR,
    "deltaLocation": transitionSDR,
    "featureLocationInput": self.inputLayer.getActiveCells(),
    "featureLocationGrowthCandidates":
      self.inputLayer.getPredictedActiveCells(),
    "learn": learn,
  }
  self.locationLayer.compute(**params)
  for monitor in self.monitors.values():
    monitor.afterLocationCompute(**params)
  params = {
    "activeColumns": featureSDR,
    "basalInput": self.locationLayer.getActiveCells(),
    "apicalInput": self.objectLayer.getActiveCells(),
  }
  self.inputLayer.compute(**params)
  for monitor in self.monitors.values():
    monitor.afterInputCompute(**params)
  params = {
    "feedforwardInput": self.inputLayer.getActiveCells(),
    "feedforwardGrowthCandidates":
      self.inputLayer.getPredictedActiveCells(),
    "learn": learn,
  }
  self.objectLayer.compute(**params)
  for monitor in self.monitors.values():
    monitor.afterObjectCompute(**params)
Run one timestep.
9,853
def learnTransitions(self):
  print "Learning transitions"
  for (i, j), locationSDR in self.locations.iteritems():
    print "i, j", (i, j)
    for (di, dj), transitionSDR in self.transitions.iteritems():
      i2 = i + di
      j2 = j + dj
      if (0 <= i2 < self.diameter and 0 <= j2 < self.diameter):
        for _ in xrange(5):
          self.locationLayer.reset()
          self.locationLayer.compute(newLocation=self.locations[(i, j)])
          self.locationLayer.compute(deltaLocation=transitionSDR,
                                     newLocation=self.locations[(i2, j2)])
  self.locationLayer.reset()
Train the location layer to do path integration. For every location, teach it each (previous location, motor command) pair.
9,854
def learnObjects(self, objectPlacements):
  for monitor in self.monitors.values():
    monitor.afterPlaceObjects(objectPlacements)
  for objectName, objectDict in self.objects.iteritems():
    self.reset()
    objectPlacement = objectPlacements[objectName]
    for locationName, featureName in objectDict.iteritems():
      egocentricLocation = (locationName[0] + objectPlacement[0],
                            locationName[1] + objectPlacement[1])
      locationSDR = self.locations[egocentricLocation]
      featureSDR = self.features[featureName]
      transitionSDR = np.empty(0)
      self.locationLayer.reset()
      self.inputLayer.reset()
      for _ in xrange(10):
        self.doTimestep(locationSDR, transitionSDR, featureSDR,
                        egocentricLocation, learn=True)
      self.inputRepresentations[(featureName, egocentricLocation)] = (
        self.inputLayer.getActiveCells())
    self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()
    self.learnedObjectPlacements[objectName] = objectPlacement
Learn each provided object in egocentric space. Touch every location on each object.
9,855
def _selectTransition(self, allocentricLocation, objectDict, visitCounts):
  candidates = list(transition
                    for transition in self.transitions.keys()
                    if (allocentricLocation[0] + transition[0],
                        allocentricLocation[1] + transition[1]) in objectDict)
  random.shuffle(candidates)
  selectedVisitCount = None
  selectedTransition = None
  selectedAllocentricLocation = None
  for transition in candidates:
    candidateLocation = (allocentricLocation[0] + transition[0],
                         allocentricLocation[1] + transition[1])
    if (selectedVisitCount is None or
        visitCounts[candidateLocation] < selectedVisitCount):
      selectedVisitCount = visitCounts[candidateLocation]
      selectedTransition = transition
      selectedAllocentricLocation = candidateLocation
  return selectedAllocentricLocation, selectedTransition
Choose the transition that lands us in the location we've touched the least often. Break ties randomly, i.e. choose the first candidate in a shuffled list.
9,856
def reset(self):
  self._poolingActivation = numpy.zeros((self._numColumns), dtype="int32")
  self._poolingColumns = []
  self._overlapDutyCycles = numpy.zeros(self._numColumns, dtype=realDType)
  self._activeDutyCycles = numpy.zeros(self._numColumns, dtype=realDType)
  self._minOverlapDutyCycles = numpy.zeros(self._numColumns, dtype=realDType)
  self._minActiveDutyCycles = numpy.zeros(self._numColumns, dtype=realDType)
  self._boostFactors = numpy.ones(self._numColumns, dtype=realDType)
Reset the state of the temporal pooler
9,857
def compute(self, inputVector, learn, activeArray, burstingColumns,
            predictedCells):
  assert (numpy.size(inputVector) == self._numInputs)
  assert (numpy.size(predictedCells) == self._numInputs)
  self._updateBookeepingVars(learn)
  inputVector = numpy.array(inputVector, dtype=realDType)
  predictedCells = numpy.array(predictedCells, dtype=realDType)
  inputVector.reshape(-1)
  if self._spVerbosity > 3:
    print " Input bits: ", inputVector.nonzero()[0]
    print " predictedCells: ", predictedCells.nonzero()[0]
  if self.usePoolingRule:
    overlapsPooling = self._calculatePoolingActivity(predictedCells, learn)
    if self._spVerbosity > 4:
      print "usePoolingRule: Overlaps after step 1:"
      print " ", overlapsPooling
  else:
    overlapsPooling = 0
  overlapsAllInput = self._calculateOverlap(inputVector)
  overlapsPredicted = self._calculateOverlap(predictedCells)
  if self._spVerbosity > 4:
    print "Overlaps with all inputs:"
    print " Number of On Bits: ", inputVector.sum()
    print " ", overlapsAllInput
    print "Overlaps with predicted inputs:"
    print " ", overlapsPredicted
  if self.useBurstingRule:
    overlapsBursting = self._calculateBurstingColumns(burstingColumns)
    if self._spVerbosity > 4:
      print "Overlaps with bursting inputs:"
      print " ", overlapsBursting
  else:
    overlapsBursting = 0
  overlaps = (overlapsPooling + overlapsPredicted + overlapsAllInput +
              overlapsBursting)
  if learn:
    boostedOverlaps = self._boostFactors * overlaps
    if self._spVerbosity > 4:
      print "Overlaps after boosting:"
      print " ", boostedOverlaps
  else:
    boostedOverlaps = overlaps
  activeColumns = self._inhibitColumns(boostedOverlaps)
  if learn:
    self._adaptSynapses(inputVector, activeColumns, predictedCells)
    self._updateDutyCycles(overlaps, activeColumns)
    self._bumpUpWeakColumns()
    self._updateBoostFactors()
    if self._isUpdateRound():
      self._updateInhibitionRadius()
      self._updateMinDutyCycles()
  activeArray.fill(0)
  if activeColumns.size > 0:
    activeArray[activeColumns] = 1
  activeColumnIndices = numpy.where(overlapsPredicted[activeColumns] > 0)[0]
  activeColWithPredictedInput = activeColumns[activeColumnIndices]
  numUnPredictedInput = float(len(burstingColumns.nonzero()[0]))
  numPredictedInput = float(len(predictedCells))
  fracUnPredicted = numUnPredictedInput / (numUnPredictedInput +
                                           numPredictedInput)
  self._updatePoolingState(activeColWithPredictedInput, fracUnPredicted)
  if self._spVerbosity > 2:
    activeColumns.sort()
    print "The following columns are finally active:"
    print " ", activeColumns
    print "The following columns are in pooling state:"
    print " ", self._poolingActivation.nonzero()[0]
  return activeColumns
This is the primary public method of the class. This function takes an input vector and outputs the indices of the active columns.
9,858
def printParameters(self):
  print "------------PY TemporalPooler Parameters ------------------"
  print "numInputs = ", self.getNumInputs()
  print "numColumns = ", self.getNumColumns()
  print "columnDimensions = ", self._columnDimensions
  print "numActiveColumnsPerInhArea = ", self.getNumActiveColumnsPerInhArea()
  print "potentialPct = ", self.getPotentialPct()
  print "globalInhibition = ", self.getGlobalInhibition()
  print "localAreaDensity = ", self.getLocalAreaDensity()
  print "stimulusThreshold = ", self.getStimulusThreshold()
  print "synPermActiveInc = ", self.getSynPermActiveInc()
  print "synPermInactiveDec = ", self.getSynPermInactiveDec()
  print "synPermConnected = ", self.getSynPermConnected()
  print "minPctOverlapDutyCycle = ", self.getMinPctOverlapDutyCycles()
  print "dutyCyclePeriod = ", self.getDutyCyclePeriod()
  print "boostStrength = ", self.getBoostStrength()
  print "spVerbosity = ", self.getSpVerbosity()
  print "version = ", self._version
Useful for debugging.
9,859
def train(self, inputData, numIterations, reset=False):
  if not isinstance(inputData, np.ndarray):
    inputData = np.array(inputData)
  if reset:
    self._reset()
  for _ in xrange(numIterations):
    self._iteration += 1
    batch = self._getDataBatch(inputData)
    if batch.shape[0] != self.filterDim:
      raise ValueError("Batches and filter dimensions don't match!")
    activations = self.encode(batch)
    self._learn(batch, activations)
    if self._iteration % self.decayCycle == 0:
      self.learningRate *= self.learningRateDecay
  if self.verbosity >= 1:
    self.plotLoss()
    self.plotBasis()
Trains the SparseNet with the provided data.
9,860
def encode(self, data, flatten=False):
  if not isinstance(data, np.ndarray):
    data = np.array(data)
  if flatten:
    try:
      data = np.reshape(data, (self.filterDim, data.shape[-1]))
    except ValueError:
      data = np.reshape(data, (self.filterDim, 1))
  if data.shape[0] != self.filterDim:
    raise ValueError("Data does not have the correct dimension!")
  if len(data.shape) == 1:
    data = data[:, np.newaxis]
  projection = self.basis.T.dot(data)
  representation = self.basis.T.dot(self.basis) - np.eye(self.outputDim)
  states = np.zeros((self.outputDim, data.shape[1]))
  threshold = 0.5 * np.max(np.abs(projection), axis=0)
  activations = self._thresholdNonLinearity(states, threshold)
  for _ in xrange(self.numLcaIterations):
    states *= (1 - self.lcaLearningRate)
    states += self.lcaLearningRate * (projection -
                                      representation.dot(activations))
    activations = self._thresholdNonLinearity(states, threshold)
    threshold *= self.thresholdDecay
    threshold[threshold < self.minThreshold] = self.minThreshold
  return activations
Encodes the provided input data, returning a sparse vector of activations.
9,861
def plotLoss(self, filename=None):
  plt.figure()
  plt.plot(self.losses.keys(), self.losses.values())
  plt.xlabel("Iteration")
  plt.ylabel("Loss")
  plt.title("Learning curve for {}".format(self))
  if filename is not None:
    plt.savefig(filename)
Plots the loss history.
9,862
def plotBasis(self, filename=None):
  if np.floor(np.sqrt(self.filterDim)) ** 2 != self.filterDim:
    print "Basis visualization is not available if filterDim is not a square."
    return
  dim = int(np.sqrt(self.filterDim))
  if np.floor(np.sqrt(self.outputDim)) ** 2 != self.outputDim:
    outDimJ = np.sqrt(np.floor(self.outputDim / 2))
    outDimI = np.floor(self.outputDim / outDimJ)
    if outDimI > outDimJ:
      outDimI, outDimJ = outDimJ, outDimI
  else:
    outDimI = np.floor(np.sqrt(self.outputDim))
    outDimJ = outDimI
  outDimI, outDimJ = int(outDimI), int(outDimJ)
  basis = -np.ones((1 + outDimI * (dim + 1), 1 + outDimJ * (dim + 1)))
  k = 0
  for i in xrange(outDimI):
    for j in xrange(outDimJ):
      colorLimit = np.max(np.abs(self.basis[:, k]))
      mat = np.reshape(self.basis[:, k], (dim, dim)) / colorLimit
      basis[1 + i * (dim + 1):1 + i * (dim + 1) + dim,
            1 + j * (dim + 1):1 + j * (dim + 1) + dim] = mat
      k += 1
  plt.figure()
  plt.subplot(aspect="equal")
  plt.pcolormesh(basis)
  plt.axis([0, 1 + outDimJ * (dim + 1), 0, 1 + outDimI * (dim + 1)])
  plt.gca().xaxis.set_major_locator(plt.NullLocator())
  plt.gca().yaxis.set_major_locator(plt.NullLocator())
  plt.title("Basis functions for {0}".format(self))
  if filename is not None:
    plt.savefig(filename)
Plots the basis functions reshaped into 2-dimensional arrays.
9,863
def _reset(self):
  self.basis = np.random.randn(self.filterDim, self.outputDim)
  self.basis /= np.sqrt(np.sum(self.basis ** 2, axis=0))
  self._iteration = 0
  self.losses = {}
Reinitializes the basis functions, the iteration number, and the loss history.
9,864
def read(cls, proto):
  sparsenet = object.__new__(cls)
  sparsenet.filterDim = proto.filterDim
  sparsenet.outputDim = proto.outputDim
  sparsenet.batchSize = proto.batchSize
  lossHistoryProto = proto.losses
  sparsenet.losses = {}
  for i in xrange(len(lossHistoryProto)):
    sparsenet.losses[lossHistoryProto[i].iteration] = lossHistoryProto[i].loss
  sparsenet._iteration = proto.iteration
  sparsenet.basis = np.reshape(proto.basis,
                               newshape=(sparsenet.filterDim,
                                         sparsenet.outputDim))
  sparsenet.learningRate = proto.learningRate
  sparsenet.decayCycle = proto.decayCycle
  sparsenet.learningRateDecay = proto.learningRateDecay
  sparsenet.numLcaIterations = proto.numLcaIterations
  sparsenet.lcaLearningRate = proto.lcaLearningRate
  sparsenet.thresholdDecay = proto.thresholdDecay
  sparsenet.minThreshold = proto.minThreshold
  sparsenet.thresholdType = proto.thresholdType
  sparsenet.verbosity = proto.verbosity
  sparsenet.showEvery = proto.showEvery
  sparsenet.seed = int(proto.seed)
  if sparsenet.seed is not None:
    np.random.seed(sparsenet.seed)
    random.seed(sparsenet.seed)
  return sparsenet
Reads serialized data from the proto object.
9,865
def write(self, proto):
  proto.filterDim = self.filterDim
  proto.outputDim = self.outputDim
  proto.batchSize = self.batchSize
  lossHistoryProto = proto.init("losses", len(self.losses))
  i = 0
  for iteration, loss in self.losses.iteritems():
    iterationLossHistoryProto = lossHistoryProto[i]
    iterationLossHistoryProto.iteration = iteration
    iterationLossHistoryProto.loss = float(loss)
    i += 1
  proto.iteration = self._iteration
  proto.basis = list(self.basis.flatten().astype(type('float', (float,), {})))
  proto.learningRate = self.learningRate
  proto.decayCycle = self.decayCycle
  proto.learningRateDecay = self.learningRateDecay
  proto.numLcaIterations = self.numLcaIterations
  proto.lcaLearningRate = self.lcaLearningRate
  proto.thresholdDecay = self.thresholdDecay
  proto.minThreshold = self.minThreshold
  proto.thresholdType = self.thresholdType
  proto.verbosity = self.verbosity
  proto.showEvery = self.showEvery
  proto.seed = self.seed
Writes serialized data to the proto object.
9,866
def reset(self):
  self._poolingActivation = numpy.zeros(self.getNumColumns(), dtype=REAL_DTYPE)
  self._unionSDR = numpy.array([], dtype=UINT_DTYPE)
  self._poolingTimer = numpy.ones(self.getNumColumns(), dtype=REAL_DTYPE) * 1000
  self._poolingActivationInitLevel = numpy.zeros(self.getNumColumns(),
                                                 dtype=REAL_DTYPE)
  self._preActiveInput = numpy.zeros(self.getNumInputs(), dtype=REAL_DTYPE)
  self._prePredictedActiveInput = numpy.zeros(
      (self.getNumInputs(), self._historyLength), dtype=REAL_DTYPE)
  self.setOverlapDutyCycles(numpy.zeros(self.getNumColumns(), dtype=REAL_DTYPE))
  self.setActiveDutyCycles(numpy.zeros(self.getNumColumns(), dtype=REAL_DTYPE))
  self.setMinOverlapDutyCycles(numpy.zeros(self.getNumColumns(),
                                           dtype=REAL_DTYPE))
  self.setBoostFactors(numpy.ones(self.getNumColumns(), dtype=REAL_DTYPE))
Resets the state of the Union Temporal Pooler.
9,867
def compute(self, activeInput, predictedActiveInput, learn):
  assert numpy.size(activeInput) == self.getNumInputs()
  assert numpy.size(predictedActiveInput) == self.getNumInputs()
  self._updateBookeepingVars(learn)

  # Compute proximal overlaps with the active and the correctly predicted
  # inputs, and combine them with their respective weights.
  overlapsActive = self._calculateOverlap(activeInput)
  overlapsPredictedActive = self._calculateOverlap(predictedActiveInput)
  totalOverlap = (overlapsActive * self._activeOverlapWeight +
                  overlapsPredictedActive *
                  self._predictedActiveOverlapWeight).astype(REAL_DTYPE)

  if learn:
    boostFactors = numpy.zeros(self.getNumColumns(), dtype=REAL_DTYPE)
    self.getBoostFactors(boostFactors)
    boostedOverlaps = boostFactors * totalOverlap
  else:
    boostedOverlaps = totalOverlap

  activeCells = self._inhibitColumns(boostedOverlaps)
  self._activeCells = activeCells

  # Decay all activation levels, then refresh them for the winning cells
  # and recompute the union SDR.
  self._decayPoolingActivation()
  self._addToPoolingActivation(activeCells, overlapsPredictedActive)
  self._getMostActiveCells()

  if learn:
    self._adaptSynapses(predictedActiveInput, activeCells,
                        self.getSynPermActiveInc(),
                        self.getSynPermInactiveDec())
    self._adaptSynapses(predictedActiveInput, self._unionSDR,
                        self._synPermPredActiveInc, 0.0)
    for i in xrange(self._historyLength):
      self._adaptSynapses(self._prePredictedActiveInput[:, i], activeCells,
                          self._synPermPreviousPredActiveInc, 0.0)
    self._updateDutyCycles(totalOverlap.astype(UINT_DTYPE), activeCells)
    self._bumpUpWeakColumns()
    self._updateBoostFactors()
    if self._isUpdateRound():
      self._updateInhibitionRadius()
      self._updateMinDutyCycles()

  self._preActiveInput = copy.copy(activeInput)
  self._prePredictedActiveInput = numpy.roll(self._prePredictedActiveInput, 1, 1)
  if self._historyLength > 0:
    self._prePredictedActiveInput[:, 0] = predictedActiveInput
  return self._unionSDR
Computes one cycle of the Union Temporal Pooler algorithm.
9,868
def _decayPoolingActivation(self):
  if self._decayFunctionType == 'NoDecay':
    self._poolingActivation = self._decayFunction.decay(self._poolingActivation)
  elif self._decayFunctionType == 'Exponential':
    self._poolingActivation = self._decayFunction.decay(
        self._poolingActivationInitLevel, self._poolingTimer)
  return self._poolingActivation
Decrements the pooling activation of all cells.
9,869
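The two branches above call self._decayFunction with different signatures, which suggests the decay functions are pluggable objects. As a minimal sketch only (the real decay classes are not shown here, and the decayRate parameter name is an assumption), an exponential decay function matching the (initLevel, timer) signature could look like:

import numpy

class ExponentialDecayFunctionSketch(object):
  # Hypothetical sketch; the class name and parameter names are assumptions,
  # not the real API.
  def __init__(self, decayRate=0.1):
    self.decayRate = decayRate

  def decay(self, initLevel, timer):
    # Activation falls off exponentially with the time elapsed since the
    # cell was last active, starting from its recorded initial level.
    return initLevel * numpy.exp(-self.decayRate * timer)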
def _addToPoolingActivation(self, activeCells, overlaps):
  self._poolingActivation[activeCells] = self._exciteFunction.excite(
      self._poolingActivation[activeCells], overlaps[activeCells])
  # Advance the timer for all cells, then reset it for the cells that were
  # just active and remember their new activation level for later decay.
  self._poolingTimer[self._poolingTimer >= 0] += 1
  self._poolingTimer[activeCells] = 0
  self._poolingActivationInitLevel[activeCells] = self._poolingActivation[activeCells]
  return self._poolingActivation
Adds the overlaps of the specified active cells to those cells' pooling activation.
9,870
def _getMostActiveCells(self):
  poolingActivation = self._poolingActivation
  nonZeroCells = numpy.argwhere(poolingActivation > 0)[:, 0]
  # Add a tie-breaker so cells with equal activation sort deterministically.
  poolingActivationSubset = (poolingActivation[nonZeroCells] +
                             self._poolingActivation_tieBreaker[nonZeroCells])
  potentialUnionSDR = nonZeroCells[numpy.argsort(poolingActivationSubset)[::-1]]
  topCells = potentialUnionSDR[0:self._maxUnionCells]
  if max(self._poolingTimer) > self._minHistory:
    self._unionSDR = numpy.sort(topCells).astype(UINT_DTYPE)
  else:
    # Match the empty representation used in reset() (was a plain list).
    self._unionSDR = numpy.array([], dtype=UINT_DTYPE)
  return self._unionSDR
Selects the cells with the highest non-zero pooling activation, in sorted order, to form the union SDR.
9,871
def createNetwork(networkConfig):
  registerAllResearchRegions()
  network = Network()
  networkType = networkConfig["networkType"]
  if networkType == "L4L2Column":
    return createL4L2Column(network, networkConfig, "_0")
  elif networkType == "MultipleL4L2Columns":
    return createMultipleL4L2Columns(network, networkConfig)
  elif networkType == "MultipleL4L2ColumnsWithTopology":
    return createMultipleL4L2ColumnsWithTopology(network, networkConfig)
  elif networkType == "L2456Columns":
    return createL2456Columns(network, networkConfig)
  elif networkType == "L4L2TMColumn":
    return createL4L2TMColumn(network, networkConfig, "_0")
  elif networkType == "CombinedSequenceColumn":
    return createCombinedSequenceColumn(network, networkConfig, "_0")
  else:
    # An unknown type previously fell through and returned None silently.
    raise ValueError("Unknown networkType: %s" % networkType)
Creates and initializes the specified network instance.
9,872
def printNetwork(network):
  print "The network has", len(network.regions.values()), "regions"
  for p in range(network.getMaxPhase()):
    print "=== Phase", p
    for region in network.regions.values():
      if network.getPhases(region.name)[0] == p:
        print " ", region.name
Given a network, prints out its regions sorted by phase.
9,873
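As a usage sketch, a caller builds a configuration dict and inspects the resulting network. Only the "networkType" key is grounded in the code above; the remaining keys each factory expects are not shown here, so the dict below is a placeholder:

networkConfig = {
  "networkType": "L4L2Column",
  # ... plus the per-region parameters expected by createL4L2Column ...
}
network = createNetwork(networkConfig)
printNetwork(network)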
def fit_params_to_1d_data(logX):
  m_max = logX.shape[0]
  p_max = logX.shape[1]
  params = np.zeros((m_max, p_max, 3))
  for m_ in range(m_max):
    for p_ in range(p_max):
      # skewnorm.fit returns (a, loc, scale) for each 1-D slice.
      params[m_, p_] = skewnorm.fit(logX[m_, p_])
  return params
Fits skewed normal distributions to 1-D capacity data and returns the distribution parameters.
9,874
def get_interpolated_params(m_frac, ph, params):
  slope, offset = np.polyfit(np.arange(1, 4), params[:3, ph, 0], deg=1)
  a = slope * m_frac + offset
  slope, offset = np.polyfit(np.arange(1, 4), params[:3, ph, 1], deg=1)
  loc = slope * m_frac + offset
  slope, offset = np.polyfit(np.arange(1, 4), params[:3, ph, 2], deg=1)
  scale = slope * m_frac + offset
  return (a, loc, scale)
Gets parameters describing a 1-D capacity distribution for a fractional number of modules.
9,875
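A minimal sketch tying the two functions together, using random placeholder data in place of real log-capacity samples (the shape (3, 2, 500) — three module counts, two phase resolutions, 500 samples — is an assumption for illustration):

import numpy as np
from scipy.stats import skewnorm

logX = np.random.randn(3, 2, 500)       # placeholder log-capacity data
params = fit_params_to_1d_data(logX)    # (3, 2, 3): one (a, loc, scale) per cell
a, loc, scale = get_interpolated_params(1.5, 0, params)
density_at_zero = skewnorm.pdf(0.0, a, loc=loc, scale=scale)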
def rerunExperimentFromLogfile(logFilename):
  callLog = LoggingDecorator.load(logFilename)
  # The first entry in the log is the constructor call.
  exp = L246aNetwork(*callLog[0][1]["args"], **callLog[0][1]["kwargs"])
  for call in callLog[1:]:
    method = getattr(exp, call[0])
    method(*call[1]["args"], **call[1]["kwargs"])
  return exp
Creates an experiment according to the sequence of operations in the log file and returns the resulting experiment instance. The log file is created by setting the logCalls constructor parameter to True.
9,876
def learn(self, objects):
  self.setLearning(True)
  for objectName, sensationList in objects.iteritems():
    self.sendReset()
    print "Learning :", objectName
    prevLoc = [None] * self.numColumns
    numFeatures = len(sensationList[0])
    displacement = [0] * self.dimensions
    for sensation in xrange(numFeatures):
      for col in xrange(self.numColumns):
        location = np.array(sensationList[col][sensation][0])
        feature = sensationList[col][sensation][1]
        # The motor input is the displacement from the previous location.
        if prevLoc[col] is not None:
          displacement = location - prevLoc[col]
        prevLoc[col] = location
        for _ in xrange(self.repeat):
          self.motorInput[col].addDataToQueue(displacement)
          self.sensorInput[col].addDataToQueue(feature, False, 0)
        displacement = [0] * self.dimensions
    self.network.run(self.repeat * numFeatures)
    self.learnedObjects[objectName] = self.getL2Representations()
Learns all provided objects.
9,877
def getL2Representations(self):
  return [set(L2.getSelf()._pooler.getActiveCells()) for L2 in self.L2Regions]
Returns the active L2 representation of each cortical column.
9,878
def excite(self, currentActivation, inputs):
  currentActivation += self._minValue + (self._maxValue - self._minValue) / (
      1 + numpy.exp(-self._steepness * (inputs - self._xMidpoint)))
  return currentActivation
Increases the current activation by a logistic (sigmoid) function of the input.
9,879
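The increment that excite adds is a standard logistic function of the input. A standalone sketch of the same arithmetic, with illustrative parameter values (the real ones come from the constructor, which is not shown here):

import numpy

minValue, maxValue, steepness, xMidpoint = 0.0, 1.0, 1.0, 5.0
inputs = numpy.linspace(0, 15, 7)
increment = minValue + (maxValue - minValue) / (
    1 + numpy.exp(-steepness * (inputs - xMidpoint)))
# The increment approaches minValue for small inputs and saturates near
# maxValue for large ones.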
def plot(self):
  plt.ion()
  plt.show()
  x = numpy.linspace(0, 15, 100)
  y = numpy.zeros(x.shape)
  y = self.excite(y, x)
  plt.plot(x, y)
  plt.xlabel('Input')
  plt.ylabel('Persistence')
  plt.title('Sigmoid Activation Function')
Plots the activation function.
9,880
def computeAccuracyEnding(predictions, truths, iterations, resets=None,
                          randoms=None, num=None, sequenceCounter=None):
  accuracy = []
  numIteration = []
  numSequences = []
  for i in xrange(len(predictions) - 1):
    if num is not None and i > num:
      continue
    if truths[i] is None:
      continue
    # Only score the element right before a reset or a random jump, i.e.
    # the end of a sequence. (The original indexed resets/randoms even when
    # one of them was None.)
    if resets is not None or randoms is not None:
      endOfSequence = ((resets is not None and resets[i + 1]) or
                       (randoms is not None and randoms[i + 1]))
      if not endOfSequence:
        continue
    correct = truths[i] is None or truths[i] in predictions[i]
    accuracy.append(correct)
    numSequences.append(sequenceCounter[i])
    numIteration.append(iterations[i])
  return (accuracy, numIteration, numSequences)
Computes accuracy at the end of each sequence.
9,881
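A small worked example of computeAccuracyEnding: with a reset at the end of the stream, only the element immediately preceding that reset is scored.

predictions = [{1}, {2}, {7}, {4}, set()]
truths = [1, 2, 3, 4, None]
iterations = [0, 1, 2, 3, 4]
resets = [True, False, False, False, True]
acc, iters, seqs = computeAccuracyEnding(
    predictions, truths, iterations, resets=resets,
    sequenceCounter=[0, 0, 0, 0, 1])
# acc == [True]: only truths[3], right before the final reset, is checked.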
def createValidationDataSampler(dataset, ratio):
  indices = np.random.permutation(len(dataset))
  training_count = int(len(indices) * ratio)
  train = torch.utils.data.SubsetRandomSampler(indices=indices[:training_count])
  validate = torch.utils.data.SubsetRandomSampler(indices=indices[training_count:])
  return (train, validate)
Creates torch.utils.data.Samplers used to split the dataset into 2 randomly sampled subsets. The first should be used for training and the second for validation.
9,882
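A usage sketch with a standard torchvision dataset (MNIST is chosen here only for illustration):

import torch
from torchvision import datasets, transforms

dataset = datasets.MNIST("data", train=True, download=True,
                         transform=transforms.ToTensor())
train_sampler, val_sampler = createValidationDataSampler(dataset, ratio=0.8)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=64,
                                           sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(dataset, batch_size=64,
                                         sampler=val_sampler)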
def register_nonzero_counter(network, stats):
  if hasattr(network, "__counter_nonzero__"):
    raise ValueError("nonzero counter was already registered for this network")
  if not isinstance(stats, dict):
    raise ValueError("stats must be a dictionary")
  network.__counter_nonzero__ = stats
  handles = []
  for name, module in network.named_modules():
    handles.append(module.register_forward_hook(_nonzero_counter_hook))
    if network != module:
      if hasattr(module, "__counter_nonzero__"):
        raise ValueError("nonzero counter was already registered for this module")
      # Each submodule gets its own nested dict inside the top-level stats.
      child_data = dict()
      network.__counter_nonzero__[name] = child_data
      module.__counter_nonzero__ = child_data
  network.__counter_nonzero_handles__ = handles
Registers forward hooks to count the number of nonzero floating point values in all the tensors used by the given network during inference.
9,883
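A usage sketch, assuming model and sample_batch are defined elsewhere; the hook _nonzero_counter_hook that actually fills in the per-module counts is not shown here:

stats = {}
register_nonzero_counter(model, stats)
with torch.no_grad():
    model(sample_batch)        # inference populates the nested stat dicts
for name, counts in stats.items():
    print(name, counts)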
def initialize(self, params, repetition):
  super(TinyCIFARExperiment, self).initialize(params, repetition)
  self.network_type = params.get("network_type", "sparse")
Initializes experiment parameters and default values from the configuration file. Called at the beginning of each experiment and each repetition.
9,884
def logger(self, iteration, ret):
  print("Learning rate: {:f}".format(self.lr_scheduler.get_lr()[0]))
  entropies = getEntropies(self.model)
  print("Entropy and max entropy: ", float(entropies[0]), entropies[1])
  print("Training time for epoch=", self.epoch_train_time)
  for noise in self.noise_values:
    print("Noise= {:3.2f}, loss = {:5.4f}, Accuracy = {:5.3f}%".format(
        noise, ret[noise]["loss"], 100.0 * ret[noise]["accuracy"]))
  print("Full epoch time =", self.epoch_time)
  # Track the best noise robustness among epochs that reach 70% clean accuracy.
  if ret[0.0]["accuracy"] > 0.7:
    self.best_noise_score = max(ret[0.1]["accuracy"], self.best_noise_score)
    self.best_epoch = iteration
Prints out relevant information at each epoch.
9,885
def plotAccuracyAndMCsDuringDecrementChange(results, title="", yaxis=""):
  decrementRange = []
  mcRange = []
  for r in results:
    if r["basalPredictedSegmentDecrement"] not in decrementRange:
      decrementRange.append(r["basalPredictedSegmentDecrement"])
    if r["inputSize"] not in mcRange:
      mcRange.append(r["inputSize"])
  decrementRange.sort()
  mcRange.sort()
  print decrementRange
  print mcRange

  # Accumulate accuracies per (number of minicolumns, decrement value) cell.
  accuracy = numpy.zeros((len(mcRange), len(decrementRange)))
  TMAccuracy = numpy.zeros((len(mcRange), len(decrementRange)))
  totals = numpy.zeros((len(mcRange), len(decrementRange)))
  for r in results:
    dec = r["basalPredictedSegmentDecrement"]
    nf = r["inputSize"]
    accuracy[mcRange.index(nf), decrementRange.index(dec)] += r["objectAccuracyL2"]
    TMAccuracy[mcRange.index(nf), decrementRange.index(dec)] += (
        r["sequenceCorrectClassificationsTM"])
    totals[mcRange.index(nf), decrementRange.index(dec)] += 1

  for i, f in enumerate(mcRange):
    print i, f, accuracy[i] / totals[i]
    print i, f, TMAccuracy[i] / totals[i]
    print i, f, totals[i]
    print
Aggregates and prints accuracy vs. decrement value.
9,886
def gen4(dirName):
  # Fig 4A: detailed plots of single inference runs.
  try:
    resultsFig4A = os.path.join(dirName, "pure_sequences_example.pkl")
    with open(resultsFig4A, "rb") as f:
      results = cPickle.load(f)
    for trialNum, stat in enumerate(results["statistics"]):
      plotOneInferenceRun(
        stat,
        itemType="a single sequence",
        fields=[
          ("L4 PredictedActive", "Predicted active cells in sensorimotor layer"),
          ("TM NextPredicted", "Predicted cells in temporal sequence layer"),
          ("TM PredictedActive",
           "Predicted active cells in temporal sequence layer"),
        ],
        basename="pure_sequences",
        trialNumber=trialNum,
        plotDir=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "detailed_plots")
      )
    print "Plots for Fig 4A generated in 'detailed_plots'"
  except Exception, e:
    print "\nCould not generate plots for Fig 4A: "
    traceback.print_exc()
    print

  # Fig 4B: accuracy during sequence inference.
  try:
    plotAccuracyDuringSequenceInference(
      dirName,
      title="Relative performance of layers while inferring temporal sequences",
      yaxis="Accuracy (%)")
    print "Plots for Fig 4B generated in 'plots'"
  except Exception, e:
    print "\nCould not generate plots for Fig 4B: "
    traceback.print_exc()
    print

  # Fig 4C: accuracy as a function of the number of sequences.
  try:
    plotAccuracyVsSequencesDuringSequenceInference(
      dirName,
      title="Relative performance of layers while inferring temporal sequences",
      yaxis="Accuracy (%)")
    print "Plots for Fig 4C generated in 'plots'"
  except Exception, e:
    print "\nCould not generate plots for Fig 4C: "
    traceback.print_exc()
    print
Plots figures 4A, 4B, and 4C.
9,887
def compute(self, inputs, outputs):
  if len(self.queue) > 0:
    data = self.queue.pop()
  else:
    raise Exception("RawSensor: No data to encode: queue is empty")
  outputs["resetOut"][0] = data["reset"]
  outputs["sequenceIdOut"][0] = data["sequenceId"]
  outputs["dataOut"][:] = 0
  outputs["dataOut"][data["nonZeros"]] = 1
  if self.verbosity > 1:
    print "RawSensor outputs:"
    print "sequenceIdOut: ", outputs["sequenceIdOut"]
    print "resetOut: ", outputs["resetOut"]
    print "dataOut: ", outputs["dataOut"].nonzero()[0]
Gets the next record from the queue and encodes it. The fields for inputs and outputs are as defined in the spec above.
9,888
def convertSequenceMachineSequence(generatedSequences):
  sequenceList = []
  currentSequence = []
  for s in generatedSequences:
    if s is None:
      # None is the separator the SequenceMachine inserts between sequences.
      sequenceList.append(currentSequence)
      currentSequence = []
    else:
      currentSequence.append(s)
  return sequenceList
Converts a sequence from the SequenceMachine into a list of sequences, such that each sequence is a list of SDRs (each a set of indices).
9,889
def generateSequences(n=2048, w=40, sequenceLength=5, sequenceCount=2,
                      sharedRange=None, seed=42):
  # Use a large pattern alphabet so patterns rarely repeat across sequences.
  patternAlphabetSize = 10 * (sequenceLength * sequenceCount)
  patternMachine = PatternMachine(n, w, patternAlphabetSize, seed)
  sequenceMachine = SequenceMachine(patternMachine, seed)
  numbers = sequenceMachine.generateNumbers(sequenceCount, sequenceLength,
                                            sharedRange=sharedRange)
  generatedSequences = sequenceMachine.generateFromNumbers(numbers)
  return sequenceMachine, generatedSequences, numbers
Generates high-order sequences using the SequenceMachine.
9,890
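A usage sketch combining the two helpers above:

sequenceMachine, generated, numbers = generateSequences(
    n=2048, w=40, sequenceLength=5, sequenceCount=2, seed=42)
sequences = convertSequenceMachineSequence(generated)
# sequences is a list of 2 sequences, each a list of 5 SDRs (sets of indices).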
def runInference(exp, sequences, enableFeedback=True):
  if enableFeedback:
    print "Feedback enabled: "
  else:
    print "Feedback disabled: "
  error = 0
  activityTraces = []
  responses = []
  for i, sequence in enumerate(sequences):
    (avgActiveCells, avgPredictedActiveCells, activityTrace,
     responsesThisSeq) = exp.infer(sequence, sequenceNumber=i,
                                   enableFeedback=enableFeedback)
    error += avgActiveCells
    activityTraces.append(activityTrace)
    responses.append(responsesThisSeq)
    print " "
  error /= len(sequences)
  print "Average error = ", error
  return error, activityTraces, responses
Runs inference on this set of sequences and computes the error.
9,891
def runStretch(noiseLevel=None, profile=False):
  exp = L4L2Experiment(
    "stretch_L10_F10_C2",
    numCorticalColumns=2,
  )
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=2,
  )
  objects.createRandomObjects(10, 10, numLocations=10, numFeatures=10)
  print "Objects are:"
  for object, pairs in objects.objects.iteritems():
    print str(object) + ": " + str(pairs)

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile(reset=True)

  # Build three independently shuffled sensation streams for object 0,
  # repeating each (location, feature) pair four times. Only the first two
  # streams are used below, one per cortical column.
  objectCopy1 = [pair for pair in objects[0]]
  objectCopy2 = [pair for pair in objects[0]]
  objectCopy3 = [pair for pair in objects[0]]
  random.shuffle(objectCopy1)
  random.shuffle(objectCopy2)
  random.shuffle(objectCopy3)
  objectSensations1 = []
  for pair in objectCopy1:
    for _ in xrange(4):
      objectSensations1.append(pair)
  objectSensations2 = []
  for pair in objectCopy2:
    for _ in xrange(4):
      objectSensations2.append(pair)
  objectSensations3 = []
  for pair in objectCopy3:
    for _ in xrange(4):
      objectSensations3.append(pair)

  inferConfig = {
    "numSteps": len(objectSensations1),
    "noiseLevel": noiseLevel,
    "pairs": {
      0: objectSensations1,
      1: objectSensations2,
    }
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
  if profile:
    exp.printProfile()
  exp.plotInferenceStats(
    fields=["L2 Representation", "Overlap L2 with object", "L4 Representation"],
    onePlot=False,
  )
Stretch test that learns a large number of objects.
9,892
def trainModel(model, loader, optimizer, device, criterion=F.nll_loss,
               batches_in_epoch=sys.maxsize, batch_callback=None,
               progress_bar=None):
  model.train()
  if progress_bar is not None:
    loader = tqdm(loader, **progress_bar)
    # Cap the progress bar total when the epoch is cut short.
    if batches_in_epoch < len(loader):
      loader.total = batches_in_epoch
  for batch_idx, (data, target) in enumerate(loader):
    data, target = data.to(device), target.to(device)
    optimizer.zero_grad()
    output = model(data)
    loss = criterion(output, target)
    loss.backward()
    optimizer.step()
    if batch_callback is not None:
      batch_callback(model=model, batch_idx=batch_idx)
    if batch_idx >= batches_in_epoch:
      break
  if progress_bar is not None:
    loader.n = loader.total
    loader.close()
Trains the given model by iterating through mini-batches. An epoch ends after one pass through the training set, or when the number of mini-batches exceeds the parameter batches_in_epoch.
9,893
def evaluateModel(model, loader, device, batches_in_epoch=sys.maxsize,
                  criterion=F.nll_loss, progress=None):
  model.eval()
  loss = 0
  correct = 0
  dataset_len = len(loader.sampler)
  if progress is not None:
    loader = tqdm(loader, **progress)
  with torch.no_grad():
    for batch_idx, (data, target) in enumerate(loader):
      data, target = data.to(device), target.to(device)
      output = model(data)
      loss += criterion(output, target, reduction='sum').item()
      pred = output.max(1, keepdim=True)[1]
      correct += pred.eq(target.view_as(pred)).sum().item()
      if batch_idx >= batches_in_epoch:
        break
  if progress is not None:
    loader.close()
  loss /= dataset_len
  accuracy = correct / dataset_len
  return {"total_correct": correct, "loss": loss, "accuracy": accuracy}
Evaluates a pre-trained model using the given test dataset loader.
9,894
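A minimal training loop tying trainModel and evaluateModel together, assuming model, train_loader, and val_loader from the earlier sketches; note the default criterion F.nll_loss expects the model to emit log-probabilities:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
for epoch in range(10):
    trainModel(model, train_loader, optimizer, device)
    results = evaluateModel(model, val_loader, device)
    print(epoch, results["loss"], results["accuracy"])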
def run_false_positive_experiment_dim(numActive=128, dim=500, numSamples=1000,
                                      numDendrites=500, synapses=24,
                                      numTrials=10000, seed=42,
                                      nonlinearity=sigmoid_nonlinearity(11.5, 5)):
  numpy.random.seed(seed)
  fps = []
  fns = []
  totalUnclassified = 0
  for trial in range(numTrials):
    # Draw independent positive and negative datasets each trial.
    negData = generate_evenly_distributed_data_sparse(
      dim=dim, num_active=numActive, num_samples=numSamples / 2)
    posData = generate_evenly_distributed_data_sparse(
      dim=dim, num_active=numActive, num_samples=numSamples / 2)
    halfLabels = numpy.asarray([1 for _ in range(numSamples / 2)])
    flippedHalfLabels = halfLabels * -1

    neuron = Neuron(size=synapses * numDendrites, num_dendrites=numDendrites,
                    dendrite_length=synapses, dim=dim,
                    nonlinearity=nonlinearity)
    neg_neuron = Neuron(size=synapses * numDendrites,
                        num_dendrites=numDendrites,
                        dendrite_length=synapses, dim=dim,
                        nonlinearity=nonlinearity)
    neuron.HTM_style_initialize_on_positive_data(posData)
    neg_neuron.HTM_style_initialize_on_positive_data(negData)

    fp, fn, uc = get_error(posData, halfLabels, [neuron], [neg_neuron])
    totalUnclassified += uc
    fps.append(fp)
    fns.append(fn)
    fp, fn, uc = get_error(negData, flippedHalfLabels, [neuron], [neg_neuron])
    totalUnclassified += uc
    fps.append(fp)
    fns.append(fn)

  print "Error with n = {} : {} FP, {} FN, {} unclassified".format(
    dim, sum(fps), sum(fns), totalUnclassified)
  result = {
    "dim": dim,
    "totalFP": sum(fps),
    "totalFN": sum(fns),
    "total mistakes": sum(fns + fps) + totalUnclassified,
    "error": float(sum(fns + fps) + totalUnclassified) / (numTrials * numSamples),
    "totalSamples": numTrials * numSamples,
    "a": numActive,
    "num_dendrites": numDendrites,
    "totalUnclassified": totalUnclassified,
    "synapses": synapses,  # was hardcoded to 24, ignoring the parameter
    "seed": seed,
  }
  return result
Runs an experiment to test the false positive rate as a function of the number of synapses per dendrite, dimension, and sparsity. Uses two competing neurons, along the lines of the P&M model.
9,895
def _getRadius(self, location):
  return int(math.sqrt(sum([coord ** 2 for coord in location])))
Returns the radius associated with the given location.
9,896
def _addNoise(self, pattern, noiseLevel):
  if pattern is None:
    return None
  newBits = []
  for bit in pattern:
    if random.random() < noiseLevel:
      # Replace this bit with a random index (which may collide with others).
      newBits.append(random.randint(0, max(pattern)))
    else:
      newBits.append(bit)
  return set(newBits)
Adds noise to the given pattern and returns a noisy copy.
9,897
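A quick illustration, assuming machine is an instance of the class defining _addNoise:

import random

random.seed(42)
pattern = set([3, 17, 42, 99])
noisy = machine._addNoise(pattern, noiseLevel=0.25)
# On average a quarter of the bits are replaced by random indices drawn from
# [0, max(pattern)], so the result may contain fewer distinct bits.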
def apicalCheck(self, apicalInput):
  (activeApicalSegments,
   matchingApicalSegments,
   apicalPotentialOverlaps) = self._calculateSegmentActivity(
      self.apicalConnections, apicalInput, self.connectedPermanence,
      self.activationThreshold, self.minThreshold, self.reducedBasalThreshold)
  apicallySupportedCells = self.apicalConnections.mapSegmentsToCells(
      activeApicalSegments)
  # Predicted cells are those that are both basally predicted and apically
  # supported.
  predictedCells = np.intersect1d(
      self.basalConnections.mapSegmentsToCells(self.activeBasalSegments),
      apicallySupportedCells)
  return predictedCells
Returns the cells that are both apically and basally predicted: finds the active apical segments for the given apical input and intersects the corresponding cells with the cells predicted by the active basal segments.
9,898
def setupData(self, dataPath, numLabels=0, ordered=False, stripCats=False,
              seed=42, **kwargs):
  self.split(dataPath, numLabels, **kwargs)
  if not ordered:
    self.randomizeData(seed)
  filename, ext = os.path.splitext(dataPath)
  classificationFileName = "{}_category.json".format(filename)
  dataFileName = "{}_network{}".format(filename, ext)
  if stripCats:
    self.stripCategories()
  self.saveData(dataFileName, classificationFileName)
  return dataFileName
Main method of this class. Use it to set up a network data file.
9,899
def _formatSequence(tokens, categories, seqID, uniqueID):
  record = {"_category": categories, "_sequenceId": seqID}
  data = []
  reset = 1
  for t in tokens:
    tokenRecord = record.copy()
    tokenRecord["_token"] = t
    tokenRecord["_reset"] = reset
    tokenRecord["ID"] = uniqueID
    reset = 0  # only the first token in the sequence carries the reset flag
    data.append(tokenRecord)
  return data
Writes the sequence of data records for this sample.