idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
9,900
def saveData(self, dataOutputFile, categoriesOutputFile):
    """
    Save the processed data and the associated category mapping.

    @param dataOutputFile       (str) Path for the network data CSV file.
    @param categoriesOutputFile (str) Path for the category-to-ID JSON file.
    @return (str) dataOutputFile on success, or False when there are no
        records to save.
    @raises TypeError if the output files do not have the expected
        csv/json extensions.
    """
    if self.records is None:
        return False

    if not dataOutputFile.endswith("csv"):
        raise TypeError("data output file must be csv.")
    if not categoriesOutputFile.endswith("json"):
        raise TypeError("category output file must be json")

    # Create the output directories if needed. Guard against an empty
    # dirname (a bare filename), where os.makedirs("") would raise.
    for outputFile in (dataOutputFile, categoriesOutputFile):
        directory = os.path.dirname(outputFile)
        if directory and not os.path.exists(directory):
            os.makedirs(directory)

    with open(dataOutputFile, "w") as f:
        writer = csv.DictWriter(f, fieldnames=self.fieldNames)
        writer.writeheader()
        # Rows 2 and 3 of the network data format carry the field types
        # and field specials.
        writer.writerow(self.types)
        writer.writerow(self.specials)
        for data in self.records:
            for record in data:
                writer.writerow(record)

    with open(categoriesOutputFile, "w") as f:
        f.write(json.dumps(self.categoryToId,
                           sort_keys=True,
                           indent=4,
                           separators=(",", ": ")))

    return dataOutputFile
Save the processed data and the associated category mapping .
9,901
def generateSequence(self, text, preprocess=False):
    """
    Return a list of lists representing the text sequence in network data
    format. Does not preprocess the text.

    @param text       (str)  Raw text to tokenize.
    @param preprocess (bool) Unused here; kept for interface compatibility.
    @return (list) Formatted sequence data.
    """
    tokenList = TextPreprocess().tokenize(text)
    self.sequenceCount += 1
    # Single placeholder category (-1) and a generic "q" ID for queries.
    return self._formatSequence(tokenList, [-1], self.sequenceCount - 1, "q")
Return a list of lists representing the text sequence in network data format . Does not preprocess the text .
9,902
def getSamples(netDataFile):
    """
    Return the samples of a network data file, joined at reset points.

    The file layout is: header row, field-types row, then a specials row
    where "R" marks the reset column; data rows follow.

    @param netDataFile (str) Path to the network data CSV file.
    @return (OrderedDict) Maps a sample ID to a two-tuple of
        ([joined token string], [category ints]).
    @raises IOError if the file cannot be opened.
    """
    try:
        with open(netDataFile) as f:
            reader = csv.reader(f)
            header = next(reader, None)
            next(reader, None)  # skip the field-types row
            resetIdx = next(reader).index("R")
            tokenIdx = header.index("_token")
            catIdx = header.index("_category")
            idIdx = header.index("ID")

            currentSample = []
            samples = OrderedDict()
            for line in reader:
                if int(line[resetIdx]) == 1:
                    # A reset starts a new sequence: flush accumulated
                    # tokens (keyed by the current line's ID/category,
                    # as in the original).
                    if len(currentSample) != 0:
                        samples[line[idIdx]] = (
                            [" ".join(currentSample)],
                            [int(c) for c in line[catIdx].split(" ")])
                    currentSample = [line[tokenIdx]]
                else:
                    currentSample.append(line[tokenIdx])
            samples[line[idIdx]] = (
                [" ".join(currentSample)],
                [int(c) for c in line[catIdx].split(" ")])
        return samples
    except IOError as e:
        # print() call for Python 3 compatibility (was a Python 2
        # print statement; the rest of this module uses print()).
        print("Could not open the file {}.".format(netDataFile))
        raise e
Returns samples joined at reset points .
9,903
def getClassifications(networkDataFile):
    """
    Return the classification strings at the rows where the data
    sequences reset.

    @param networkDataFile (str) Path to the network data CSV file; the
        third row (specials) marks the reset ("R") and classification
        ("C") columns.
    @return (list) Classification strings, one per sequence.
    @raises IOError if the file cannot be opened.
    """
    try:
        with open(networkDataFile) as f:
            reader = csv.reader(f)
            next(reader, None)  # header
            next(reader, None)  # field types
            specials = next(reader)
            resetIdx = specials.index("R")
            classIdx = specials.index("C")
            classifications = []
            for line in reader:
                if int(line[resetIdx]) == 1:
                    classifications.append(line[classIdx])
        return classifications
    except IOError as e:
        # print() call for Python 3 compatibility (was a print statement).
        print("Could not open the file {}.".format(networkDataFile))
        raise e
Returns the classifications at the indices where the data sequences reset .
9,904
def getNumberOfTokens(networkDataFile):
    """
    Return the number of tokens in each sequence of a network data file.

    @param networkDataFile (str) Path to the network data CSV file; the
        third row (specials) marks the reset ("R") column.
    @return (list) Token count per sequence.
    @raises IOError if the file cannot be opened.
    """
    try:
        with open(networkDataFile) as f:
            reader = csv.reader(f)
            next(reader, None)  # header
            next(reader, None)  # field types
            resetIdx = next(reader).index("R")
            count = 0
            numTokens = []
            for line in reader:
                if int(line[resetIdx]) == 1:
                    # New sequence: close out the previous count.
                    if count != 0:
                        numTokens.append(count)
                    count = 1
                else:
                    count += 1
            numTokens.append(count)
        return numTokens
    except IOError as e:
        # print() call for Python 3 compatibility (was a print statement).
        print("Could not open the file {}.".format(networkDataFile))
        raise e
Returns the number of tokens for each sequence
9,905
def getResetsIndices(networkDataFile):
    """
    Return the data-row indices at which the data sequences reset.

    @param networkDataFile (str) Path to the network data CSV file; the
        third row (specials) marks the reset ("R") column.
    @return (list) Zero-based indices of reset rows.
    @raises IOError if the file cannot be opened.
    """
    try:
        with open(networkDataFile) as f:
            reader = csv.reader(f)
            next(reader, None)  # header
            next(reader, None)  # field types
            resetIdx = next(reader).index("R")
            resets = []
            for i, line in enumerate(reader):
                if int(line[resetIdx]) == 1:
                    resets.append(i)
        return resets
    except IOError as e:
        # print() call for Python 3 compatibility (was a print statement).
        print("Could not open the file {}.".format(networkDataFile))
        raise e
Returns the indices at which the data sequences reset .
9,906
def lastNoiseCurve(expPath, suite, iteration="last"):
    """
    Print the noise-tolerance errors from one iteration of an experiment.

    @param expPath   (str) Experiment path within the suite's results.
    @param suite     Experiment suite providing get_value().
    @param iteration Iteration to report (default "last").
    """
    noiseValues = ["0.0", "0.05", "0.1", "0.15", "0.2", "0.25",
                   "0.3", "0.35", "0.4", "0.45", "0.5"]
    print("\nNOISE CURVE =====", expPath, "====== ITERATION:", iteration,
          "=========")
    try:
        result = suite.get_value(expPath, 0, noiseValues, iteration)
        info = [[k, result[k]["testerror"]] for k in noiseValues]
        print(tabulate(info, headers=["noise", "Test Error"],
                       tablefmt="grid"))
        print("totalCorrect:",
              suite.get_value(expPath, 0, "totalCorrect", iteration))
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate; failure to load stays best-effort.
        print("Couldn't load experiment", expPath)
Print the noise errors from the last iteration of this experiment
9,907
def learningCurve(expPath, suite):
    """
    Print the test, validation, BG scores and elapsed time for every
    iteration of an experiment, then report the test/BG score at the
    iteration with the maximum validation accuracy.

    @param expPath (str) Experiment path within the suite's results.
    @param suite   Experiment suite providing get_value().
    """
    print("\nLEARNING CURVE ================", expPath,
          "=====================")
    try:
        headers = ["testResults", "validation", "bgResults", "elapsedTime"]
        result = suite.get_value(expPath, 0, headers, "all")
        info = []
        maxValidationAccuracy = -1.0
        maxTestAccuracy = -1.0
        maxBGAccuracy = -1.0
        maxIter = -1
        for i, v in enumerate(zip(result["testResults"],
                                  result["validation"],
                                  result["bgResults"],
                                  result["elapsedTime"])):
            info.append([i, v[0]["testerror"], v[1]["testerror"],
                         v[2]["testerror"], int(v[3])])
            # Track the iteration with the best validation accuracy.
            if v[1]["testerror"] > maxValidationAccuracy:
                maxValidationAccuracy = v[1]["testerror"]
                maxTestAccuracy = v[0]["testerror"]
                maxBGAccuracy = v[2]["testerror"]
                maxIter = i
        headers.insert(0, "iteration")
        print(tabulate(info, headers=headers, tablefmt="grid"))
        print("Max validation score =", maxValidationAccuracy,
              " at iteration", maxIter)
        print("Test score at that iteration =", maxTestAccuracy)
        print("BG score at that iteration =", maxBGAccuracy)
    except Exception:
        # Narrowed from a bare except so Ctrl-C still propagates.
        print("Couldn't load experiment", expPath)
Print the test validation and other scores from each iteration of this experiment . We select the test score that corresponds to the iteration with maximum validation accuracy .
9,908
def bestScore(expPath, suite):
    """
    Return the scores from the iteration with maximum validation accuracy.

    @param expPath (str) Experiment path within the suite's results.
    @param suite   Experiment suite providing get_value().
    @return (tuple) (testAccuracy, validationAccuracy, bgAccuracy,
        iteration, totalAccuracy), or five Nones if the experiment
        cannot be loaded.
    """
    maxValidationAccuracy = -1.0
    maxTestAccuracy = -1.0
    maxTotalAccuracy = -1.0
    maxBGAccuracy = -1.0
    maxIter = -1
    try:
        headers = ["testResults", "validation", "bgResults",
                   "elapsedTime", "totalCorrect"]
        result = suite.get_value(expPath, 0, headers, "all")
        for i, v in enumerate(zip(result["testResults"],
                                  result["validation"],
                                  result["bgResults"],
                                  result["elapsedTime"],
                                  result["totalCorrect"])):
            if v[1]["testerror"] > maxValidationAccuracy:
                maxValidationAccuracy = v[1]["testerror"]
                maxTestAccuracy = v[0]["testerror"]
                maxBGAccuracy = v[2]["testerror"]
                # totalCorrect may be missing for some iterations.
                if v[4] is not None:
                    maxTotalAccuracy = v[4]
                maxIter = i
        return (maxTestAccuracy, maxValidationAccuracy, maxBGAccuracy,
                maxIter, maxTotalAccuracy)
    except Exception:
        # Narrowed from a bare except; keep the best-effort contract.
        print("Couldn't load experiment", expPath)
        return None, None, None, None, None
Given a single experiment return the test validation and other scores from the iteration with maximum validation accuracy .
9,909
def findOptimalResults(expName, suite, outFile):
    """
    For every sub-experiment under expName, find the iteration with the
    best validation score (via bestScore) and write its metrics to the
    CSV writer on outFile, printing a summary table at the end.

    @param expName (str) Name of the experiment folder.
    @param suite   Experiment suite providing get_values_fix_params().
    @param outFile Writable file object for the CSV summary.
    """
    writer = csv.writer(outFile)
    headers = ["testAccuracy", "bgAccuracy", "maxTotalAccuracy",
               "experiment path"]
    writer.writerow(headers)
    info = []
    print("\n================", expName, "=====================")
    try:
        values, params = suite.get_values_fix_params(
            expName, 0, "testerror", "last")
        for p in params:
            expPath = p["name"]
            # Paths from the suite may omit the results prefix.
            if not "results" in expPath:
                expPath = os.path.join("results", expPath)
            (maxTestAccuracy, maxValidationAccuracy, maxBGAccuracy,
             maxIter, maxTotalAccuracy) = bestScore(expPath, suite)
            row = [maxTestAccuracy, maxBGAccuracy, maxTotalAccuracy,
                   expPath]
            info.append(row)
            writer.writerow(row)
        print(tabulate(info, headers=headers, tablefmt="grid"))
    except Exception:
        # Narrowed from a bare except so Ctrl-C still propagates.
        print("Couldn't analyze experiment", expName)
Go through every experiment in the specified folder . For each experiment find the iteration with the best validation score and return the metrics associated with that iteration .
9,910
def getErrorBars(expPath, suite):
    """
    For each sub-experiment under expPath, collect the best test and
    noise (totalCorrect) scores chosen by validation accuracy (via
    bestScore), then print the mean and standard deviation of each.

    @param expPath (str) Experiment path within the suite's results.
    @param suite   Experiment suite providing get_exps().
    """
    subExperiments = suite.get_exps(expPath)
    numExps = len(subExperiments)
    testScores = np.zeros(numExps)
    noiseScores = np.zeros(numExps)
    for idx, subExp in enumerate(subExperiments):
        (testAcc, _validationAcc, _bgAcc, _iteration,
         totalAcc) = bestScore(subExp, suite)
        testScores[idx] = testAcc
        noiseScores[idx] = totalAcc
        print(subExp, testAcc, totalAcc)
    print("")
    print("Experiment:", expPath, "Number of sub-experiments", numExps)
    print("test score mean and standard deviation:",
          testScores.mean(), testScores.std())
    print("noise score mean and standard deviation:",
          noiseScores.mean(), noiseScores.std())
Go through each experiment in the path . Get the best scores for each experiment based on accuracy on validation set . Print out overall mean and stdev for test accuracy BG accuracy and noise accuracy .
9,911
def setCompare(a, b, aKey=None, bKey=None, leftMinusRight=False,
               rightMinusLeft=False):
    """
    Compute the intersection and differences between two arrays,
    comparing elements by their key arrays (defaulting to the arrays
    themselves).

    @return Depending on the flags: the intersection slice of `a`, plus
        optionally a-minus-b and the b-side results.
    """
    if aKey is None:
        aKey = a
    if bKey is None:
        bKey = b
    inBoth = np.in1d(aKey, bKey)
    if rightMinusLeft:
        bInA = np.in1d(bKey, aKey)
        if leftMinusRight:
            # NOTE(review): the third element is b's *intersection* slice,
            # not b minus a; preserved as-is from the original.
            return a[inBoth], a[~inBoth], b[bInA]
        return a[inBoth], b[~bInA]
    if leftMinusRight:
        return a[inBoth], a[~inBoth]
    return a[inBoth]
Compute the intersection and differences between two arrays comparing elements by their key .
9,912
def argmaxMulti(a, groupKeys, assumeSorted=False):
    """
    Like numpy's argmax, but returns one argmax per group of elements
    sharing the same key in groupKeys. Ties resolve to the earliest
    index (the stable mergesort preserves input order within groups).

    @param a            (numpy array) Values.
    @param groupKeys    (numpy array) Group key per element.
    @param assumeSorted (bool) Skip the internal sort if groupKeys is
        already sorted.
    @return (numpy array) One index into `a` per distinct group key.
    """
    if not assumeSorted:
        order = np.argsort(groupKeys, kind="mergesort")
        a = a[order]
        groupKeys = groupKeys[order]
    _, starts, lengths = np.unique(groupKeys, return_index=True,
                                   return_counts=True)
    groupMaxes = np.maximum.reduceat(a, starts)
    # All positions equal to their group's max, then the first per group.
    hits = np.flatnonzero(np.repeat(groupMaxes, lengths) == a)
    winners = hits[np.searchsorted(hits, starts)]
    return winners if assumeSorted else order[winners]
This is like numpy s argmax but it returns multiple maximums .
9,913
def getAllCellsInColumns(columns, cellsPerColumn):
    """
    Calculate all cell indices in the specified columns.

    @param columns        (numpy array) Column indices.
    @param cellsPerColumn (int) Number of cells per column.
    @return (numpy array) Cell indices, grouped by column.
    """
    firstCellPerColumn = (columns * cellsPerColumn).reshape((-1, 1))
    offsets = np.arange(cellsPerColumn, dtype="uint32")
    return (firstCellPerColumn + offsets).flatten()
Calculate all cell indices in the specified columns .
9,914
def letterSequence(letters, w=40):
    """
    Return a list of input vectors corresponding to a sequence of
    letters. The vector for letter i (with 'A' == 0) is the set of w
    contiguous ON-bit indices [i*w, (i+1)*w).
    """
    base = ord('A')
    return [set(range((ord(ch) - base) * w, (ord(ch) - base + 1) * w))
            for ch in letters]
Return a list of input vectors corresponding to a sequence of letters. The vector for each letter has w contiguous bits ON and is represented as a set of non-zero indices.
9,915
def getHighOrderSequenceChunk(it, switchover=1000, w=40, n=2048):
    """
    Given an iteration index, return (vecs, label): a list of input
    vectors to append to the stream plus a string label identifying them.
    On iterations where it % 10 == 3, one of five fixed high-order
    sequences is emitted; the pool of sequences changes once `it` passes
    `switchover`. All other iterations emit a single random vector
    labeled ".".

    NOTE(review): relies on module-level helpers letterSequence() and
    getRandomVector() and on numpy.random for the sequence choice.
    """
    if it % 10 == 3:
        # Pick one of the five sequences at random.
        s = numpy.random.randint(5)
        if it <= switchover:
            if s == 0:
                label = "XABCDE"
            elif s == 1:
                label = "YCBEAF"
            elif s == 2:
                label = "GHIJKL"
            elif s == 3:
                label = "WABCMN"
            else:
                label = "ZDBCAE"
        else:
            # After the switchover each leading letter maps to a
            # different continuation.
            if s == 0:
                label = "XCBEAF"
            elif s == 1:
                label = "YABCDE"
            elif s == 2:
                label = "GABCMN"
            elif s == 3:
                label = "WHIJKL"
            else:
                label = "ZDHICF"
        vecs = letterSequence(label)
    else:
        # Filler iteration: one random vector; w and n only matter here.
        vecs = [getRandomVector(w, n)]
        label = "."
    return vecs, label
Given an iteration index returns a list of vectors to be appended to the input stream as well as a string label identifying the sequence . This version generates a bunch of high order sequences . The first element always provides sufficient context to predict the rest of the elements .
9,916
def addNoise(vecs, percent=0.1, n=2048):
    """
    Add noise to the given sequence of vectors and return the modified
    sequence. Each ON bit is, with probability `percent`, moved to a
    random location in [0, n).
    """
    result = []
    for original in vecs:
        noisy = original.copy()
        for bit in original:
            if numpy.random.random() <= percent:
                noisy.discard(bit)
                noisy.add(numpy.random.randint(n))
        result.append(noisy)
    return result
Add noise to the given sequence of vectors and return the modified sequence . A percentage of the on bits are shuffled to other locations .
9,917
def killCells(i, options, tm):
    """
    Kill cells in the temporal memory as appropriate for the simulation.

    @param i       (int) Current iteration.
    @param options Options object with simulation, switchover, noise,
        secondKill and secondNoise attributes.
    @param tm      Temporal memory instance exposing killCells(percent=).
    """
    if options.simulation == "killer":
        if i == options.switchover:
            # print() for Python 3 compatibility (was a print statement).
            print("i=", i, "Killing cells for the first time!")
            tm.killCells(percent=options.noise)
        if i == options.secondKill:
            print("i=", i, "Killing cells again up to", options.secondNoise)
            tm.killCells(percent=options.secondNoise)
    elif options.simulation == "killingMeSoftly" and (i % 100 == 0):
        # Linearly ramp the kill percentage between the two kill points.
        steps = (options.secondKill - options.switchover) / 100
        nsteps = (options.secondNoise - options.noise) / steps
        noise = options.noise + nsteps * (i - options.switchover) / 100
        # Comparison replaces the original `i in xrange(...)`, which
        # scanned the range linearly; the result is identical.
        if options.switchover <= i <= options.secondKill:
            print("i=", i, "Killing cells!")
            tm.killCells(percent=noise)
Kill cells as appropriate
9,918
def printTemporalMemory(tm, outFile):
    """
    Print the relevant TemporalMemory parameters as a table to outFile.

    @param tm      TemporalMemory instance (provides the getter methods).
    @param outFile Writable text file object.
    """
    table = PrettyTable(["Parameter name", "Value", ])
    table.add_row(["columnDimensions", tm.getColumnDimensions()])
    table.add_row(["cellsPerColumn", tm.getCellsPerColumn()])
    table.add_row(["activationThreshold", tm.getActivationThreshold()])
    table.add_row(["minThreshold", tm.getMinThreshold()])
    table.add_row(["maxNewSynapseCount", tm.getMaxNewSynapseCount()])
    table.add_row(["permanenceIncrement", tm.getPermanenceIncrement()])
    table.add_row(["permanenceDecrement", tm.getPermanenceDecrement()])
    table.add_row(["initialPermanence", tm.getInitialPermanence()])
    table.add_row(["connectedPermanence", tm.getConnectedPermanence()])
    table.add_row(["predictedSegmentDecrement",
                   tm.getPredictedSegmentDecrement()])
    # Write the string directly: the Python 2
    # `print >> outFile, s.encode("utf-8")` form is a syntax error on
    # Python 3 and would print a bytes repr even if converted verbatim.
    print(table.get_string(), file=outFile)
Given an instance of TemporalMemory print out the relevant parameters
9,919
def printOptions(options, tm, outFile):
    """
    Pretty print the TM parameters and the experiment options to outFile.

    @param options Options object; its __dict__ is dumped key by key.
    @param tm      TemporalMemory instance, printed via
        printTemporalMemory().
    @param outFile Writable text file object; flushed at the end.
    """
    print("TM parameters:", file=outFile)
    printTemporalMemory(tm, outFile)
    print("Experiment parameters:", file=outFile)
    # .items() works on both Python 2 and 3 (iteritems() is py2-only).
    for k, v in options.__dict__.items():
        print(" %s : %s" % (k, str(v)), file=outFile)
    outFile.flush()
Pretty print the set of options
9,920
def runBasic(noiseLevel=None, profile=False):
    """
    Run a basic experiment on continuous locations: learn a few locations
    on four basic objects, then infer one of them (the cube).

    @param noiseLevel (float or None) Noise level passed through to the
        inference configuration.
    @param profile    (bool) If True, print profiling info after learning
        and again after inference.
    """
    exp = L4L2Experiment(
        "basic_continuous",
        numCorticalColumns=2)

    # Continuous-location object machine feeding two cortical columns.
    objects = createObjectMachine(
        machineType="continuous",
        numInputBits=21,
        sensorInputSize=1024,
        externalInputSize=1024,
        numCorticalColumns=2,
    )
    objects.addObject(Sphere(radius=20), name="sphere")
    objects.addObject(Cylinder(height=50, radius=20), name="cylinder")
    objects.addObject(Box(dimensions=[10, 20, 30, ]), name="box")
    objects.addObject(Cube(width=20), name="cube")

    # (featureName, numLocations) pairs per object; cube and cylinder
    # sample every feature the object machine reports.
    learnConfig = {
        "sphere": [("surface", 10)],
        "box": [("face", 5), ("edge", 5), ("vertex", 5)],
        "cube": [(feature, 5)
                 for feature in objects["cube"].getFeatures()],
        "cylinder": [(feature, 5)
                     for feature in objects["cylinder"].getFeatures()]
    }

    exp.learnObjects(
        objects.provideObjectsToLearn(learnConfig, plot=True),
        reset=True)
    if profile:
        exp.printProfile()

    # Infer the cube: four sensations, one feature list per column.
    inferConfig = {
        "numSteps": 4,
        "noiseLevel": noiseLevel,
        "objectName": "cube",
        "pairs": {
            0: ["face", "face", "edge", "edge"],
            1: ["edge", "face", "face", "edge"]
        }
    }

    exp.infer(
        objects.provideObjectToInfer(inferConfig, plot=True),
        objectName="cube",
        reset=True)
    if profile:
        exp.printProfile()

    exp.plotInferenceStats(
        fields=["L2 Representation",
                "Overlap L2 with object",
                "L4 Representation"],
    )
Runs a basic experiment on continuous locations learning a few locations on four basic objects and inferring one of them .
9,921
def plotBoostTrace(sp, inputVectors, columnIndex):
    """
    Feed every input vector through the spatial pooler (learning on),
    recording boost factors and duty cycles after each step, then plot
    the boost factor and (min) active duty cycle traces for one column.

    @param sp           Spatial pooler instance (compute/get* interface).
    @param inputVectors (numpy array) numInputVector x inputSize inputs.
    @param columnIndex  (int) Column whose traces are plotted.
    """
    numInputVector, inputSize = inputVectors.shape
    columnNumber = np.prod(sp.getColumnDimensions())
    boostFactorsTrace = np.zeros((columnNumber, numInputVector))
    activeDutyCycleTrace = np.zeros((columnNumber, numInputVector))
    minActiveDutyCycleTrace = np.zeros((columnNumber, numInputVector))
    for i in range(numInputVector):
        outputColumns = np.zeros(sp.getColumnDimensions(), dtype=uintType)
        # Deep copy so sp.compute cannot mutate the caller's data.
        inputVector = copy.deepcopy(inputVectors[i][:])
        # Learning enabled so boosting and duty cycles evolve.
        sp.compute(inputVector, True, outputColumns)

        boostFactors = np.zeros((columnNumber, ), dtype=realDType)
        sp.getBoostFactors(boostFactors)
        boostFactorsTrace[:, i] = boostFactors

        activeDutyCycle = np.zeros((columnNumber, ), dtype=realDType)
        sp.getActiveDutyCycles(activeDutyCycle)
        activeDutyCycleTrace[:, i] = activeDutyCycle

        minActiveDutyCycle = np.zeros((columnNumber, ), dtype=realDType)
        sp.getMinActiveDutyCycles(minActiveDutyCycle)
        minActiveDutyCycleTrace[:, i] = minActiveDutyCycle

    plt.figure()
    plt.subplot(2, 1, 1)
    plt.plot(boostFactorsTrace[columnIndex, :])
    plt.ylabel('Boost Factor')
    plt.subplot(2, 1, 2)
    plt.plot(activeDutyCycleTrace[columnIndex, :])
    plt.plot(minActiveDutyCycleTrace[columnIndex, :])
    plt.xlabel(' Time ')
    plt.ylabel('Active Duty Cycle')
Plot boostfactor for a selected column
9,922
def next_epoch(self):
    """
    Load the next epoch of preprocessed audio from disk.

    Reads every pickle in <root>/<epoch>/<subset>/, pairing each audio
    example with its class index. The "silence" file is held out during
    the scan and then appended as (silence, target) entries totalling
    _silence_percentage of the collected data size.

    @return The epoch number that was loaded.
    """
    epoch = next(self._all_epochs)
    folder = os.path.join(self._root, str(epoch), self._subset)
    self.data = []
    silence = None
    # Disable the GC while building the large list: many small
    # allocations, no cycles expected. Re-enabled below.
    gc.disable()
    for filename in os.listdir(folder):
        command = os.path.splitext(os.path.basename(filename))[0]
        # Pickles must be opened in binary mode: text mode ("r") breaks
        # pickle.load under Python 3 and corrupts data on Windows.
        with open(os.path.join(folder, filename), "rb") as pkl_file:
            audio = pickle.load(pkl_file)
        if command == "silence":
            silence = audio
        else:
            target = self.classes.index(os.path.basename(command))
            self.data.extend(itertools.product(audio, [target]))
    gc.enable()

    # Append silence entries proportional to the dataset size.
    target = self.classes.index("silence")
    self.data += [(silence, target)] * int(
        len(self.data) * self._silence_percentage)
    return epoch
Load next epoch from disk
9,923
def isValid(folder, epoch=0):
    """
    Check whether `folder` is a valid preprocessed dataset, i.e. it
    contains <epoch>/train/silence.pkl.
    """
    marker = os.path.join(folder, str(epoch), "train", "silence.pkl")
    return os.path.exists(marker)
Check if the given folder is a valid preprocessed dataset
9,924
def burstColumn(self, column, columnMatchingSegments, prevActiveCells,
                prevWinnerCells, learn):
    """
    Activate all live cells in an unpredicted active column, choose a
    winner cell, and, if learning is on, learn on one segment (growing a
    new segment if necessary). Delegates to the class-level _burstColumn
    after excluding this instance's deadCells from the candidate cells.

    @return Whatever _burstColumn returns -- presumably the column's
        active and winner cells; confirm against the parent class.
    """
    start = self.cellsPerColumn * column
    # Candidate cells for this column, skipping cells marked dead.
    cellsForColumn = [cellIdx
                     for cellIdx in xrange(start, start + self.cellsPerColumn)
                     if cellIdx not in self.deadCells]
    return self._burstColumn(
        self.connections, self._random, self.lastUsedIterationForSegment,
        column, columnMatchingSegments, prevActiveCells, prevWinnerCells,
        cellsForColumn, self.numActivePotentialSynapsesForSegment,
        self.iteration, self.maxNewSynapseCount, self.initialPermanence,
        self.permanenceIncrement, self.permanenceDecrement,
        self.maxSegmentsPerCell, self.maxSynapsesPerSegment, learn)
Activates all of the cells in an unpredicted active column chooses a winner cell and if learning is turned on learns on one segment growing a new segment if necessary .
9,925
def printDeadCells(self):
    """
    Print, for every column, the number of dead cells it contains.
    """
    columnCasualties = numpy.zeros(self.numberOfColumns())
    for cell in self.deadCells:
        col = self.columnForCell(cell)
        columnCasualties[col] += 1
    for col in range(self.numberOfColumns()):
        # print() for Python 3 compatibility (was a print statement).
        print(col, columnCasualties[col])
Print statistics for the dead cells
9,926
def reset(self):
    """
    Reset the Union Pooler: clear the active-cell history and zero the
    union SDR.
    """
    self._activeCellsHistory = []
    self._unionSDR = numpy.zeros(shape=(self._numInputs,))
Reset Union Pooler clear active cell history
9,927
def getSparsity(self):
    """
    Return the sparsity (fraction of ON bits) of the current union SDR.
    """
    return numpy.sum(self._unionSDR) / self._numInputs
Return the sparsity of the current union SDR
9,928
def plotDataframe(table, title, plotPath):
    """
    Plot a pandas DataFrame (one subplot per row of the transposed table)
    and save the figure to plotPath. The subplot labeled 'accuracy' is
    clamped to the [0, 1] range.
    """
    plt.figure()
    axes = table.T.plot(subplots=True, sharex=True, grid=True,
                        legend=True, title=title, figsize=(8, 11))
    accuracyAxis = next(ax for ax in axes
                        if ax.lines[0].get_label() == 'accuracy')
    accuracyAxis.set_ylim(0.0, 1.0)
    plt.savefig(plotPath)
    plt.close()
Plot Panda dataframe .
9,929
def getDatetimeAxis(filePath=None):
    """
    Return the datetime column of an NYC-taxi-style CSV as a pandas
    Series of Timestamps, for use as a plot x-axis.

    @param filePath (str or None) CSV path. Defaults to the nyc_taxi
        dataset location used by the original script; the parameter
        generalizes the function to other files with the same layout.
    @return (pandas Series) Parsed datetime values.
    """
    if filePath is None:
        dataSet = 'nyc_taxi'
        filePath = './data/' + dataSet + '.csv'
    # Rows 1-2 of the NAB data format carry type/special metadata.
    data = pd.read_csv(filePath, header=0, skiprows=[1, 2],
                       names=['datetime', 'value', 'timeofday',
                              'dayofweek'])
    xaxisDate = pd.to_datetime(data['datetime'])
    return xaxisDate
use datetime as x - axis
9,930
def encodeDeltas(self, dx, dy):
    """
    Return the SDR (indices of ON bits) for the delta pair (dx, dy),
    formed as the outer product of the two encoder outputs.
    """
    xBits = self.dxEncoder.encode(dx)
    yBits = self.dyEncoder.encode(dy)
    combined = numpy.outer(xBits, yBits)
    return combined.flatten().nonzero()[0]
Return the SDR for dx dy
9,931
def encodeThetas(self, theta1, theta2):
    """
    Return the SDR (indices of ON bits) for (theta1, theta2), formed as
    the outer product of the theta2 encoding with the theta1 encoding.
    """
    bits1 = self.theta1Encoder.encode(theta1)
    bits2 = self.theta2Encoder.encode(theta2)
    combined = numpy.outer(bits2, bits1)
    return combined.flatten().nonzero()[0]
Return the SDR for theta1 and theta2
9,932
def decodeThetas(self, predictedCells):
    """
    Given the set of predicted cells, return the predicted (theta1,
    theta2) by folding the flat SDR into a 2-D grid and letting each
    encoder top-down-decode its marginal.

    NOTE(review): both grid dimensions use theta1Encoder.getWidth(),
    mirroring the original; verify theta2Encoder has the same width.
    """
    width1 = self.theta1Encoder.getWidth()
    grid = numpy.zeros(self.bottomUpInputSize)
    grid[predictedCells] = 1
    grid = grid.reshape((width1, width1))

    bitsTheta1 = grid.mean(axis=0).nonzero()[0]
    bitsTheta2 = grid.mean(axis=1).nonzero()[0]

    vec1 = numpy.zeros(width1)
    vec1[bitsTheta1] = 1
    theta1Prediction = self.theta1Encoder.topDownCompute(vec1)[0].value

    vec2 = numpy.zeros(self.theta2Encoder.getWidth())
    vec2[bitsTheta2] = 1
    theta2Prediction = self.theta2Encoder.topDownCompute(vec2)[0].value

    return theta1Prediction, theta2Prediction
Given the set of predicted cells return the predicted theta1 and theta2
9,933
def inferTM(self, bottomUp, externalInput):
    """
    Run one inference step through the TM (learning off) and return the
    set of predicted cells.
    """
    self.reset()
    computeArgs = {"basalInput": externalInput, "learn": False}
    self.tm.compute(bottomUp, **computeArgs)
    return self.tm.getPredictiveCells()
Run inference and return the set of predicted cells
9,934
def classify(self, encoding, num=1):
    """
    Classify with a basic one-hot local encoding: softmax the encoding
    and return the indices of the `num` most probable classes, in
    ascending order of probability.
    """
    exponentials = numpy.exp(encoding)
    probDist = exponentials / numpy.sum(exponentials)
    ranked = numpy.argsort(probDist)
    return ranked[-num:].tolist()
Classify with basic one-hot local encoding.
9,935
def _seed ( self , seed = - 1 ) : if seed != - 1 : self . _random = np . random . RandomState ( seed ) else : self . _random = np . random . RandomState ( )
Initialize the random seed
9,936
def initialize_weights ( self ) : n = self . _outputSize m = self . _inputSize self . _Q = self . _random . sample ( ( n , m ) ) for i in range ( n ) : self . _Q [ i ] /= np . sqrt ( np . dot ( self . _Q [ i ] , self . _Q [ i ] ) )
Randomly initializes the visible - to - hidden connections .
9,937
def _inhibitColumnsWithLateral(self, overlaps, lateralConnections):
    """
    Perform experimental local inhibition, column by column.

    Columns are visited in order of decreasing overlap; a column
    activates unless the lateral inhibition already accumulated at it
    reaches the sparsity target s. Each activation adds that column's
    outgoing lateral weights to the inhibition signal.

    @param overlaps           (numpy array) Overlap score per column.
    @param lateralConnections (numpy array) Lateral weight matrix; row i
        holds column i's outgoing inhibition.
    @return (numpy array) Indices of the active columns.
    """
    n, m = self.shape
    y = np.zeros(n)
    s = self.sparsity
    L = lateralConnections
    desiredWeight = self.codeWeight
    inhSignal = np.zeros(n)
    # Stable sort, then reversed: highest overlap first.
    sortedIndices = np.argsort(overlaps, kind='mergesort')[::-1]
    currentWeight = 0
    for i in sortedIndices:
        # Columns below the stimulus threshold can never win; the list
        # is sorted, so stop entirely.
        if overlaps[i] < self._stimulusThreshold:
            break
        inhTooStrong = (inhSignal[i] >= s)
        if not inhTooStrong:
            y[i] = 1.
            currentWeight += 1
            inhSignal[:] += L[i, :]
            # Optional early stop once the desired code weight is hit.
            if self.enforceDesiredWeight and currentWeight == desiredWeight:
                break
    activeColumns = np.where(y == 1.0)[0]
    return activeColumns
Performs an experimental local inhibition. Local inhibition is performed iteratively on a column-by-column basis.
9,938
def compute(self, inputVector, learn, activeArray, applyLateralInhibition=True):
    """
    Primary public method of the LateralPooler: map an input vector to a
    set of active columns, optionally updating permanences, duty cycles,
    boost factors and the lateral inhibitory connection weights.

    @param inputVector (numpy array) Input of size self._numInputs.
    @param learn       (bool) Whether to adapt synapses and related
        state this round.
    @param activeArray (numpy array) Output buffer; filled in place with
        1.0 at active column indices and 0 elsewhere.
    @param applyLateralInhibition (bool) Use the lateral inhibition
        routine instead of the standard one when True.
    @return activeArray (also modified in place).
    @raises TypeError  if inputVector is not a numpy array.
    @raises ValueError if inputVector has the wrong size.
    """
    if not isinstance(inputVector, np.ndarray):
        raise TypeError(
            "Input vector must be a numpy array, not %s" %
            str(type(inputVector)))
    if inputVector.size != self._numInputs:
        raise ValueError(
            "Input vector dimensions don't match. Expecting %s but got %s" %
            (inputVector.size, self._numInputs))

    self._updateBookeepingVars(learn)
    inputVector = np.array(inputVector, dtype=realDType)
    # NOTE(review): reshape(-1) returns a new array and the result is
    # discarded, so this line is a no-op as written.
    inputVector.reshape(-1)
    self._overlaps = self._calculateOverlap(inputVector)
    # Boosting is only applied while learning.
    if learn:
        self._boostedOverlaps = self._boostFactors * self._overlaps
    else:
        self._boostedOverlaps = self._overlaps

    if applyLateralInhibition == True:
        activeColumns = self._inhibitColumnsWithLateral(
            self._boostedOverlaps, self.lateralConnections)
    else:
        activeColumns = self._inhibitColumns(self._boostedOverlaps)

    activeArray.fill(0)
    activeArray[activeColumns] = 1.0

    if learn:
        self._adaptSynapses(inputVector, activeColumns,
                            self._boostedOverlaps)
        self._updateDutyCycles(self._overlaps, activeColumns)
        self._bumpUpWeakColumns()
        self._updateBoostFactors()
        self._updateAvgActivityPairs(activeArray)
        # Lateral learning only when a positive learning rate is set.
        epsilon = self.lateralLearningRate
        if epsilon > 0:
            self._updateLateralConnections(epsilon, self.avgActivityPairs)
        if self._isUpdateRound():
            self._updateInhibitionRadius()
            self._updateMinDutyCycles()

    return activeArray
This is the primary public method of the LateralPooler class . This function takes a input vector and outputs the indices of the active columns . If learn is set to True this method also updates the permanences of the columns and their lateral inhibitory connection weights .
9,939
def feedforward(self):
    """
    Soon to be deprecated. Build and return the dense
    (columns x inputs) permanence matrix W, for compatibility of the SP
    implementation with older code.
    """
    W = np.zeros((self._numColumns, self._numInputs))
    for columnIndex, row in enumerate(W):
        # getPermanence fills the row view in place.
        self.getPermanence(columnIndex, row)
    return W
Soon to be deprecated. Needed to make the SP implementation compatible with some older code.
9,940
def learn(self):
    """
    Learn all objects on every column. For each object, queue each
    feature's SDR and the displacement from the previous feature into
    every column's sensor/motor inputs, run the network over the queued
    data, and store the object's L2 representation for the inference
    stage.
    """
    self.setLearning(True)
    for obj in self.objects:
        self.sendReset()
        previousLocation = [None] * self.numColumns
        displacement = [0., 0.]
        features = obj["features"]
        numOfFeatures = len(features)
        # Random touch order; np.roll offsets it per column so columns
        # visit the features in different orders.
        touchSequence = np.random.permutation(numOfFeatures)
        for sensation in xrange(numOfFeatures):
            for col in xrange(self.numColumns):
                colSequence = np.roll(touchSequence, col)
                feature = features[colSequence[sensation]]
                # Feature center location on the object.
                locationOnObject = np.array(
                    [feature["top"] + feature["height"] / 2.,
                     feature["left"] + feature["width"] / 2.])
                if previousLocation[col] is not None:
                    displacement = locationOnObject - previousLocation[col]
                previousLocation[col] = locationOnObject
                activeColumns = self.featureSDR[col][feature["name"]]
                for _ in xrange(self.numLearningPoints):
                    self.motorInput[col].addDataToQueue(displacement)
                    self.sensorInput[col].addDataToQueue(
                        activeColumns, False, 0)
                    # Only the first learning point carries the real
                    # displacement; repeats stay in place.
                    displacement = [0, 0]
        self.network.run(numOfFeatures * self.numLearningPoints)
        self.learnedObjects[obj["name"]] = self.getL2Representations()
Learn all objects on every column. Each column will learn all the features of every object and store the object's L2 representation to be used later in the inference stage.
9,941
def createL2456Columns(network, networkConfig):
    """
    Create a network of numCorticalColumns L2456 columns, each built by
    _createL2456Column from a per-column deep copy of networkConfig
    (with per-column seeds derived from randomSeedBase), then laterally
    link every pair of distinct columns at the L2 and L5 levels.

    @param network       Network object to populate.
    @param networkConfig (dict) Configuration with numCorticalColumns,
        randomSeedBase and per-layer parameter dicts.
    @return The network with all columns created, linked and profiled.
    """
    numCorticalColumns = networkConfig["numCorticalColumns"]
    for i in xrange(numCorticalColumns):
        networkConfigCopy = copy.deepcopy(networkConfig)
        randomSeedBase = networkConfigCopy["randomSeedBase"]
        # Distinct deterministic seed per column.
        networkConfigCopy["L2Params"]["seed"] = randomSeedBase + i
        networkConfigCopy["L4Params"]["seed"] = randomSeedBase + i
        networkConfigCopy["L5Params"]["seed"] = randomSeedBase + i
        networkConfigCopy["L6Params"]["seed"] = randomSeedBase + i
        # Each column laterally sees all of the others.
        networkConfigCopy["L2Params"]["numOtherCorticalColumns"] = (
            numCorticalColumns - 1)
        networkConfigCopy["L5Params"]["numOtherCorticalColumns"] = (
            numCorticalColumns - 1)
        suffix = "_" + str(i)
        network = _createL2456Column(network, networkConfigCopy, suffix)
    # Fully connect the columns laterally (L2<->L2 and L5<->L5).
    for i in range(networkConfig["numCorticalColumns"]):
        suffixSrc = "_" + str(i)
        for j in range(networkConfig["numCorticalColumns"]):
            if i != j:
                suffixDest = "_" + str(j)
                network.link(
                    "L2Column" + suffixSrc, "L2Column" + suffixDest,
                    "UniformLink", "",
                    srcOutput="feedForwardOutput",
                    destInput="lateralInput")
                network.link(
                    "L5Column" + suffixSrc, "L5Column" + suffixDest,
                    "UniformLink", "",
                    srcOutput="feedForwardOutput",
                    destInput="lateralInput")
    enableProfiling(network)
    return network
Create a network consisting of multiple L2456 columns as described in the file comments above .
9,942
def _mmComputeTransitionTraces(self):
    """
    Compute the predicted=>active/inactive cell and column traces, plus
    the unpredicted (bursting) column trace, from the recorded
    predicted-cells and active-columns traces. No-op unless the
    transition traces are marked stale.
    """
    if not self._mmTransitionTracesStale:
        return

    self._mmData["predictedActiveCellsForSequence"] = defaultdict(set)

    self._mmTraces["predictedActiveCells"] = IndicesTrace(
        self, "predicted => active cells (correct)")
    self._mmTraces["predictedInactiveCells"] = IndicesTrace(
        self, "predicted => inactive cells (extra)")
    self._mmTraces["predictedActiveColumns"] = IndicesTrace(
        self, "predicted => active columns (correct)")
    self._mmTraces["predictedInactiveColumns"] = IndicesTrace(
        self, "predicted => inactive columns (extra)")
    self._mmTraces["unpredictedActiveColumns"] = IndicesTrace(
        self, "unpredicted => active columns (bursting)")

    predictedCellsTrace = self._mmTraces["predictedCells"]
    for i, activeColumns in enumerate(self.mmGetTraceActiveColumns().data):
        predictedActiveCells = set()
        predictedInactiveCells = set()
        predictedActiveColumns = set()
        predictedInactiveColumns = set()
        for predictedCell in predictedCellsTrace.data[i]:
            predictedColumn = self.columnForCell(predictedCell)
            if predictedColumn in activeColumns:
                # Correct prediction: the column became active.
                predictedActiveCells.add(predictedCell)
                predictedActiveColumns.add(predictedColumn)
                sequenceLabel = self.mmGetTraceSequenceLabels().data[i]
                if sequenceLabel is not None:
                    self._mmData["predictedActiveCellsForSequence"][
                        sequenceLabel].add(predictedCell)
            else:
                # Extra prediction: the column stayed inactive.
                predictedInactiveCells.add(predictedCell)
                predictedInactiveColumns.add(predictedColumn)
        # Active columns that nobody predicted burst.
        unpredictedActiveColumns = (
            set(activeColumns) - set(predictedActiveColumns))
        self._mmTraces["predictedActiveCells"].data.append(
            predictedActiveCells)
        self._mmTraces["predictedInactiveCells"].data.append(
            predictedInactiveCells)
        self._mmTraces["predictedActiveColumns"].data.append(
            predictedActiveColumns)
        self._mmTraces["predictedInactiveColumns"].data.append(
            predictedInactiveColumns)
        self._mmTraces["unpredictedActiveColumns"].data.append(
            unpredictedActiveColumns)

    self._mmTransitionTracesStale = False
Computes the transition traces if necessary .
9,943
def get_biased_correlations(data, threshold=10):
    """
    Mean of the `threshold` highest pairwise correlations for each bit
    across the entire dataset (self-correlation excluded). Meant as a
    deliberately biased, upper-end comparison point for the pairwise
    correlations reported in the literature.

    @param data      Sparse matrix providing toDense().
    @param threshold (int) How many top correlations to keep per bit.
    @return (float) Mean of the collected top correlations.
    """
    dense = data.toDense()
    correlations = numpy.corrcoef(dense, rowvar=False)
    top_values = []
    for row in correlations:
        # Skip index 0 of the sorted row: the self-correlation of 1.
        top_values.extend(sorted(row, reverse=True)[1:threshold + 1])
    return numpy.mean(top_values)
Gets the highest few correlations for each bit across the entirety of the data . Meant to provide a comparison point for the pairwise correlations reported in the literature which are typically between neighboring neurons tuned to the same inputs . We would expect these neurons to be among the most correlated in any region so pairwise correlations between most likely do not provide an unbiased estimator of correlations between arbitrary neurons .
9,944
def get_pattern_correlations ( data ) : patterns = [ data . rowNonZeros ( i ) [ 0 ] for i in range ( data . nRows ( ) ) ] dense_data = data . toDense ( ) correlations = numpy . corrcoef ( dense_data , rowvar = False ) correlations = numpy . nan_to_num ( correlations ) pattern_correlations = [ ] for pattern in patterns : pattern_correlations . append ( [ correlations [ i , j ] for i in pattern for j in pattern if i != j ] ) return numpy . mean ( pattern_correlations )
Gets the average correlation between all bits in patterns across the entire dataset . Assumes input is a sparse matrix . Weighted by pattern rather than by bit ; this is the average pairwise correlation for every pattern in the data and is not the average pairwise correlation for all bits that ever cooccur . This is a subtle but important difference .
9,945
def generate_correlated_data ( dim = 2000 , num_active = 40 , num_samples = 1000 , num_cells_per_cluster_size = [ 2000 ] * 8 , cluster_sizes = range ( 2 , 10 ) ) : clusters = [ ] cells = set ( range ( dim ) ) for size , num_cells in zip ( cluster_sizes , num_cells_per_cluster_size ) : for i in range ( int ( 1. * num_cells / size ) ) : cluster = tuple ( numpy . random . choice ( dim , size , replace = False ) ) clusters . append ( cluster ) datapoints = [ ] for sample in range ( num_samples ) : if len ( clusters ) > num_active / 2 : chosen_clusters = numpy . random . choice ( len ( clusters ) , num_active / 2 , replace = False ) current_clusters = [ clusters [ i ] for i in chosen_clusters ] else : current_clusters = clusters current_cells = set ( ) for cluster in current_clusters : if len ( current_cells ) + len ( cluster ) < num_active : current_cells |= set ( cluster ) else : break if len ( current_cells ) < num_active : possible_cells = cells - current_cells new_cells = numpy . random . choice ( tuple ( possible_cells ) , num_active - len ( current_cells ) , replace = False ) current_cells |= set ( new_cells ) datapoints . append ( list ( current_cells ) ) data = SM32 ( ) data . reshape ( num_samples , dim ) for sample , datapoint in enumerate ( datapoints ) : for i in datapoint : data [ sample , i ] = 1. return data
Generates a set of data drawn from a uniform distribution but with bits clustered to force correlation between neurons . Clusters are randomly chosen to form an activation pattern in such a way as to maintain sparsity .
9,946
def apply_noise ( data , noise ) : if noise >= 1 : noise = noise / 100. for i in range ( data . nRows ( ) ) : ones = data . rowNonZeros ( i ) [ 0 ] replace_indices = numpy . random . choice ( ones , size = int ( len ( ones ) * noise ) , replace = False ) for index in replace_indices : data [ i , index ] = 0 new_indices = numpy . random . choice ( data . nCols ( ) , size = int ( len ( ones ) * noise ) , replace = False ) for index in new_indices : while data [ i , index ] == 1 : index = numpy . random . randint ( 0 , data . nCols ( ) ) data [ i , index ] = 1
Applies noise to a sparse matrix . Noise can be an integer between 0 and 100 indicating the percentage of ones in the original input to move or a float in [ 0 1 ) indicating the same thing . The input matrix is modified in - place and nothing is returned . This operation does not affect the sparsity of the matrix or of any individual datapoint .
9,947
def shuffle_sparse_matrix_and_labels ( matrix , labels ) : print "Shuffling data" new_matrix = matrix . toDense ( ) rng_state = numpy . random . get_state ( ) numpy . random . shuffle ( new_matrix ) numpy . random . set_state ( rng_state ) numpy . random . shuffle ( labels ) print "Data shuffled" return SM32 ( new_matrix ) , numpy . asarray ( labels )
Shuffles a sparse matrix and set of labels together . Resorts to densifying and then re - sparsifying the matrix for convenience . Still very fast .
9,948
def split_sparse_matrix ( matrix , num_categories ) : if matrix . nRows ( ) < num_categories : return [ matrix . getSlice ( i , i + 1 , 0 , matrix . nCols ( ) ) for i in range ( matrix . nRows ( ) ) ] + [ SM32 ( ) for i in range ( num_categories - matrix . nRows ( ) ) ] else : inc = matrix . nRows ( ) / num_categories divisions = [ matrix . getSlice ( i * inc , ( i + 1 ) * inc , 0 , matrix . nCols ( ) ) for i in range ( num_categories - 1 ) ] divisions . append ( matrix . getSlice ( ( num_categories - 1 ) * inc , matrix . nRows ( ) , 0 , matrix . nCols ( ) ) ) return divisions
An analog of numpy . split for our sparse matrix . If the number of categories does not divide the number of rows in the matrix all overflow is placed in the final bin .
9,949
def generate_phase_1 ( dim = 40 ) : phase_1 = numpy . random . normal ( 0 , 1 , dim ) for i in range ( dim - 4 , dim ) : phase_1 [ i ] = 1.0 return phase_1
The first step in creating datapoints in the Poirazi & Mel model . This returns a vector of dimension dim with the last four values set to 1 and the rest drawn from a normal distribution .
9,950
def generate_phase_2 ( phase_1 , dim = 40 ) : phase_2 = [ ] for i in range ( dim ) : indices = [ numpy . random . randint ( 0 , dim ) for i in range ( 4 ) ] phase_2 . append ( numpy . prod ( [ phase_1 [ i ] for i in indices ] ) ) return phase_2
The second step in creating datapoints in the Poirazi & Mel model . This takes a phase 1 vector and creates a phase 2 vector where each point is the product of four elements of the phase 1 vector randomly drawn with replacement .
9,951
def bin_number ( datapoint , intervals ) : index = numpy . searchsorted ( intervals , datapoint ) return [ 0 if index != i else 1 for i in range ( len ( intervals ) + 1 ) ]
Given a datapoint and intervals representing bins returns the number represented in binned form where the bin including the value is set to 1 and all others are 0 .
9,952
def bin_data ( data , dim = 40 , num_bins = 10 ) : intervals = generate_RF_bins ( data , dim , num_bins ) binned_data = [ numpy . concatenate ( [ bin_number ( data [ x ] [ i ] , intervals [ i ] ) for i in range ( len ( data [ x ] ) ) ] ) for x in range ( len ( data ) ) ] return binned_data
Fully bins the data generated by generate_data using generate_RF_bins and bin_number .
9,953
def killCells ( self , killCellPercent ) : if killCellPercent <= 0 : return numHiddenNeurons = self . net . numHiddenNeurons numDead = round ( killCellPercent * numHiddenNeurons ) zombiePermutation = numpy . random . permutation ( numHiddenNeurons ) deadCells = zombiePermutation [ 0 : numDead ] liveCells = zombiePermutation [ numDead : ] self . net . inputWeights = self . net . inputWeights [ liveCells , : ] self . net . bias = self . net . bias [ : , liveCells ] self . net . beta = self . net . beta [ liveCells , : ] self . net . M = self . net . M [ liveCells , liveCells ] self . net . numHiddenNeurons = numHiddenNeurons - numDead
kill a fraction of cells from the network
9,954
def printFrequencyStatistics ( counts , frequencies , numWords , size ) : avgBits = float ( counts . sum ( ) ) / numWords print "Retina width=128, height=128" print "Total number of words processed=" , numWords print "Average number of bits per word=" , avgBits , print "avg sparsity=" , avgBits / size print "counts matrix sum=" , counts . sum ( ) , print "max=" , counts . max ( ) , "min=" , counts . min ( ) , print "mean=" , counts . sum ( ) / float ( size ) print "frequency matrix sum=" , frequencies . sum ( ) , print "max=" , frequencies . max ( ) , "min=" , frequencies . min ( ) , print "mean=" , frequencies . sum ( ) / float ( size ) print "Number of bits with zero entries" , frequencies . nZeroCols ( )
Print interesting statistics regarding the counts and frequency matrices
9,955
def countRandomBitFrequencies ( numTerms = 100000 , percentSparsity = 0.01 ) : counts = SparseMatrix ( ) size = 128 * 128 counts . resize ( 1 , size ) sparseBitmap = SparseMatrix ( ) sparseBitmap . resize ( 1 , size ) random . seed ( 42 ) numWords = 0 for term in xrange ( numTerms ) : bitmap = random . sample ( xrange ( size ) , int ( size * percentSparsity ) ) bitmap . sort ( ) sparseBitmap . setRowFromSparse ( 0 , bitmap , [ 1 ] * len ( bitmap ) ) counts += sparseBitmap numWords += 1 frequencies = SparseMatrix ( ) frequencies . resize ( 1 , size ) frequencies . copy ( counts ) frequencies . divide ( float ( numWords ) ) printFrequencyStatistics ( counts , frequencies , numWords , size ) frequencyFilename = "bit_frequencies_random.pkl" print "Saving frequency matrix in" , frequencyFilename with open ( frequencyFilename , "wb" ) as frequencyPickleFile : pickle . dump ( frequencies , frequencyPickleFile ) return counts
Create a uniformly random counts matrix through sampling .
9,956
def plotlyFrequencyHistogram ( counts ) : data = [ go . Histogram ( x = tuple ( count for _ , _ , count in counts . getNonZerosSorted ( ) ) ) ] py . plot ( data , filename = os . environ . get ( "HEATMAP_NAME" , str ( datetime . datetime . now ( ) ) ) )
x - axis is a count of how many times a bit was active y - axis is number of bits that have that frequency
9,957
def getSparseTensor ( numNonzeros , inputSize , outputSize , onlyPositive = False , fixedRange = 1.0 / 24 ) : w = torch . Tensor ( outputSize , inputSize , ) if onlyPositive : w . data . uniform_ ( 0 , fixedRange ) else : w . data . uniform_ ( - fixedRange , fixedRange ) if numNonzeros < inputSize : numZeros = inputSize - numNonzeros outputIndices = np . arange ( outputSize ) inputIndices = np . array ( [ np . random . permutation ( inputSize ) [ : numZeros ] for _ in outputIndices ] , dtype = np . long ) zeroIndices = np . empty ( ( outputSize , numZeros , 2 ) , dtype = np . long ) zeroIndices [ : , : , 0 ] = outputIndices [ : , None ] zeroIndices [ : , : , 1 ] = inputIndices zeroIndices = torch . LongTensor ( zeroIndices . reshape ( - 1 , 2 ) ) zeroWts = ( zeroIndices [ : , 0 ] , zeroIndices [ : , 1 ] ) w . data [ zeroWts ] = 0.0 return w
Return a random tensor that is initialized like a weight matrix Size is outputSize X inputSize where weightSparsity% of each row is non - zero
9,958
def getPermutedTensors ( W , kw , n , m2 , noisePct ) : W2 = W . repeat ( m2 , 1 ) nz = W [ 0 ] . nonzero ( ) numberToZero = int ( round ( noisePct * kw ) ) for i in range ( m2 ) : indices = np . random . permutation ( kw ) [ 0 : numberToZero ] for j in indices : W2 [ i , nz [ j ] ] = 0 return W2
Generate m2 noisy versions of W . Noisy version of W is generated by randomly permuting noisePct of the non - zero components to other components .
9,959
def getTheta ( k , nTrials = 100000 ) : theDots = np . zeros ( nTrials ) w1 = getSparseTensor ( k , k , nTrials , fixedRange = 1.0 / k ) for i in range ( nTrials ) : theDots [ i ] = w1 [ i ] . dot ( w1 [ i ] ) dotMean = theDots . mean ( ) print ( "k=" , k , "min/mean/max diag of w dot products" , theDots . min ( ) , dotMean , theDots . max ( ) ) theta = dotMean / 2.0 print ( "Using theta as mean / 2.0 = " , theta ) return theta , theDots
Estimate a reasonable value of theta for this k .
9,960
def returnFalseNegatives ( kw , noisePct , n , theta ) : W = getSparseTensor ( kw , n , 1 , fixedRange = 1.0 / kw ) m2 = 10 inputVectors = getPermutedTensors ( W , kw , n , m2 , noisePct ) dot = inputVectors . matmul ( W . t ( ) ) numMatches = ( ( dot >= theta ) . sum ( ) ) . item ( ) pctMatches = numMatches / float ( m2 ) return pctMatches , numMatches , m2
Generate a weight vector W with kw non - zero components . Generate 1000 noisy versions of W and return the match statistics . Noisy version of W is generated by randomly setting noisePct of the non - zero components to zero .
9,961
def computeScaledProbabilities ( listOfScales = [ 1.0 , 1.5 , 2.0 , 2.5 , 3.0 , 3.5 , 4.0 ] , listofkValues = [ 64 , 128 , 256 ] , kw = 32 , n = 1000 , numWorkers = 10 , nTrials = 1000 , ) : args = [ ] theta , _ = getTheta ( kw ) for ki , k in enumerate ( listofkValues ) : for si , s in enumerate ( listOfScales ) : args . append ( { "k" : k , "kw" : kw , "n" : n , "theta" : theta , "nTrials" : nTrials , "inputScaling" : s , "errorIndex" : [ ki , si ] , } ) result = computeMatchProbabilityParallel ( args , numWorkers ) errors = np . zeros ( ( len ( listofkValues ) , len ( listOfScales ) ) ) for r in result : errors [ r [ "errorIndex" ] [ 0 ] , r [ "errorIndex" ] [ 1 ] ] = r [ "pctMatches" ] print ( "Errors using scaled inputs, for kw=" , kw ) print ( repr ( errors ) ) plotScaledMatches ( listofkValues , listOfScales , errors , "images/scalar_effect_of_scale_kw" + str ( kw ) + ".pdf" )
Compute the impact of S on match probabilities for a fixed value of n .
9,962
def computeMatchProbabilityOmega ( k , bMax , theta , nTrials = 100 ) : omegaProb = np . zeros ( bMax + 1 ) for b in range ( 1 , bMax + 1 ) : xwb = getSparseTensor ( b , b , nTrials , fixedRange = 1.0 / k ) xib = getSparseTensor ( b , b , nTrials , onlyPositive = True , fixedRange = 2.0 / k ) r = xwb . matmul ( xib . t ( ) ) numMatches = ( ( r >= theta ) . sum ( ) ) . item ( ) omegaProb [ b ] = numMatches / float ( nTrials * nTrials ) print ( omegaProb ) return omegaProb
The Omega match probability estimates the probability of matching when both vectors have exactly b components in common . This function computes this probability for b = 1 to bMax .
9,963
def plotMatches2 ( listofNValues , errors , listOfScales , scaleErrors , fileName = "images/scalar_matches.pdf" ) : w , h = figaspect ( 0.4 ) fig , ( ax1 , ax2 ) = plt . subplots ( 1 , 2 , figsize = ( w , h ) ) plotMatches ( listofNValues , errors , fileName = None , fig = fig , ax = ax1 ) plotScaledMatches ( listOfScales , scaleErrors , fileName = None , fig = fig , ax = ax2 ) plt . savefig ( fileName ) plt . close ( )
Plot two figures side by side in an aspect ratio appropriate for the paper .
9,964
def createPregeneratedGraphs ( ) : listofNValues = [ 250 , 500 , 1000 , 1500 , 2000 , 2500 ] kw = 32 errors = np . array ( [ [ 3.65083333e-03 , 3.06166667e-04 , 1.89166667e-05 , 4.16666667e-06 , 1.50000000e-06 , 9.16666667e-07 ] , [ 2.44633333e-02 , 3.64491667e-03 , 3.16083333e-04 , 6.93333333e-05 , 2.16666667e-05 , 8.66666667e-06 ] , [ 7.61641667e-02 , 2.42496667e-02 , 3.75608333e-03 , 9.78333333e-04 , 3.33250000e-04 , 1.42250000e-04 ] , [ 2.31302500e-02 , 2.38609167e-02 , 2.28072500e-02 , 2.33225000e-02 , 2.30650000e-02 , 2.33988333e-02 ] ] ) listOfScales = [ 1.0 , 1.5 , 2.0 , 2.5 , 3.0 , 3.5 , 4.0 ] scaleErrors = np . array ( [ [ 1.94166667e-05 , 1.14900000e-03 , 7.20725000e-03 , 1.92405833e-02 , 3.60794167e-02 , 5.70276667e-02 , 7.88510833e-02 ] , [ 3.12500000e-04 , 7.07616667e-03 , 2.71600000e-02 , 5.72415833e-02 , 8.95497500e-02 , 1.21294333e-01 , 1.50582500e-01 ] , [ 3.97708333e-03 , 3.31468333e-02 , 8.04755833e-02 , 1.28687750e-01 , 1.71220000e-01 , 2.07019250e-01 , 2.34703167e-01 ] ] ) plotMatches2 ( listofNValues , errors , listOfScales , scaleErrors , "images/scalar_matches_kw" + str ( kw ) + ".pdf" )
Creates graphs based on previous runs of the scripts . Useful for editing graph format for writeups .
9,965
def _learn ( connections , rng , learningSegments , activeInput , potentialOverlaps , initialPermanence , sampleSize , permanenceIncrement , permanenceDecrement , maxSynapsesPerSegment ) : connections . adjustSynapses ( learningSegments , activeInput , permanenceIncrement , - permanenceDecrement ) if sampleSize == - 1 : maxNew = len ( activeInput ) else : maxNew = sampleSize - potentialOverlaps [ learningSegments ] if maxSynapsesPerSegment != - 1 : synapseCounts = connections . mapSegmentsToSynapseCounts ( learningSegments ) numSynapsesToReachMax = maxSynapsesPerSegment - synapseCounts maxNew = np . where ( maxNew <= numSynapsesToReachMax , maxNew , numSynapsesToReachMax ) connections . growSynapsesToSample ( learningSegments , activeInput , maxNew , initialPermanence , rng )
Adjust synapse permanences grow new synapses and grow new segments .
9,966
def compute ( self , sensorToBodyByColumn , sensorToSpecificObjectByColumn ) : votesByCell = np . zeros ( self . cellCount , dtype = "int" ) self . activeSegmentsByColumn = [ ] for ( connections , activeSensorToBodyCells , activeSensorToSpecificObjectCells ) in zip ( self . connectionsByColumn , sensorToBodyByColumn , sensorToSpecificObjectByColumn ) : overlaps = connections . computeActivity ( { "sensorToBody" : activeSensorToBodyCells , "sensorToSpecificObject" : activeSensorToSpecificObjectCells , } ) activeSegments = np . where ( overlaps >= 2 ) [ 0 ] votes = connections . mapSegmentsToCells ( activeSegments ) votes = np . unique ( votes ) votesByCell [ votes ] += 1 self . activeSegmentsByColumn . append ( activeSegments ) candidates = np . where ( votesByCell == np . max ( votesByCell ) ) [ 0 ] self . activeCells = np . intersect1d ( self . activeCells , candidates ) if self . activeCells . size == 0 : self . activeCells = candidates self . inhibitedCells = np . setdiff1d ( np . where ( votesByCell > 0 ) [ 0 ] , self . activeCells )
Compute the body's location relative to a specific object, from an array of each sensor's location relative to that specific object and an array of each sensor's location relative to the body.
9,967
def metricCompute ( self , sensorToBody , bodyToSpecificObject ) : overlaps = self . metricConnections . computeActivity ( { "bodyToSpecificObject" : bodyToSpecificObject , "sensorToBody" : sensorToBody , } ) self . activeMetricSegments = np . where ( overlaps >= 2 ) [ 0 ] self . activeCells = np . unique ( self . metricConnections . mapSegmentsToCells ( self . activeMetricSegments ) )
Compute the sensor's location relative to a specific object, from the body's location relative to that specific object and the sensor's location relative to the body.
9,968
def anchorCompute ( self , anchorInput , learn ) : if learn : self . _anchorComputeLearningMode ( anchorInput ) else : overlaps = self . anchorConnections . computeActivity ( anchorInput , self . connectedPermanence ) self . activeSegments = np . where ( overlaps >= self . activationThreshold ) [ 0 ] self . activeCells = np . unique ( self . anchorConnections . mapSegmentsToCells ( self . activeSegments ) )
Compute the sensor's location relative to a specific object from a feature-location pair.
9,969
def compute ( self , egocentricLocation ) : offsetInCellFields = ( np . matmul ( self . rotationMatrix , egocentricLocation ) * self . cellFieldsPerUnitDistance ) np . mod ( offsetInCellFields , self . cellDimensions , out = offsetInCellFields ) self . activeCells = np . unique ( np . ravel_multi_index ( np . floor ( offsetInCellFields ) . T . astype ( 'int' ) , self . cellDimensions ) )
Compute the new active cells from the given sensor location relative to body vector .
9,970
def htmresearchCorePrereleaseInstalled ( ) : try : coreDistribution = pkg_resources . get_distribution ( "htmresearch-core" ) if pkg_resources . parse_version ( coreDistribution . version ) . is_prerelease : return True except pkg_resources . DistributionNotFound : pass return False
Make an attempt to determine if a pre - release version of htmresearch - core is installed already .
9,971
def infer ( self , sensationList , reset = True , objectName = None ) : self . _unsetLearningMode ( ) statistics = collections . defaultdict ( list ) if objectName is not None : if objectName not in self . objectRepresentationsL2 : raise ValueError ( "The provided objectName was not given during" " learning" ) for sensations in sensationList : for col in xrange ( self . numColumns ) : location , coarseFeature , fineFeature = sensations [ col ] self . locationInputs [ col ] . addDataToQueue ( list ( location ) , 0 , 0 ) self . coarseSensors [ col ] . addDataToQueue ( list ( coarseFeature ) , 0 , 0 ) self . sensors [ col ] . addDataToQueue ( list ( fineFeature ) , 0 , 0 ) self . network . run ( 1 ) self . _updateInferenceStats ( statistics , objectName ) if reset : self . _sendReset ( ) statistics [ "numSteps" ] = len ( sensationList ) statistics [ "object" ] = objectName if objectName is not None else "Unknown" self . statistics . append ( statistics )
Infer on a given set of sensations for a single object .
9,972
def getDefaultParams(self):
    """
    Return a good default set of parameters to use in the L2456 regions.

    The widths of the sensor/location regions and the input/column widths of
    the cortical regions are all derived from self.sensorInputSize.
    """
    return {
        # Raw sensor regions.
        "sensorParams": {
            "outputWidth": self.sensorInputSize,
        },
        "coarseSensorParams": {
            "outputWidth": self.sensorInputSize,
        },
        "locationParams": {
            "activeBits": 41,
            "outputWidth": self.sensorInputSize,
            "radius": 2,
            "verbosity": 0,
        },
        # L4: sequence/sensorimotor layer.
        "L4Params": {
            "columnCount": self.sensorInputSize,
            "cellsPerColumn": 8,
            "learn": True,
            "learnOnOneCell": False,
            "initialPermanence": 0.51,
            "connectedPermanence": 0.6,
            "permanenceIncrement": 0.1,
            "permanenceDecrement": 0.02,
            "minThreshold": 10,
            "basalPredictedSegmentDecrement": 0.002,
            "activationThreshold": 13,
            "sampleSize": 20,
            "implementation": "ApicalTiebreakCPP",
        },
        # L2: pooling layer above L4.
        "L2Params": {
            "inputWidth": self.sensorInputSize * 8,
            "cellCount": 4096,
            "sdrSize": 40,
            "synPermProximalInc": 0.1,
            "synPermProximalDec": 0.001,
            "initialProximalPermanence": 0.6,
            "minThresholdProximal": 10,
            "sampleSizeProximal": 20,
            "connectedPermanenceProximal": 0.5,
            "synPermDistalInc": 0.1,
            "synPermDistalDec": 0.001,
            "initialDistalPermanence": 0.41,
            "activationThresholdDistal": 13,
            "sampleSizeDistal": 20,
            "connectedPermanenceDistal": 0.5,
            "learningMode": True,
        },
        # L6: like L4 but with a larger predicted-segment decrement.
        "L6Params": {
            "columnCount": self.sensorInputSize,
            "cellsPerColumn": 8,
            "learn": True,
            "learnOnOneCell": False,
            "initialPermanence": 0.51,
            "connectedPermanence": 0.6,
            "permanenceIncrement": 0.1,
            "permanenceDecrement": 0.02,
            "minThreshold": 10,
            "basalPredictedSegmentDecrement": 0.004,
            "activationThreshold": 13,
            "sampleSize": 20,
        },
        # L5: pooling layer above L6 (same settings as L2).
        "L5Params": {
            "inputWidth": self.sensorInputSize * 8,
            "cellCount": 4096,
            "sdrSize": 40,
            "synPermProximalInc": 0.1,
            "synPermProximalDec": 0.001,
            "initialProximalPermanence": 0.6,
            "minThresholdProximal": 10,
            "sampleSizeProximal": 20,
            "connectedPermanenceProximal": 0.5,
            "synPermDistalInc": 0.1,
            "synPermDistalDec": 0.001,
            "initialDistalPermanence": 0.41,
            "activationThresholdDistal": 13,
            "sampleSizeDistal": 20,
            "connectedPermanenceDistal": 0.5,
            "learningMode": True,
        },
    }
Returns a good default set of parameters to use in L2456 regions
9,973
def _retrieveRegions ( self ) : self . sensors = [ ] self . coarseSensors = [ ] self . locationInputs = [ ] self . L4Columns = [ ] self . L2Columns = [ ] self . L5Columns = [ ] self . L6Columns = [ ] for i in xrange ( self . numColumns ) : self . sensors . append ( self . network . regions [ "sensorInput_" + str ( i ) ] . getSelf ( ) ) self . coarseSensors . append ( self . network . regions [ "coarseSensorInput_" + str ( i ) ] . getSelf ( ) ) self . locationInputs . append ( self . network . regions [ "locationInput_" + str ( i ) ] . getSelf ( ) ) self . L4Columns . append ( self . network . regions [ "L4Column_" + str ( i ) ] . getSelf ( ) ) self . L2Columns . append ( self . network . regions [ "L2Column_" + str ( i ) ] . getSelf ( ) ) self . L5Columns . append ( self . network . regions [ "L5Column_" + str ( i ) ] . getSelf ( ) ) self . L6Columns . append ( self . network . regions [ "L6Column_" + str ( i ) ] . getSelf ( ) )
Retrieve and store Python region instances for each column
9,974
def plotSuccessRate_varyNumColumns ( noiseSigma , noiseEverywhere ) : noiseLevels = [ x * 0.01 for x in xrange ( 0 , 101 , 5 ) ] l2Overrides = { "sampleSizeDistal" : 20 } columnCounts = [ 1 , 2 , 3 , 4 ] results = defaultdict ( list ) for trial in xrange ( 1 ) : print "trial" , trial objectDescriptions = createRandomObjectDescriptions ( 10 , 10 ) for numColumns in columnCounts : print "numColumns" , numColumns for noiseLevel in noiseLevels : r = doExperiment ( numColumns , l2Overrides , objectDescriptions , noiseLevel , noiseSigma , numInitialTraversals = 6 , noiseEverywhere = noiseEverywhere ) results [ ( numColumns , noiseLevel ) ] . extend ( r ) numCorrectActiveThreshold = 30 numIncorrectActiveThreshold = 10 plt . figure ( ) colors = dict ( zip ( columnCounts , ( 'r' , 'k' , 'g' , 'b' ) ) ) markers = dict ( zip ( columnCounts , ( 'o' , '*' , 'D' , 'x' ) ) ) for numColumns in columnCounts : y = [ ] for noiseLevel in noiseLevels : trials = results [ ( numColumns , noiseLevel ) ] numPassed = len ( [ True for numCorrect , numIncorrect in trials if numCorrect >= numCorrectActiveThreshold and numIncorrect <= numIncorrectActiveThreshold ] ) y . append ( numPassed / float ( len ( trials ) ) ) plt . plot ( noiseLevels , y , color = colors [ numColumns ] , marker = markers [ numColumns ] ) lgnd = plt . legend ( [ "%d columns" % numColumns for numColumns in columnCounts ] , bbox_to_anchor = ( 1.05 , 1 ) , loc = 2 , borderaxespad = 0.0 ) plt . xlabel ( "Mean feedforward noise level" ) plt . xticks ( [ 0.01 * n for n in xrange ( 0 , 101 , 10 ) ] ) plt . ylabel ( "Success rate" ) plt . yticks ( [ 0.0 , 0.2 , 0.4 , 0.6 , 0.8 , 1.0 ] ) plt . title ( "Inference with normally distributed noise (stdev=%.2f)" % noiseSigma ) plotPath = os . path . join ( "plots" , "successRate_varyColumnCount_sigma%.2f_%s.pdf" % ( noiseSigma , time . strftime ( "%Y%m%d-%H%M%S" ) ) ) plt . savefig ( plotPath , bbox_extra_artists = ( lgnd , ) , bbox_inches = "tight" ) print "Saved file %s" % plotPath
Run and plot the experiment varying the number of cortical columns .
9,975
def randomTraversal ( sensations , numTraversals ) : newSensations = [ ] for _ in range ( numTraversals ) : s = copy . deepcopy ( sensations ) random . shuffle ( s ) newSensations += s return newSensations
Given a list of sensations return the SDRs that would be obtained by numTraversals random traversals of that set of sensations .
9,976
def compute ( self , xt1 , yt1 , xt , yt , theta1t1 , theta2t1 , theta1 , theta2 ) : dx = xt - xt1 dy = yt - yt1 if self . numPoints < self . maxPoints : self . dxValues [ self . numPoints , 0 ] = dx self . dxValues [ self . numPoints , 1 ] = dy self . thetaValues [ self . numPoints , 0 ] = theta1 self . thetaValues [ self . numPoints , 1 ] = theta2 self . numPoints += 1 elif self . numPoints == self . maxPoints : print >> sys . stderr , "Max points exceeded, analyzing " , self . maxPoints , "points only" self . numPoints += 1
Accumulate the various inputs .
9,977
def bind ( cell1 , cell2 , moduleDimensions ) : cell1Coords = np . unravel_index ( cell1 , moduleDimensions ) cell2Coords = np . unravel_index ( cell2 , moduleDimensions ) transformCoords = [ ( c2 - c1 ) % m for c1 , c2 , m in itertools . izip ( cell1Coords , cell2Coords , moduleDimensions ) ] return np . ravel_multi_index ( transformCoords , moduleDimensions )
Return transform index for given cells .
9,978
def unbind ( cell1 , transform , moduleDimensions ) : cell1Coords = np . unravel_index ( cell1 , moduleDimensions ) transformCoords = np . unravel_index ( transform , moduleDimensions ) cell2Coords = [ ( t + c1 ) % m for c1 , t , m in itertools . izip ( cell1Coords , transformCoords , moduleDimensions ) ] return np . ravel_multi_index ( cell2Coords , moduleDimensions )
Return the cell index corresponding to the other half of the transform .
9,979
def updatePlaceWeights ( self ) : self . weightsPI += np . outer ( self . activationsI - self . boostTarget , self . activationsP ) * self . dt * self . learnFactorP * self . learningRate self . weightsPEL += np . outer ( self . activationsEL - self . boostTarget , self . activationsP ) * self . dt * self . learnFactorP * self . learningRate self . weightsPER += np . outer ( self . activationsER - self . boostTarget , self . activationsP ) * self . dt * self . learnFactorP * self . learningRate np . minimum ( self . weightsPI , 1 , self . weightsPI ) np . minimum ( self . weightsPEL , 1 , self . weightsPEL ) np . minimum ( self . weightsPER , 1 , self . weightsPER )
We use a simplified version of Hebbian learning to learn place weights. Cells above the boost target are wired to the currently-active place cells; cells below it have their connection strength to those place cells reduced.
9,980
def create_union_mnist_dataset ( ) : transform = transforms . Compose ( [ transforms . ToTensor ( ) , transforms . Normalize ( ( 0.1307 , ) , ( 0.3081 , ) ) ] ) mnist1 = datasets . MNIST ( 'data' , train = False , download = True , transform = transform ) data1 = zip ( mnist1 . test_data , mnist1 . test_labels ) mnist2 = datasets . MNIST ( 'data' , train = False , download = True , transform = transform ) data2 = zip ( mnist2 . test_data , mnist2 . test_labels ) random . shuffle ( data2 ) for i in range ( len ( data2 ) ) : if data1 [ i ] [ 1 ] == data2 [ i ] [ 1 ] : for j in range ( len ( data1 ) ) : if data1 [ i ] [ 1 ] != data2 [ j ] [ 1 ] and data2 [ i ] [ 1 ] != data1 [ j ] [ 1 ] : swap = data2 [ j ] data2 [ j ] = data2 [ i ] data2 [ i ] = swap break mnist2 . test_data , mnist2 . test_labels = zip ( * data2 ) return UnionDataset ( datasets = [ mnist1 , mnist2 ] , transform = lambda x , y : torch . max ( x , y ) )
Create a UnionDataset composed of two versions of the MNIST datasets where each item in the dataset contains 2 distinct images superimposed
9,981
def noisy ( pattern , noiseLevel , totalNumCells ) : n = int ( noiseLevel * len ( pattern ) ) noised = set ( pattern ) noised . difference_update ( random . sample ( noised , n ) ) for _ in xrange ( n ) : while True : v = random . randint ( 0 , totalNumCells - 1 ) if v not in pattern and v not in noised : noised . add ( v ) break return noised
Generate a noisy copy of a pattern .
9,982
def createRandomObjects ( numObjects , locationsPerObject , featurePoolSize ) : allFeatures = range ( featurePoolSize ) allLocations = range ( locationsPerObject ) objects = dict ( ( name , [ random . choice ( allFeatures ) for _ in xrange ( locationsPerObject ) ] ) for name in xrange ( numObjects ) ) return objects
Generate random objects .
9,983
def createL4L2Column(network, networkConfig, suffix=""):
  """
  Create a single cortical column containing one L4 and one L2 region,
  wired to sensor (and optionally external/location) inputs.

  @param network Network instance to add regions/links to.
  @param networkConfig (dict) Region parameters; must contain "L4Params",
         "L2Params", "L4RegionType", "externalInputSize", "sensorInputSize".
  @param suffix (str) Appended to every region name so multiple columns can
         coexist in one network.
  @return The same network, with the column's regions, phases, and links added.
  """
  externalInputName = "externalInput" + suffix
  sensorInputName = "sensorInput" + suffix
  L4ColumnName = "L4Column" + suffix
  L2ColumnName = "L2Column" + suffix
  # L4's basal input is the external (location) input; its apical input is
  # the L2 representation.
  L4Params = copy.deepcopy(networkConfig["L4Params"])
  L4Params["basalInputWidth"] = networkConfig["externalInputSize"]
  L4Params["apicalInputWidth"] = networkConfig["L2Params"]["cellCount"]
  # The external input (and its lateral SP) only exists when configured.
  if networkConfig["externalInputSize"] > 0:
    network.addRegion(
        externalInputName, "py.RawSensor",
        json.dumps({"outputWidth": networkConfig["externalInputSize"]}))
  network.addRegion(
      sensorInputName, "py.RawSensor",
      json.dumps({"outputWidth": networkConfig["sensorInputSize"]}))
  if networkConfig["externalInputSize"] > 0:
    _addLateralSPRegion(network, networkConfig, suffix)
  _addFeedForwardSPRegion(network, networkConfig, suffix)
  network.addRegion(
      L4ColumnName, networkConfig["L4RegionType"], json.dumps(L4Params))
  network.addRegion(
      L2ColumnName, "py.ColumnPoolerRegion",
      json.dumps(networkConfig["L2Params"]))
  # Phases: sensors at 0, L4 at 2, L2 at 3 (SP regions set their own phases).
  if networkConfig["externalInputSize"] > 0:
    network.setPhases(externalInputName, [0])
  network.setPhases(sensorInputName, [0])
  _setLateralSPPhases(network, networkConfig)
  _setFeedForwardSPPhases(network, networkConfig)
  network.setPhases(L4ColumnName, [2])
  network.setPhases(L2ColumnName, [3])
  if networkConfig["externalInputSize"] > 0:
    _linkLateralSPRegion(network, networkConfig, externalInputName,
                         L4ColumnName)
  _linkFeedForwardSPRegion(network, networkConfig, sensorInputName,
                           L4ColumnName)
  # L4 -> L2 feedforward: active cells drive L2; predicted-active cells are
  # the growth candidates for L2's proximal learning.
  network.link(L4ColumnName, L2ColumnName, "UniformLink", "",
               srcOutput="activeCells", destInput="feedforwardInput")
  network.link(L4ColumnName, L2ColumnName, "UniformLink", "",
               srcOutput="predictedActiveCells",
               destInput="feedforwardGrowthCandidates")
  # Optional L2 -> L4 feedback (apical), delayed one time step.
  if networkConfig.get("enableFeedback", True):
    network.link(L2ColumnName, L4ColumnName, "UniformLink", "",
                 srcOutput="feedForwardOutput", destInput="apicalInput",
                 propagationDelay=1)
  # Propagate resets from the sensor to both layers.
  network.link(sensorInputName, L2ColumnName, "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
  network.link(sensorInputName, L4ColumnName, "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
  enableProfiling(network)
  return network
Create a single column containing one L4 and one L2.
9,984
def createMultipleL4L2Columns(network, networkConfig):
  """
  Create a network of several cortical columns, each containing one L4 and
  one L2 and built exactly as in createL4L2Column. All L2 regions are then
  fully connected to one another through their lateral inputs.

  @param network Network instance to populate.
  @param networkConfig (dict) Shared configuration; "numCorticalColumns"
         selects the column count.
  @return The populated network.
  """
  nColumns = networkConfig["numCorticalColumns"]
  # Build each column from a private deep copy of the config so per-column
  # adjustments (seed offset, lateral-input count) never leak across columns.
  for col in xrange(nColumns):
    config = copy.deepcopy(networkConfig)
    l2Config = config["L2Params"]
    l2Config["seed"] = l2Config.get("seed", 42) + col
    l2Config["numOtherCorticalColumns"] = nColumns - 1
    network = createL4L2Column(network, config, "_" + str(col))
  # Fully connect all distinct L2 pairs laterally, one step delayed.
  for src in xrange(nColumns):
    for dst in xrange(nColumns):
      if src == dst:
        continue
      network.link("L2Column" + "_" + str(src), "L2Column" + "_" + str(dst),
                   "UniformLink", "",
                   srcOutput="feedForwardOutput", destInput="lateralInput",
                   propagationDelay=1)
  enableProfiling(network)
  return network
Create a network consisting of multiple columns. Each column contains one L4 and one L2 and is identical in structure to the column created by createL4L2Column. In addition, all the L2 columns are fully connected to each other through their lateral inputs.
9,985
def createMultipleL4L2ColumnsWithTopology(network, networkConfig):
  """
  Create a network of multiple L4+L2 columns (each built as in
  createL4L2Column) whose L2 regions are laterally connected based on
  spatial topology: columns within "maxConnectionDistance" of each other
  are linked, and additional long-range links are added at random with
  probability "longDistanceConnections".

  @param network Network instance to populate.
  @param networkConfig (dict) Must include "numCorticalColumns" and
         "maxConnectionDistance"; may include "columnPositions" (list of
         coordinates) and "longDistanceConnections" (probability, default 0).
  @return The populated network.
  """
  numCorticalColumns = networkConfig["numCorticalColumns"]
  # outgoing/incoming lateral adjacency lists, one entry per column
  output_lateral_connections = [[] for i in xrange(numCorticalColumns)]
  input_lateral_connections = [[] for i in xrange(numCorticalColumns)]
  columnPositions = networkConfig.get("columnPositions", None)
  if columnPositions is None:
    # Default layout: pack the columns into the smallest square grid.
    columnPositions = []
    side_length = int(numpy.ceil(numpy.sqrt(numCorticalColumns)))
    for i in range(side_length):
      for j in range(side_length):
        columnPositions.append((i, j))
    columnPositions = columnPositions[:numCorticalColumns]
  longDistanceConnections = networkConfig.get("longDistanceConnections", 0.)
  # Connect column pairs by Euclidean distance, plus random long-range links.
  for i, src_pos in enumerate(columnPositions):
    for j, dest_pos in enumerate(columnPositions):
      if i != j:
        if (numpy.linalg.norm(numpy.asarray(src_pos) -
                              numpy.asarray(dest_pos))
            <= networkConfig["maxConnectionDistance"]
            or numpy.random.rand() < longDistanceConnections):
          output_lateral_connections[i].append(j)
          input_lateral_connections[j].append(i)
  # Build each column; its lateral-input count reflects its actual in-degree.
  for i in xrange(numCorticalColumns):
    networkConfigCopy = copy.deepcopy(networkConfig)
    layerConfig = networkConfigCopy["L2Params"]
    layerConfig["seed"] = layerConfig.get("seed", 42) + i
    layerConfig["numOtherCorticalColumns"] = len(input_lateral_connections[i])
    suffix = "_" + str(i)
    network = createL4L2Column(network, networkConfigCopy, suffix)
  # Wire the lateral links recorded above, one step delayed.
  for i, connections in enumerate(output_lateral_connections):
    suffixSrc = "_" + str(i)
    for j in connections:
      suffixDest = "_" + str(j)
      network.link("L2Column" + suffixSrc, "L2Column" + suffixDest,
                   "UniformLink", "",
                   srcOutput="feedForwardOutput", destInput="lateralInput",
                   propagationDelay=1)
  enableProfiling(network)
  return network
Create a network consisting of multiple columns. Each column contains one L4 and one L2 and is identical in structure to the column created by createL4L2Column. In addition, the L2 columns are connected to each other through their lateral inputs, based on the topological information provided.
9,986
def loadExperimentData(folder, area):
  """
  Load the experiment's data for one recording area from a MATLAB file.

  Reads ``<folder>/Combo3_<area>.mat`` and returns its 'data' variable in a
  Python-friendly form (structs as objects, singleton dimensions squeezed).

  @param folder (str) Directory containing the .mat files.
  @param area (str) Area name used in the file name.
  @return The 'data' variable from the MATLAB file.
  """
  matFilePath = os.path.join(folder, "Combo3_{}.mat".format(area))
  matContents = sio.loadmat(matFilePath,
                            variable_names=['data'],
                            struct_as_record=False,
                            squeeze_me=True)
  return matContents['data']
Loads the experiment s data from a MATLAB file into a python friendly structure .
9,987
def classifierPredict(testVector, storedVectors):
  """
  Return the overlap of the test vector with each stored object
  representation.

  Overlap for a class is the sum of the element-wise minimum between the
  test vector and that class's stored vector.

  @param testVector (array) 1-D activity vector.
  @param storedVectors (array) 2-D array, one stored representation per row.
  @return (numpy.ndarray) Overlap per class, shape (numClasses,), float64
          (same dtype as the original np.zeros accumulator).
  """
  # Vectorized: broadcasting the minimum over all rows at once replaces the
  # original per-class Python loop with identical results.
  return np.minimum(testVector, storedVectors).sum(axis=1).astype(np.float64)
Return overlap of the testVector with stored representations for each object .
9,988
def run_multiple_column_experiment():
  """
  Compare the ideal observer against a multi-column sensorimotor network:
  run the ideal classifier over a grid of (columns, objects, locations,
  features, trials), run the HTM experiment pool over the same ranges,
  pickle both result sets, and plot their convergence curves together.
  """
  # Experiment grid.
  featureRange = [5, 10, 20, 30]
  pointRange = 1
  objectRange = [100]
  numLocations = [10]
  numPoints = 10
  numTrials = 10
  columnRange = [1, 2, 3, 4, 5, 6, 7, 8]
  useLocation = 1
  resultsDir = os.path.dirname(os.path.realpath(__file__))
  # One argument dict per (columns, objects, locations, features, trial).
  args = []
  for c in reversed(columnRange):
    for o in reversed(objectRange):
      for l in numLocations:
        for f in featureRange:
          for t in range(numTrials):
            args.append(
                {"numObjects": o,
                 "numLocations": l,
                 "numFeatures": f,
                 "numColumns": c,
                 "trialNum": t,
                 "pointRange": pointRange,
                 "numPoints": numPoints,
                 "useLocation": useLocation})
  print "Number of experiments:", len(args)
  # Ideal observer: fan the runs out over all CPUs and pickle the results.
  idealResultsFile = os.path.join(
      resultsDir, "ideal_multi_column_useLocation_{}.pkl".format(useLocation))
  pool = Pool(processes=cpu_count())
  result = pool.map(run_ideal_classifier, args)
  with open(idealResultsFile, "wb") as f:
    cPickle.dump(result, f)
  # HTM network: same parameter ranges via the experiment pool helper.
  htmResultsFile = os.path.join(resultsDir, "column_convergence_results.pkl")
  runExperimentPool(
      numObjects=objectRange,
      numLocations=[10],
      numFeatures=featureRange,
      numColumns=columnRange,
      numPoints=10,
      nTrials=numTrials,
      numWorkers=cpu_count(),
      resultsName=htmResultsFile)
  # Load both result sets and overlay their convergence plots
  # (ideal observer drawn dashed).
  with open(htmResultsFile, "rb") as f:
    results = cPickle.load(f)
  with open(idealResultsFile, "rb") as f:
    resultsIdeal = cPickle.load(f)
  plt.figure()
  plotConvergenceByColumn(results, columnRange, featureRange, numTrials)
  plotConvergenceByColumn(resultsIdeal, columnRange, featureRange, numTrials,
                          "--")
  plt.savefig('plots/ideal_observer_multiple_column.pdf')
Compare the ideal observer against a multi - column sensorimotor network .
9,989
def save(callLog, logFilename):
  """
  Persist the call log history to ``logFilename`` via cPickle.

  @param callLog Picklable call-log object.
  @param logFilename (str) Destination file path (opened in binary mode).
  """
  with open(logFilename, "wb") as logFile:
    cPickle.dump(callLog, logFile)
Save the call log history into this file .
9,990
def _getDefaultCombinedL4Params(self, numInputBits, inputSize,
                                numExternalInputBits, externalInputSize,
                                L2CellCount):
  """
  Return a good default parameter dict for a combined L4 region.

  @param numInputBits (int) Active bits in the feedforward (sensor) input.
  @param inputSize (int) Feedforward input width; becomes the column count.
  @param numExternalInputBits (int) Active bits in the external input.
  @param externalInputSize (int) External (location) input width.
  @param L2CellCount (int) Width of the apical input coming from L2.
  @return (dict) Keyword parameters for the L4 region.
  """
  # Segments sample from both input streams; thresholds are set to 60% of
  # the larger stream's active-bit count.
  sampleSize = numExternalInputBits + numInputBits
  activationThreshold = int(max(numExternalInputBits, numInputBits) * .6)
  minThreshold = activationThreshold
  return {
      "columnCount": inputSize,
      "cellsPerColumn": 16,
      "learn": True,
      "learnOnOneCell": False,
      "initialPermanence": 0.41,
      "connectedPermanence": 0.6,
      "permanenceIncrement": 0.1,
      "permanenceDecrement": 0.02,
      "minThreshold": minThreshold,
      "basalPredictedSegmentDecrement": 0.001,
      "apicalPredictedSegmentDecrement": 0.0,
      # Apical feedback lowers the effective basal threshold.
      "reducedBasalThreshold": int(activationThreshold * 0.6),
      "activationThreshold": activationThreshold,
      "sampleSize": sampleSize,
      "implementation": "ApicalTiebreak",
      "seed": self.seed,
      # Basal input = all L4 cells (inputSize columns x 16 cells) plus the
      # external input; apical input = the L2 object layer.
      "basalInputWidth": inputSize * 16 + externalInputSize,
      "apicalInputWidth": L2CellCount,
  }
Returns a good default set of parameters to use in a combined L4 region .
9,991
def findBinomialNsWithExpectedSampleMinimum(desiredValuesSorted, p,
                                            numSamples, nMax):
  """
  For each desired value, find an approximate (interpolated) n for which
  the expected sample minimum of numSamples draws from Binomial(n, p)
  equals that value.

  @param desiredValuesSorted (sequence) Target expected values, ascending.
  @param p (float) Binomial success probability.
  @param numSamples (int) Number of samples whose minimum is taken.
  @param nMax (int) Largest n to consider.
  @return (list of tuples) One (interpolatedN, valueAtFloorN, valueAtCeilN)
          per desired value. Desired values whose n would exceed nMax are
          skipped, so the result may be shorter than the input.
  """
  # Precompute the expected sample minimum for every n in [0, nMax].
  actualValues = [
      getExpectedValue(
          SampleMinimumDistribution(numSamples,
                                    BinomialDistribution(n, p, cache=True)))
      for n in xrange(nMax + 1)]
  results = []
  # Because desired values are sorted ascending, n only ever moves forward.
  n = 0
  for desiredValue in desiredValuesSorted:
    # Advance n until the expected value at n+1 reaches the target.
    while n + 1 <= nMax and actualValues[n + 1] < desiredValue:
      n += 1
    if n + 1 > nMax:
      # Target unreachable within nMax; remaining (larger) targets are too.
      break
    # Linear interpolation between n and n+1.
    interpolated = n + ((desiredValue - actualValues[n]) /
                        (actualValues[n + 1] - actualValues[n]))
    result = (interpolated, actualValues[n], actualValues[n + 1])
    results.append(result)
  return results
For each desired value find an approximate n for which the sample minimum has a expected value equal to this value .
9,992
def findBinomialNsWithLowerBoundSampleMinimum(confidence, desiredValuesSorted,
                                              p, numSamples, nMax):
  """
  For each desired value, find an approximate (interpolated) n at which the
  sample minimum of numSamples draws from Binomial(n, p) is at least that
  value with the given confidence.

  @param confidence (float) Required probability of the lower bound.
  @param desiredValuesSorted (sequence) Target lower bounds, ascending.
  @param p (float) Binomial success probability.
  @param numSamples (int) Number of samples whose minimum is taken.
  @param nMax (int) Largest n to consider.
  @return (list of tuples) One (interpolatedN, probAtFloorN, probAtCeilN)
          per desired value; targets that would need n > nMax are skipped.
  """
  def probAtLeast(n, numOccurrences):
    # P(sample minimum >= numOccurrences) under Binomial(n, p).
    distribution = SampleMinimumDistribution(numSamples,
                                             BinomialDistribution(n, p))
    return 1 - distribution.cdf(numOccurrences - 1)

  results = []
  n = 0
  # Desired values are sorted ascending, so n only moves forward.
  for desiredValue in desiredValuesSorted:
    while n + 1 <= nMax and probAtLeast(n + 1, desiredValue) < confidence:
      n += 1
    if n + 1 > nMax:
      # Search range exhausted; all remaining targets are unreachable.
      break
    lower = probAtLeast(n, desiredValue)
    upper = probAtLeast(n + 1, desiredValue)
    # Linearly interpolate to the n where the bound crosses the confidence.
    results.append((n + ((confidence - lower) / (upper - lower)),
                    lower, upper))
  return results
For each desired value find an approximate n for which the sample minimum has a probabilistic lower bound equal to this value .
9,993
def tempoAdjust1(self, tempoFactor):
  """
  Adjust tempo based only on recent active apical input: halve the factor
  (speed up) when any apical intersection is active, otherwise double it
  (slow down).

  @param tempoFactor (float) Current tempo scaling factor.
  @return (float) Adjusted tempo factor.
  """
  speedUp = self.apicalIntersect.any()
  return tempoFactor * 0.5 if speedUp else tempoFactor * 2
Adjust tempo based on recent active apical input only
9,994
def tempoAdjust2(self, tempoFactor):
  """
  Adjust tempo by aggregating active basal cell votes for early vs. late.

  Basally-predicted cells without apical confirmation vote "late"
  (negative); apically-confirmed cells vote "early" (positive). A positive
  net vote halves the tempo factor (speed up), a negative one doubles it
  (slow down), and a tie is broken with a coin flip.

  Fix: the original used Python 2 ``print`` statements; these are now
  print() calls, which parse on Python 3 and produce identical output on
  Python 2 for the single-argument form.

  @param tempoFactor (float) Current tempo scaling factor.
  @return (float) Adjusted tempo factor.
  """
  late_votes = (len(self.adtm.getNextBasalPredictedCells())
                - len(self.apicalIntersect)) * -1
  early_votes = len(self.apicalIntersect)
  votes = late_votes + early_votes
  print('vote tally', votes)
  if votes > 0:
    tempoFactor = tempoFactor * 0.5
    print('speed up')
  elif votes < 0:
    tempoFactor = tempoFactor * 2
    print('slow down')
  else:
    print('pick randomly')
    if random.random() > 0.5:
      tempoFactor = tempoFactor * 0.5
      print('random pick: speed up')
    else:
      tempoFactor = tempoFactor * 2
      print('random pick: slow down')
  return tempoFactor
Adjust tempo by aggregating active basal cell votes for pre vs . post
9,995
def _countWhereGreaterEqualInRows(sparseMatrix, rows, threshold):
  """
  Like countWhereGreaterOrEqual, but restricted to an arbitrary selection
  of rows and with no column filtering.

  @param sparseMatrix Matrix exposing countWhereGreaterOrEqual and nCols.
  @param rows (iterable) Row indices to include.
  @param threshold Minimum value for an entry to be counted.
  @return (int) Total count of entries >= threshold across the given rows.
  """
  total = 0
  for row in rows:
    # Count entries >= threshold across the full width of this single row.
    total += sparseMatrix.countWhereGreaterOrEqual(
        row, row + 1, 0, sparseMatrix.nCols(), threshold)
  return total
Like countWhereGreaterOrEqual but for an arbitrary selection of rows and without any column filtering .
9,996
def compute(self,
            feedforwardInput=(),
            lateralInputs=(),
            feedforwardGrowthCandidates=None,
            learn=True,
            predictedInput=None,):
  """
  Run one time step of the column pooler algorithm.

  @param feedforwardInput (sequence) Active feedforward input bits.
  @param lateralInputs (sequence) Lateral inputs from other columns.
  @param feedforwardGrowthCandidates (sequence or None) Bits eligible for
         new proximal synapses; defaults to feedforwardInput.
  @param learn (bool) Whether to run the learning-mode path.
  @param predictedInput (sequence or None) Predicted input bits, used only
         in online learning mode.
  """
  if feedforwardGrowthCandidates is None:
    feedforwardGrowthCandidates = feedforwardInput
  if not learn:
    # Pure inference, no synapse changes.
    self._computeInferenceMode(feedforwardInput, lateralInputs)
  elif not self.onlineLearning:
    # Classic learning mode.
    self._computeLearningMode(feedforwardInput, lateralInputs,
                              feedforwardGrowthCandidates)
  else:
    # Online learning: behavior depends on how well the input was predicted
    # and on whether the current object representation is well-formed.
    if (predictedInput is not None and
        len(predictedInput) > self.predictedInhibitionThreshold):
      # Enough of the input was predicted: infer and learn using only the
      # predicted-active subset of the input.
      predictedActiveInput = numpy.intersect1d(feedforwardInput,
                                               predictedInput)
      # NOTE(review): predictedGrowthCandidates is computed but never used
      # below — confirm whether learning should receive it instead of the
      # full feedforwardGrowthCandidates.
      predictedGrowthCandidates = numpy.intersect1d(
          feedforwardGrowthCandidates, predictedInput)
      self._computeInferenceMode(predictedActiveInput, lateralInputs)
      self._computeLearningMode(predictedActiveInput, lateralInputs,
                                feedforwardGrowthCandidates)
    elif not self.minSdrSize <= len(self.activeCells) <= self.maxSdrSize:
      # Current representation has too few/many active cells: re-infer
      # before learning.
      self._computeInferenceMode(feedforwardInput, lateralInputs)
      self._computeLearningMode(feedforwardInput, lateralInputs,
                                feedforwardGrowthCandidates)
    else:
      # Representation is stable: keep learning on it.
      self._computeLearningMode(feedforwardInput, lateralInputs,
                                feedforwardGrowthCandidates)
Runs one time step of the column pooler algorithm .
9,997
def numberOfConnectedProximalSynapses(self, cells=None):
  """
  Return the number of proximal synapses at or above the connected
  permanence threshold on these cells.

  @param cells (iterable or None) Cells to inspect; None means all cells.
  @return (int) Total connected proximal synapse count.
  """
  targetCells = xrange(self.numberOfCells()) if cells is None else cells
  return _countWhereGreaterEqualInRows(self.proximalPermanences,
                                       targetCells,
                                       self.connectedPermanenceProximal)
Returns the number of proximal connected synapses on these cells .
9,998
def numberOfProximalSynapses(self, cells=None):
  """
  Return the number of proximal synapses with permanence > 0 on these cells.

  @param cells (iterable or None) Cells to inspect; None means all cells.
  @return (int) Total proximal synapse count.
  """
  if cells is None:
    cells = xrange(self.numberOfCells())
  # Every nonzero permanence entry on a cell's row is one synapse.
  return sum(self.proximalPermanences.nNonZerosOnRow(cell) for cell in cells)
Returns the number of proximal synapses with permanence > 0 on these cells .
9,999
def numberOfDistalSegments(self, cells=None):
  """
  Return the total number of distal segments for these cells.

  A cell contributes one segment for its internal distal permanence row and
  one per external distal permanence matrix, counting a segment only when
  its row holds at least one nonzero permanence.

  @param cells (iterable or None) Cells to inspect; None means all cells.
  @return (int) Total distal segment count.
  """
  if cells is None:
    cells = xrange(self.numberOfCells())
  total = 0
  for cell in cells:
    # Check the internal matrix first, then each external matrix, in order.
    matrices = [self.internalDistalPermanences] + list(self.distalPermanences)
    total += sum(1 for m in matrices if m.nNonZerosOnRow(cell) > 0)
  return total
Returns the total number of distal segments for these cells .