Columns: idx (int64, 0 to 63k) · question (string, lengths 61 to 4.03k) · target (string, lengths 6 to 1.23k)
10,000
def numberOfConnectedDistalSynapses ( self , cells = None ) : if cells is None : cells = xrange ( self . numberOfCells ( ) ) n = _countWhereGreaterEqualInRows ( self . internalDistalPermanences , cells , self . connectedPermanenceDistal ) for permanences in self . distalPermanences : n += _countWhereGreaterEqualInRows ( permanences , cells , self . connectedPermanenceDistal ) return n
Returns the number of connected distal synapses on these cells .
10,001
def _learn ( permanences , rng , activeCells , activeInput , growthCandidateInput , sampleSize , initialPermanence , permanenceIncrement , permanenceDecrement , connectedPermanence ) : permanences . incrementNonZerosOnOuter ( activeCells , activeInput , permanenceIncrement ) permanences . incrementNonZerosOnRowsExcludingCols ( activeCells , activeInput , - permanenceDecrement ) permanences . clipRowsBelowAndAbove ( activeCells , 0.0 , 1.0 ) if sampleSize == - 1 : permanences . setZerosOnOuter ( activeCells , activeInput , initialPermanence ) else : existingSynapseCounts = permanences . nNonZerosPerRowOnCols ( activeCells , activeInput ) maxNewByCell = numpy . empty ( len ( activeCells ) , dtype = "int32" ) numpy . subtract ( sampleSize , existingSynapseCounts , out = maxNewByCell ) permanences . setRandomZerosOnOuter ( activeCells , growthCandidateInput , maxNewByCell , initialPermanence , rng )
For each active cell, reinforce active synapses, punish inactive synapses, and grow new synapses to a subset of the active input bits that the cell isn't already connected to.
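As a rough illustration of the rule described above, here is a hedged dense-matrix sketch in plain numpy. The real implementation operates on nupic SparseMatrix objects (incrementNonZerosOnOuter, setRandomZerosOnOuter, etc.), so the function name learn_dense and the dense layout are assumptions for readability, not the library's API.

import numpy as np

def learn_dense(perm, rng, activeCells, activeInput, growthCandidates,
                sampleSize, initialPerm, permInc, permDec):
    # perm: dense (numCells x numInputs) permanence matrix; the index
    # arguments are numpy integer index arrays; rng is a numpy RandomState.
    onActive = np.zeros(perm.shape[1], dtype=bool)
    onActive[activeInput] = True
    for cell in activeCells:
        existing = perm[cell] > 0
        perm[cell, existing & onActive] += permInc    # reinforce active synapses
        perm[cell, existing & ~onActive] -= permDec   # punish inactive synapses
        np.clip(perm[cell], 0.0, 1.0, out=perm[cell])
        if sampleSize == -1:
            # connect to every active input bit not already connected to
            grow = activeInput[perm[cell, activeInput] == 0]
        else:
            maxNew = max(0, sampleSize - np.count_nonzero(perm[cell, activeInput]))
            candidates = growthCandidates[perm[cell, growthCandidates] == 0]
            grow = rng.permutation(candidates)[:maxNew]
        perm[cell, grow] = initialPerm                # grow new synapses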
10,002
def runExperiment ( n , w , threshold , cellsPerColumn , folder , numTrials = 5 , cleverTMSDRs = False ) : if not os . path . exists ( folder ) : try : os . makedirs ( folder ) except OSError : pass filename = "{}/n_{}_w_{}_threshold_{}_cellsPerColumn_{}.json" . format ( folder , n , w , threshold , cellsPerColumn ) if len ( glob . glob ( filename ) ) == 0 : print ( "Starting: {}/n_{}_w_{}_threshold_{}_cellsPerColumn_{}" . format ( folder , n , w , threshold , cellsPerColumn ) ) result = defaultdict ( list ) for _ in xrange ( numTrials ) : exp = PoolOfPairsLocation1DExperiment ( ** { "numMinicolumns" : n , "numActiveMinicolumns" : w , "poolingThreshold" : threshold , "cellsPerColumn" : cellsPerColumn , "minicolumnSDRs" : generateMinicolumnSDRs ( n = n , w = w , threshold = threshold ) , } ) if cleverTMSDRs : exp . trainWithSpecificPairSDRs ( carefullyCollideContexts ( numContexts = 25 , numCells = cellsPerColumn , numMinicolumns = n ) ) else : exp . train ( ) for unionSize in [ 1 , 3 , 5 , 7 , 9 , 11 , 13 , 15 , 17 , 19 , 21 , 23 , 25 ] : additionalSDRCounts = exp . testInferenceOnUnions ( unionSize ) result [ unionSize ] += additionalSDRCounts with open ( filename , "w" ) as fOut : json . dump ( sorted ( result . items ( ) , key = lambda x : x [ 0 ] ) , fOut ) print ( "Wrote:" , filename )
Run a PoolOfPairsLocation1DExperiment with various union sizes.
10,003
def train ( self ) : for iDriving , cDriving in enumerate ( self . drivingOperandSDRs ) : minicolumnSDR = self . minicolumnSDRs [ iDriving ] self . pairLayerProximalConnections . associate ( minicolumnSDR , cDriving ) for iContext , cContext in enumerate ( self . contextOperandSDRs ) : iResult = ( iContext + iDriving ) % self . numLocations cResult = self . resultSDRs [ iResult ] self . pairLayer . compute ( minicolumnSDR , basalInput = cContext ) cPair = self . pairLayer . getWinnerCells ( ) self . poolingLayer . associate ( cResult , cPair )
Train the pair layer and pooling layer .
10,004
def run_noise_experiment ( num_neurons = 1 , a = 128 , dim = 6000 , test_noise_levels = range ( 15 , 100 , 5 ) , num_samples = 500 , num_dendrites = 500 , dendrite_length = 30 , theta = 8 , num_trials = 100 ) : nonlinearity = threshold_nonlinearity ( theta ) for noise in test_noise_levels : fps = [ ] fns = [ ] for trial in range ( num_trials ) : successful_initialization = False while not successful_initialization : neuron = Neuron ( size = dendrite_length * num_dendrites , num_dendrites = num_dendrites , dendrite_length = dendrite_length , dim = dim , nonlinearity = nonlinearity ) data = generate_evenly_distributed_data_sparse ( dim = dim , num_active = a , num_samples = num_samples ) labels = [ 1 for i in range ( num_samples ) ] neuron . HTM_style_initialize_on_data ( data , labels ) error , fp , fn = get_error ( data , labels , [ neuron ] ) print "Initialization error is {}, with {} false positives and {} false negatives" . format ( error , fp , fn ) if error == 0 : successful_initialization = True else : print "Repeating to get a successful initialization" apply_noise ( data , noise ) error , fp , fn = get_error ( data , labels , [ neuron ] ) fps . append ( fp ) fns . append ( fn ) print "Error at noise {} is {}, with {} false positives and {} false negatives" . format ( noise , error , fp , fn ) with open ( "noise_FN_{}.txt" . format ( theta ) , "a" ) as f : f . write ( str ( noise ) + ", " + str ( numpy . sum ( fns ) ) + ", " + str ( num_trials * num_samples ) + "\n" )
Tests the impact of noise on a neuron, using an HTM approach to a P&M model of a neuron. The nonlinearity is a simple threshold at theta, as in the original version of this experiment, and each dendrite is bound by the initialization to a single pattern. Only one neuron is used, unlike in the P&M classification experiment, and a successful identification is simply defined as at least one dendrite having theta active synapses.
10,005
def reset ( self , params , repetition ) : pprint . pprint ( params ) self . initialize ( params , repetition ) dataDir = params . get ( 'dataDir' , 'data' ) self . transform_train = transforms . Compose ( [ transforms . RandomCrop ( 32 , padding = 4 ) , transforms . RandomHorizontalFlip ( ) , transforms . ToTensor ( ) , transforms . Normalize ( ( 0.4914 , 0.4822 , 0.4465 ) , ( 0.2023 , 0.1994 , 0.2010 ) ) , ] ) self . trainset = datasets . CIFAR10 ( root = dataDir , train = True , download = True , transform = self . transform_train ) self . createModel ( params , repetition ) print ( "Torch reports" , torch . cuda . device_count ( ) , "GPUs available" ) if torch . cuda . device_count ( ) > 1 : self . model = torch . nn . DataParallel ( self . model ) self . model . to ( self . device ) self . optimizer = self . createOptimizer ( self . model ) self . lr_scheduler = self . createLearningRateScheduler ( self . optimizer ) self . test_loaders = self . createTestLoaders ( self . noise_values )
Called at the beginning of each experiment and each repetition
10,006
def killCellRegion ( self , centerColumn , radius ) : self . deadCols = topology . wrappingNeighborhood ( centerColumn , radius , self . _columnDimensions ) self . deadColumnInputSpan = self . getConnectedSpan ( self . deadCols ) self . removeDeadColumns ( )
Kill cells around a centerColumn within radius
10,007
def compute ( self , inputVector , learn , activeArray ) : if not isinstance ( inputVector , numpy . ndarray ) : raise TypeError ( "Input vector must be a numpy array, not %s" % str ( type ( inputVector ) ) ) if inputVector . size != self . _numInputs : raise ValueError ( "Input vector dimensions don't match. Expecting %s but got %s" % ( inputVector . size , self . _numInputs ) ) self . _updateBookeepingVars ( learn ) inputVector = numpy . array ( inputVector , dtype = realDType ) inputVector . reshape ( - 1 ) self . _overlaps = self . _calculateOverlap ( inputVector ) if learn : self . _boostedOverlaps = self . _boostFactors * self . _overlaps else : self . _boostedOverlaps = self . _overlaps activeColumns = self . _inhibitColumns ( self . _boostedOverlaps ) if learn : self . _adaptSynapses ( inputVector , activeColumns ) self . _updateDutyCycles ( self . _overlaps , activeColumns ) self . _bumpUpWeakColumns ( ) self . _updateTargetActivityDensity ( ) self . _updateBoostFactors ( ) if self . _isUpdateRound ( ) : self . _updateInhibitionRadius ( ) self . _updateMinDutyCycles ( ) activeArray . fill ( 0 ) activeArray [ activeColumns ] = 1
This is the primary public method of the SpatialPooler class. This function takes an input vector and outputs the indices of the active columns. If learn is set to True, this method also updates the permanences of the columns.
10,008
def getConstructorArguments ( ) : argspec = inspect . getargspec ( ColumnPooler . __init__ ) return argspec . args [ 1 : ] , argspec . defaults
Return the constructor arguments and associated default values for ColumnPooler.
10,009
def initialize ( self ) : if self . _pooler is None : params = { "inputWidth" : self . inputWidth , "lateralInputWidths" : [ self . cellCount ] * self . numOtherCorticalColumns , "cellCount" : self . cellCount , "sdrSize" : self . sdrSize , "onlineLearning" : self . onlineLearning , "maxSdrSize" : self . maxSdrSize , "minSdrSize" : self . minSdrSize , "synPermProximalInc" : self . synPermProximalInc , "synPermProximalDec" : self . synPermProximalDec , "initialProximalPermanence" : self . initialProximalPermanence , "minThresholdProximal" : self . minThresholdProximal , "sampleSizeProximal" : self . sampleSizeProximal , "connectedPermanenceProximal" : self . connectedPermanenceProximal , "predictedInhibitionThreshold" : self . predictedInhibitionThreshold , "synPermDistalInc" : self . synPermDistalInc , "synPermDistalDec" : self . synPermDistalDec , "initialDistalPermanence" : self . initialDistalPermanence , "activationThresholdDistal" : self . activationThresholdDistal , "sampleSizeDistal" : self . sampleSizeDistal , "connectedPermanenceDistal" : self . connectedPermanenceDistal , "inertiaFactor" : self . inertiaFactor , "seed" : self . seed , } self . _pooler = ColumnPooler ( ** params )
Initialize the internal objects .
10,010
def compute ( self , inputs , outputs ) : if "resetIn" in inputs : assert len ( inputs [ "resetIn" ] ) == 1 if inputs [ "resetIn" ] [ 0 ] != 0 : self . reset ( ) outputs [ "feedForwardOutput" ] [ : ] = 0 outputs [ "activeCells" ] [ : ] = 0 return feedforwardInput = numpy . asarray ( inputs [ "feedforwardInput" ] . nonzero ( ) [ 0 ] , dtype = "uint32" ) if "feedforwardGrowthCandidates" in inputs : feedforwardGrowthCandidates = numpy . asarray ( inputs [ "feedforwardGrowthCandidates" ] . nonzero ( ) [ 0 ] , dtype = "uint32" ) else : feedforwardGrowthCandidates = feedforwardInput if "lateralInput" in inputs : lateralInputs = tuple ( numpy . asarray ( singleInput . nonzero ( ) [ 0 ] , dtype = "uint32" ) for singleInput in numpy . split ( inputs [ "lateralInput" ] , self . numOtherCorticalColumns ) ) else : lateralInputs = ( ) if "predictedInput" in inputs : predictedInput = numpy . asarray ( inputs [ "predictedInput" ] . nonzero ( ) [ 0 ] , dtype = "uint32" ) else : predictedInput = None self . _pooler . compute ( feedforwardInput , lateralInputs , feedforwardGrowthCandidates , learn = self . learningMode , predictedInput = predictedInput ) outputs [ "activeCells" ] [ : ] = 0 outputs [ "activeCells" ] [ self . _pooler . getActiveCells ( ) ] = 1 if self . defaultOutputType == "active" : outputs [ "feedForwardOutput" ] [ : ] = outputs [ "activeCells" ] else : raise Exception ( "Unknown outputType: " + self . defaultOutputType )
Run one iteration of compute .
10,011
def progress ( params , rep ) : name = params [ 'name' ] fullpath = os . path . join ( params [ 'path' ] , params [ 'name' ] ) logname = os . path . join ( fullpath , '%i.log' % rep ) if os . path . exists ( logname ) : logfile = open ( logname , 'r' ) lines = logfile . readlines ( ) logfile . close ( ) return int ( 100 * len ( lines ) / params [ 'iterations' ] ) else : return 0
Helper function to calculate the progress made on one experiment .
10,012
def convert_param_to_dirname ( param ) : if type ( param ) == types . StringType : return param else : return re . sub ( "0+$" , '0' , '%f' % param )
Helper function to convert a parameter value to a valid directory name .
10,013
def parse_opt ( self ) : optparser = optparse . OptionParser ( ) optparser . add_option ( '-c' , '--config' , action = 'store' , dest = 'config' , type = 'string' , default = 'experiments.cfg' , help = "your experiments config file" ) optparser . add_option ( '-n' , '--numcores' , action = 'store' , dest = 'ncores' , type = 'int' , default = cpu_count ( ) , help = "number of processes you want to use, default is %i" % cpu_count ( ) ) optparser . add_option ( '-d' , '--del' , action = 'store_true' , dest = 'delete' , default = False , help = "delete experiment folder if it exists" ) optparser . add_option ( '-e' , '--experiment' , action = 'append' , dest = 'experiments' , type = 'string' , help = "run only selected experiments, by default run all experiments in config file." ) optparser . add_option ( '-b' , '--browse' , action = 'store_true' , dest = 'browse' , default = False , help = "browse existing experiments." ) optparser . add_option ( '-B' , '--Browse' , action = 'store_true' , dest = 'browse_big' , default = False , help = "browse existing experiments, more verbose than -b" ) optparser . add_option ( '-p' , '--progress' , action = 'store_true' , dest = 'progress' , default = False , help = "like browse, but only shows name and progress bar" ) options , args = optparser . parse_args ( ) self . options = options return options , args
parses the command line options for different settings .
10,014
def parse_cfg ( self ) : self . cfgparser = ConfigParser ( ) if not self . cfgparser . read ( self . options . config ) : raise SystemExit ( 'config file %s not found.' % self . options . config ) projectDir = os . path . dirname ( self . options . config ) projectDir = os . path . abspath ( projectDir ) os . chdir ( projectDir )
parses the given config file for experiments .
10,015
def mkdir ( self , path ) : if not os . path . exists ( path ) : os . makedirs ( path )
create a directory if it does not exist .
10,016
def write_config_file ( self , params , path ) : cfgp = ConfigParser ( ) cfgp . add_section ( params [ 'name' ] ) for p in params : if p == 'name' : continue cfgp . set ( params [ 'name' ] , p , params [ p ] ) f = open ( os . path . join ( path , 'experiment.cfg' ) , 'w' ) cfgp . write ( f ) f . close ( )
write a config file for this single experiment in the folder path.
10,017
def get_history ( self , exp , rep , tags ) : params = self . get_params ( exp ) if params == None : raise SystemExit ( 'experiment %s not found.' % exp ) if tags != 'all' and not hasattr ( tags , '__iter__' ) : tags = [ tags ] results = { } logfile = os . path . join ( exp , '%i.log' % rep ) try : f = open ( logfile ) except IOError : if len ( tags ) == 1 : return [ ] else : return { } for line in f : dic = json . loads ( line ) for tag in tags : if not tag in results : results [ tag ] = [ ] if tag in dic : results [ tag ] . append ( dic [ tag ] ) else : results [ tag ] . append ( None ) f . close ( ) if len ( results ) == 0 : if len ( tags ) == 1 : return [ ] else : return { } if len ( tags ) == 1 : return results [ results . keys ( ) [ 0 ] ] else : return results
returns the whole history for one experiment and one repetition. tags can be a string or a list of strings. If tags is a string, the history is returned as a list of values; if tags is a list of strings or 'all', the history is returned as a dictionary of lists of values.
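A brief usage sketch with made-up experiment paths and tag names (not from the source); suite stands for the experiment-suite instance that defines get_history:

# single tag -> list of per-iteration values
testErrors = suite.get_history('results/my_experiment', rep=0, tags='testerror')
# several tags (or 'all') -> dict mapping each tag to its list of values
curves = suite.get_history('results/my_experiment', rep=0, tags=['testerror', 'entropy'])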
10,018
def create_dir ( self , params , delete = False ) : fullpath = os . path . join ( params [ 'path' ] , params [ 'name' ] ) self . mkdir ( fullpath ) if delete and os . path . exists ( fullpath ) : os . system ( 'rm %s/*' % fullpath ) self . write_config_file ( params , fullpath )
creates a subdirectory for the experiment and deletes existing files if the delete flag is true, then writes the current experiment.cfg file in the folder.
10,019
def start ( self ) : self . parse_opt ( ) self . parse_cfg ( ) if self . options . browse or self . options . browse_big or self . options . progress : self . browse ( ) raise SystemExit paramlist = [ ] for exp in self . cfgparser . sections ( ) : if not self . options . experiments or exp in self . options . experiments : params = self . items_to_params ( self . cfgparser . items ( exp ) ) params [ 'name' ] = exp paramlist . append ( params ) self . do_experiment ( paramlist )
starts the experiments as given in the config file .
10,020
def run_rep ( self , params , rep ) : try : name = params [ 'name' ] fullpath = os . path . join ( params [ 'path' ] , params [ 'name' ] ) logname = os . path . join ( fullpath , '%i.log' % rep ) restore = 0 if os . path . exists ( logname ) : logfile = open ( logname , 'r' ) lines = logfile . readlines ( ) logfile . close ( ) if 'iterations' in params and len ( lines ) == params [ 'iterations' ] : return False if not self . restore_supported : os . remove ( logname ) restore = 0 else : restore = len ( lines ) self . reset ( params , rep ) if restore : logfile = open ( logname , 'a' ) self . restore_state ( params , rep , restore ) else : logfile = open ( logname , 'w' ) for it in xrange ( restore , params [ 'iterations' ] ) : dic = self . iterate ( params , rep , it ) or { } dic [ 'iteration' ] = it if self . restore_supported : self . save_state ( params , rep , it ) if dic is not None : json . dump ( dic , logfile ) logfile . write ( '\n' ) logfile . flush ( ) logfile . close ( ) self . finalize ( params , rep ) except : import traceback traceback . print_exc ( ) raise
run a single repetition, including directory creation, log files, etc.
10,021
def getClusterPrototypes ( self , numClusters , numPrototypes = 1 ) : linkage = self . getLinkageMatrix ( ) linkage [ : , 2 ] -= linkage [ : , 2 ] . min ( ) clusters = scipy . cluster . hierarchy . fcluster ( linkage , numClusters , criterion = "maxclust" ) prototypes = [ ] clusterSizes = [ ] for cluster_id in numpy . unique ( clusters ) : ids = numpy . arange ( len ( clusters ) ) [ clusters == cluster_id ] clusterSizes . append ( len ( ids ) ) if len ( ids ) > numPrototypes : cluster_prototypes = HierarchicalClustering . _getPrototypes ( ids , self . _overlaps , numPrototypes ) else : cluster_prototypes = numpy . ones ( numPrototypes ) * - 1 cluster_prototypes [ : len ( ids ) ] = ids prototypes . append ( cluster_prototypes ) return numpy . vstack ( prototypes ) . astype ( int ) , numpy . array ( clusterSizes )
Create numClusters flat clusters and find approximately numPrototypes prototypes per flat cluster . Returns an array with each row containing the indices of the prototypes for a single flat cluster .
10,022
def _getPrototypes ( indices , overlaps , topNumber = 1 ) : n = numpy . roots ( [ 1 , - 1 , - 2 * len ( overlaps ) ] ) . max ( ) k = len ( indices ) indices = numpy . array ( indices , dtype = int ) rowIdxs = numpy . ndarray ( ( k , k - 1 ) , dtype = int ) colIdxs = numpy . ndarray ( ( k , k - 1 ) , dtype = int ) for i in xrange ( k ) : rowIdxs [ i , : ] = indices [ i ] colIdxs [ i , : i ] = indices [ : i ] colIdxs [ i , i : ] = indices [ i + 1 : ] idx = HierarchicalClustering . _condensedIndex ( rowIdxs , colIdxs , n ) subsampledOverlaps = overlaps [ idx ] meanSubsampledOverlaps = subsampledOverlaps . mean ( 1 ) biggestOverlapSubsetIdxs = numpy . argsort ( - meanSubsampledOverlaps ) [ : topNumber ] return indices [ biggestOverlapSubsetIdxs ]
Given a compressed overlap array and a set of indices specifying a subset of those in that array, return the set of topNumber indices of vectors that have maximum average overlap with the other vectors in indices.
10,023
def analyzeParameters ( expName , suite ) : print ( "\n================" , expName , "=====================" ) try : expParams = suite . get_params ( expName ) pprint . pprint ( expParams ) for p in [ "boost_strength" , "k" , "learning_rate" , "weight_sparsity" , "k_inference_factor" , "boost_strength_factor" , "c1_out_channels" , "c1_k" , "learning_rate_factor" , "batches_in_epoch" , ] : if p in expParams and type ( expParams [ p ] ) == list : print ( "\n" , p ) for v1 in expParams [ p ] : values , params = suite . get_values_fix_params ( expName , 0 , "testerror" , "last" , ** { p : v1 } ) v = np . array ( values ) try : print ( "Average/min/max for" , p , v1 , "=" , v . mean ( ) , v . min ( ) , v . max ( ) ) except : print ( "Can't compute stats for" , p ) except : print ( "Couldn't load experiment" , expName )
Analyze the impact of each list parameter in this experiment
10,024
def summarizeResults ( expName , suite ) : print ( "\n================" , expName , "=====================" ) try : values , params = suite . get_values_fix_params ( expName , 0 , "totalCorrect" , "last" ) v = np . array ( values ) sortedIndices = v . argsort ( ) for i in sortedIndices [ : : - 1 ] : print ( v [ i ] , params [ i ] [ "name" ] ) print ( ) except : print ( "Couldn't analyze experiment" , expName ) try : values , params = suite . get_values_fix_params ( expName , 0 , "testerror" , "last" ) v = np . array ( values ) sortedIndices = v . argsort ( ) for i in sortedIndices [ : : - 1 ] : print ( v [ i ] , params [ i ] [ "name" ] ) print ( ) except : print ( "Couldn't analyze experiment" , expName )
Summarize the totalCorrect value from the last iteration for each experiment in the directory tree .
10,025
def learningCurve ( expPath , suite ) : print ( "\nLEARNING CURVE ================" , expPath , "=====================" ) try : headers = [ "testerror" , "totalCorrect" , "elapsedTime" , "entropy" ] result = suite . get_value ( expPath , 0 , headers , "all" ) info = [ ] for i , v in enumerate ( zip ( result [ "testerror" ] , result [ "totalCorrect" ] , result [ "elapsedTime" ] , result [ "entropy" ] ) ) : info . append ( [ i , v [ 0 ] , v [ 1 ] , int ( v [ 2 ] ) , v [ 3 ] ] ) headers . insert ( 0 , "iteration" ) print ( tabulate ( info , headers = headers , tablefmt = "grid" ) ) except : print ( "Couldn't load experiment" , expPath )
Print the test and overall noise errors from each iteration of this experiment
10,026
def compute ( self , inputs , outputs ) : if len ( self . queue ) > 0 : data = self . queue . pop ( ) else : raise Exception ( "CoordinateSensor: No data to encode: queue is empty" ) outputs [ "resetOut" ] [ 0 ] = data [ "reset" ] outputs [ "sequenceIdOut" ] [ 0 ] = data [ "sequenceId" ] sdr = self . encoder . encode ( ( numpy . array ( data [ "coordinate" ] ) , self . radius ) ) outputs [ "dataOut" ] [ : ] = sdr if self . verbosity > 1 : print "CoordinateSensor outputs:" print "Coordinate = " , data [ "coordinate" ] print "sequenceIdOut: " , outputs [ "sequenceIdOut" ] print "resetOut: " , outputs [ "resetOut" ] print "dataOut: " , outputs [ "dataOut" ] . nonzero ( ) [ 0 ]
Get the next record from the queue and encode it .
10,027
def printDiagnostics ( exp , sequences , objects , args , verbosity = 0 ) : print "Experiment start time:" , time . ctime ( ) print "\nExperiment arguments:" pprint . pprint ( args ) r = sequences . objectConfusion ( ) print "Average common pairs in sequences=" , r [ 0 ] , print ", features=" , r [ 2 ] r = objects . objectConfusion ( ) print "Average common pairs in objects=" , r [ 0 ] , print ", locations=" , r [ 1 ] , print ", features=" , r [ 2 ] if verbosity > 0 : print "\nObjects are:" for o in objects : pairs = objects [ o ] pairs . sort ( ) print str ( o ) + ": " + str ( pairs ) print "\nSequences:" for i in sequences : print i , sequences [ i ] print "\nNetwork parameters:" pprint . pprint ( exp . config )
Useful diagnostics for debugging .
10,028
def createArgs ( ** kwargs ) : if len ( kwargs ) == 0 : return [ { } ] kargs = deepcopy ( kwargs ) k1 = kargs . keys ( ) [ 0 ] values = kargs . pop ( k1 ) args = [ ] otherArgs = createArgs ( ** kargs ) for v in values : newArgs = deepcopy ( otherArgs ) arg = { k1 : v } for newArg in newArgs : newArg . update ( arg ) args . append ( newArg ) return args
Each kwarg is a list . Return a list of dicts representing all possible combinations of the kwargs .
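For instance (values are made up), two keyword lists expand to their Cartesian product; the ordering of the resulting dicts depends on dict key ordering and is not guaranteed:

combos = createArgs(learning_rate=[0.01, 0.1], k=[50, 100])
# -> four dicts, one per combination, e.g.
# [{'learning_rate': 0.01, 'k': 50}, {'learning_rate': 0.01, 'k': 100},
#  {'learning_rate': 0.1, 'k': 50},  {'learning_rate': 0.1, 'k': 100}]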
10,029
def randomizeSequence ( sequence , symbolsPerSequence , numColumns , sparsity , p = 0.25 ) : randomizedSequence = [ ] sparseCols = int ( numColumns * sparsity ) numSymbolsToChange = int ( symbolsPerSequence * p ) symIndices = np . random . permutation ( np . arange ( symbolsPerSequence ) ) for symbol in range ( symbolsPerSequence ) : randomizedSequence . append ( sequence [ symbol ] ) i = 0 while numSymbolsToChange > 0 : randomizedSequence [ symIndices [ i ] ] = generateRandomSymbol ( numColumns , sparseCols ) i += 1 numSymbolsToChange -= 1 return randomizedSequence
Takes a sequence as input and randomizes a fraction p of it by choosing SDRs at random, leaving the remaining symbols unchanged.
10,030
def generateHOSequence ( sequence , symbolsPerSequence , numColumns , sparsity ) : sequenceHO = [ ] sparseCols = int ( numColumns * sparsity ) for symbol in range ( symbolsPerSequence ) : if symbol == 0 or symbol == ( symbolsPerSequence - 1 ) : sequenceHO . append ( generateRandomSymbol ( numColumns , sparseCols ) ) else : sequenceHO . append ( sequence [ symbol ] ) return sequenceHO
Generates a high-order sequence by taking an initial sequence and then replacing its first and last SDRs with random SDRs.
10,031
def percentOverlap ( x1 , x2 , numColumns ) : nonZeroX1 = np . count_nonzero ( x1 ) nonZeroX2 = np . count_nonzero ( x2 ) sparseCols = min ( nonZeroX1 , nonZeroX2 ) binX1 = np . zeros ( numColumns , dtype = "uint32" ) binX2 = np . zeros ( numColumns , dtype = "uint32" ) for i in range ( sparseCols ) : binX1 [ x1 [ i ] ] = 1 binX2 [ x2 [ i ] ] = 1 return float ( np . dot ( binX1 , binX2 ) ) / float ( sparseCols )
Calculates the percentage of overlap between two SDRs
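A worked example, assuming the SDRs are given as lists of active column indices (as produced by generateRandomSymbol below). Note that np.count_nonzero counts nonzero index values, so an active column with index 0 would not contribute to the count:

x1 = [2, 5, 7]
x2 = [2, 7, 9]
percentOverlap(x1, x2, numColumns=10)   # 2 shared columns / 3 active -> 0.666...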
10,032
def generateRandomSymbol ( numColumns , sparseCols ) : symbol = list ( ) remainingCols = sparseCols while remainingCols > 0 : col = random . randrange ( numColumns ) if col not in symbol : symbol . append ( col ) remainingCols -= 1 return symbol
Generates a random SDR with sparseCols number of active columns
10,033
def generateRandomSequence ( numSymbols , numColumns , sparsity ) : sequence = [ ] sparseCols = int ( numColumns * sparsity ) for _ in range ( numSymbols ) : sequence . append ( generateRandomSymbol ( numColumns , sparseCols ) ) return sequence
Generate a random sequence comprising numSymbols SDRs
10,034
def accuracy ( current , predicted ) : acc = 0 if np . count_nonzero ( predicted ) > 0 : acc = float ( np . dot ( current , predicted ) ) / float ( np . count_nonzero ( predicted ) ) return acc
Computes the accuracy of the TM at time-step t based on the prediction at time-step t-1 and the current active columns at time-step t.
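A small worked example with dense binary column vectors (made-up values):

import numpy as np
current   = np.array([1, 1, 0, 1, 0, 1])   # active columns at time t
predicted = np.array([1, 0, 0, 1, 1, 1])   # prediction made at time t-1
accuracy(current, predicted)               # 3 correct of 4 predicted -> 0.75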
10,035
def sampleCellsWithinColumns ( numCellPairs , cellsPerColumn , numColumns , seed = 42 ) : np . random . seed ( seed ) cellPairs = [ ] for i in range ( numCellPairs ) : randCol = np . random . randint ( numColumns ) randCells = np . random . choice ( np . arange ( cellsPerColumn ) , ( 2 , ) , replace = False ) cellsPair = randCol * cellsPerColumn + randCells cellPairs . append ( cellsPair ) return cellPairs
Generate indices of cell pairs; both cells in each pair are from the same column.
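The pairs are returned as flat cell indices using cellIndex = column * cellsPerColumn + cellWithinColumn. For example (numbers made up):

pairs = sampleCellsWithinColumns(numCellPairs=3, cellsPerColumn=32, numColumns=2048)
# with cellsPerColumn = 32, cells 3 and 17 of column 5 map to
# 5 * 32 + 3 = 163 and 5 * 32 + 17 = 177, so one returned pair could be [163, 177]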
10,036
def sampleCellsAcrossColumns ( numCellPairs , cellsPerColumn , numColumns , seed = 42 ) : np . random . seed ( seed ) cellPairs = [ ] for i in range ( numCellPairs ) : randCols = np . random . choice ( np . arange ( numColumns ) , ( 2 , ) , replace = False ) randCells = np . random . choice ( np . arange ( cellsPerColumn ) , ( 2 , ) , replace = False ) cellsPair = np . zeros ( ( 2 , ) ) for j in range ( 2 ) : cellsPair [ j ] = randCols [ j ] * cellsPerColumn + randCells [ j ] cellPairs . append ( cellsPair . astype ( 'int32' ) ) return cellPairs
Generate indices of cell pairs; the two cells in each pair are from different columns.
10,037
def subSample ( spikeTrains , numCells , totalCells , currentTS , timeWindow ) : indices = np . random . permutation ( np . arange ( totalCells ) ) if currentTS > 0 and currentTS < timeWindow : subSpikeTrains = np . zeros ( ( numCells , currentTS ) , dtype = "uint32" ) for i in range ( numCells ) : subSpikeTrains [ i , : ] = spikeTrains [ indices [ i ] , : ] elif currentTS > 0 and currentTS >= timeWindow : subSpikeTrains = np . zeros ( ( numCells , timeWindow ) , dtype = "uint32" ) for i in range ( numCells ) : subSpikeTrains [ i , : ] = spikeTrains [ indices [ i ] , ( currentTS - timeWindow ) : currentTS ] elif currentTS == 0 : totalTS = np . shape ( spikeTrains ) [ 1 ] subSpikeTrains = np . zeros ( ( numCells , totalTS ) , dtype = "uint32" ) for i in range ( numCells ) : subSpikeTrains [ i , : ] = spikeTrains [ indices [ i ] , : ] elif currentTS < 0 : totalTS = np . shape ( spikeTrains ) [ 1 ] subSpikeTrains = np . zeros ( ( numCells , timeWindow ) , dtype = "uint32" ) rnd = random . randrange ( totalTS - timeWindow ) print "Starting from timestep: " + str ( rnd ) for i in range ( numCells ) : subSpikeTrains [ i , : ] = spikeTrains [ indices [ i ] , rnd : ( rnd + timeWindow ) ] return subSpikeTrains
Obtains a random sample of numCells cells from the whole spike train matrix, from the start of simulation time up to currentTS.
10,038
def subSampleWholeColumn ( spikeTrains , colIndices , cellsPerColumn , currentTS , timeWindow ) : numColumns = np . shape ( colIndices ) [ 0 ] numCells = numColumns * cellsPerColumn if currentTS > 0 and currentTS < timeWindow : subSpikeTrains = np . zeros ( ( numCells , currentTS ) , dtype = "uint32" ) for i in range ( numColumns ) : currentCol = colIndices [ i ] initialCell = cellsPerColumn * currentCol for j in range ( cellsPerColumn ) : subSpikeTrains [ ( cellsPerColumn * i ) + j , : ] = spikeTrains [ initialCell + j , : ] elif currentTS > 0 and currentTS >= timeWindow : subSpikeTrains = np . zeros ( ( numCells , timeWindow ) , dtype = "uint32" ) for i in range ( numColumns ) : currentCol = colIndices [ i ] initialCell = cellsPerColumn * currentCol for j in range ( cellsPerColumn ) : subSpikeTrains [ ( cellsPerColumn * i ) + j , : ] = spikeTrains [ initialCell + j , ( currentTS - timeWindow ) : currentTS ] elif currentTS == 0 : totalTS = np . shape ( spikeTrains ) [ 1 ] subSpikeTrains = np . zeros ( ( numCells , totalTS ) , dtype = "uint32" ) for i in range ( numColumns ) : currentCol = colIndices [ i ] initialCell = cellsPerColumn * currentCol for j in range ( cellsPerColumn ) : subSpikeTrains [ ( cellsPerColumn * i ) + j , : ] = spikeTrains [ initialCell + j , : ] elif currentTS < 0 : totalTS = np . shape ( spikeTrains ) [ 1 ] subSpikeTrains = np . zeros ( ( numCells , timeWindow ) , dtype = "uint32" ) rnd = random . randrange ( totalTS - timeWindow ) print "Starting from timestep: " + str ( rnd ) for i in range ( numColumns ) : currentCol = colIndices [ i ] initialCell = cellsPerColumn * currentCol for j in range ( cellsPerColumn ) : subSpikeTrains [ ( cellsPerColumn * i ) + j , : ] = spikeTrains [ initialCell + j , rnd : ( rnd + timeWindow ) ] return subSpikeTrains
Obtains a subsample from the matrix of spike trains by considering the cells in the columns specified by colIndices. Thus it returns a matrix of spike trains for cells within the specified columns.
10,039
def computeEntropy ( spikeTrains ) : MIN_ACTIVATION_PROB = 0.000001 activationProb = np . mean ( spikeTrains , 1 ) activationProb [ activationProb < MIN_ACTIVATION_PROB ] = MIN_ACTIVATION_PROB activationProb = activationProb / np . sum ( activationProb ) entropy = - np . dot ( activationProb , np . log2 ( activationProb ) ) return entropy
Estimates entropy in spike trains .
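A small worked example (made-up spike trains): the activation probabilities are the per-cell firing rates normalized to sum to 1, and the result is their Shannon entropy in bits:

import numpy as np
spikeTrains = np.array([[1, 0, 1, 0],   # fires in half the bins -> mean 0.5
                        [1, 1, 1, 1]])  # fires in every bin     -> mean 1.0
# normalized probabilities are [1/3, 2/3], so
# entropy = -(1/3 * log2(1/3) + 2/3 * log2(2/3)) ~= 0.918 bits (max 1 bit for 2 cells)
computeEntropy(spikeTrains)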
10,040
def computeISI ( spikeTrains ) : zeroCount = 0 isi = [ ] cells = 0 for i in range ( np . shape ( spikeTrains ) [ 0 ] ) : if cells > 0 and cells % 250 == 0 : print str ( cells ) + " cells processed" for j in range ( np . shape ( spikeTrains ) [ 1 ] ) : if spikeTrains [ i ] [ j ] == 0 : zeroCount += 1 elif zeroCount > 0 : isi . append ( zeroCount ) zeroCount = 0 zeroCount = 0 cells += 1 print "**All cells processed**" return isi
Estimates the inter-spike interval from a spike train matrix.
10,041
def poissonSpikeGenerator ( firingRate , nBins , nTrials ) : dt = 0.001 poissonSpikeTrain = np . zeros ( ( nTrials , nBins ) , dtype = "uint32" ) for i in range ( nTrials ) : for j in range ( int ( nBins ) ) : if random . random ( ) < firingRate * dt : poissonSpikeTrain [ i , j ] = 1 return poissonSpikeTrain
Generates a Poisson spike train .
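With the hard-coded bin width dt = 1 ms, each bin spikes independently with probability firingRate * 0.001, so the expected spike count per trial is roughly firingRate * nBins / 1000. For example (made-up parameters):

trains = poissonSpikeGenerator(firingRate=30, nBins=1000, nTrials=5)   # ~1 s per trial
trains.sum(axis=1)   # each entry near 30, up to Poisson variability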
10,042
def raster ( event_times_list , color = 'k' ) : ax = plt . gca ( ) for ith , trial in enumerate ( event_times_list ) : plt . vlines ( trial , ith + .5 , ith + 1.5 , color = color ) plt . ylim ( .5 , len ( event_times_list ) + .5 ) return ax
Creates a raster from spike trains .
10,043
def rasterPlot ( spikeTrain , model ) : nTrials = np . shape ( spikeTrain ) [ 0 ] spikes = [ ] for i in range ( nTrials ) : spikes . append ( spikeTrain [ i ] . nonzero ( ) [ 0 ] . tolist ( ) ) plt . figure ( ) ax = raster ( spikes ) plt . xlabel ( 'Time' ) plt . ylabel ( 'Neuron' ) plt . savefig ( "raster" + str ( model ) ) plt . close ( )
Plots a raster and saves the figure in the working directory.
10,044
def saveTM ( tm ) : proto1 = TemporalMemoryProto_capnp . TemporalMemoryProto . new_message ( ) tm . write ( proto1 ) with open ( 'tm.nta' , 'wb' ) as f : proto1 . write ( f )
Saves the temporal memory and the sequences generated for its training .
10,045
def mapLabelRefs ( dataDict ) : labelRefs = [ label for label in set ( itertools . chain . from_iterable ( [ x [ 1 ] for x in dataDict . values ( ) ] ) ) ] for recordNumber , data in dataDict . iteritems ( ) : dataDict [ recordNumber ] = ( data [ 0 ] , numpy . array ( [ labelRefs . index ( label ) for label in data [ 1 ] ] ) , data [ 2 ] ) return labelRefs , dataDict
Replace the label strings in dataDict with corresponding ints .
10,046
def bucketCSVs ( csvFile , bucketIdx = 2 ) : try : with open ( csvFile , "rU" ) as f : reader = csv . reader ( f ) headers = next ( reader , None ) dataDict = OrderedDict ( ) for lineNumber , line in enumerate ( reader ) : if line [ bucketIdx ] in dataDict : dataDict [ line [ bucketIdx ] ] . append ( line ) else : dataDict [ line [ bucketIdx ] ] = [ line ] except IOError as e : print e filePaths = [ ] for i , ( _ , lines ) in enumerate ( dataDict . iteritems ( ) ) : bucketFile = csvFile . replace ( "." , "_" + str ( i ) + "." ) writeCSV ( lines , headers , bucketFile ) filePaths . append ( bucketFile ) return filePaths
Write the individual buckets in csvFile to their own CSV files .
10,047
def readDir ( dirPath , numLabels , modify = False ) : samplesDict = defaultdict ( list ) for _ , _ , files in os . walk ( dirPath ) : for f in files : basename , extension = os . path . splitext ( os . path . basename ( f ) ) if "." in basename and extension == ".csv" : category = basename . split ( "." ) [ - 1 ] if modify : category = category . replace ( "0" , "/" ) category = category . replace ( "_" , " " ) samplesDict [ category ] = readCSV ( os . path . join ( dirPath , f ) , numLabels = numLabels ) return samplesDict
Reads in data from a directory of CSV files ; assumes the directory only contains CSV files .
10,048
def writeCSV ( data , headers , csvFile ) : with open ( csvFile , "wb" ) as f : writer = csv . writer ( f , delimiter = "," ) writer . writerow ( headers ) writer . writerows ( data )
Write data with column headers to a CSV .
10,049
def writeFromDict ( dataDict , headers , csvFile ) : with open ( csvFile , "wb" ) as f : writer = csv . writer ( f , delimiter = "," ) writer . writerow ( headers ) for row in sorted ( dataDict . keys ( ) ) : writer . writerow ( dataDict [ row ] )
Write dictionary to a CSV where keys are row numbers and values are a list .
10,050
def readDataAndReshuffle ( args , categoriesInOrderOfInterest = None ) : dataDict = readCSV ( args . dataPath , 1 ) labelRefs , dataDict = mapLabelRefs ( dataDict ) if "numLabels" in args : numLabels = args . numLabels else : numLabels = len ( labelRefs ) if categoriesInOrderOfInterest is None : categoriesInOrderOfInterest = range ( 0 , numLabels ) else : categoriesInOrderOfInterest = categoriesInOrderOfInterest [ 0 : numLabels ] dataSet = [ ] documentTextMap = { } counts = numpy . zeros ( len ( labelRefs ) ) for document in dataDict . itervalues ( ) : try : docId = int ( document [ 2 ] ) except : raise RuntimeError ( "docId " + str ( docId ) + " is not an integer" ) oldCategoryIndex = document [ 1 ] [ 0 ] documentTextMap [ docId ] = document [ 0 ] if oldCategoryIndex in categoriesInOrderOfInterest : newIndex = categoriesInOrderOfInterest . index ( oldCategoryIndex ) dataSet . append ( [ document [ 0 ] , [ newIndex ] , docId ] ) counts [ newIndex ] += 1 documentCategoryMap = { } for doc in dataDict . iteritems ( ) : docId = int ( doc [ 1 ] [ 2 ] ) oldCategoryIndex = doc [ 1 ] [ 1 ] [ 0 ] if oldCategoryIndex in categoriesInOrderOfInterest : newIndex = categoriesInOrderOfInterest . index ( oldCategoryIndex ) v = documentCategoryMap . get ( docId , [ ] ) v . append ( newIndex ) documentCategoryMap [ docId ] = v labelRefs = [ labelRefs [ i ] for i in categoriesInOrderOfInterest ] print "Total number of unique documents" , len ( documentCategoryMap ) print "Category counts: " , counts print "Categories in training/test data:" , labelRefs return dataSet , labelRefs , documentCategoryMap , documentTextMap
Read the data file specified in args, optionally reshuffle categories, print out some statistics, and return various data structures. This routine is pretty specific and only used in some simple test scripts.
10,051
def createModel ( modelName , ** kwargs ) : if modelName not in TemporalMemoryTypes . getTypes ( ) : raise RuntimeError ( "Unknown model type: " + modelName ) return getattr ( TemporalMemoryTypes , modelName ) ( ** kwargs )
Return a classification model of the appropriate type. The model can be any supported subclass of ClassificationModel, based on modelName.
10,052
def getConstructorArguments ( modelName ) : if modelName not in TemporalMemoryTypes . getTypes ( ) : raise RuntimeError ( "Unknown model type: " + modelName ) argspec = inspect . getargspec ( getattr ( TemporalMemoryTypes , modelName ) . __init__ ) return ( argspec . args [ 1 : ] , argspec . defaults )
Return constructor arguments and associated default values for the given model type .
10,053
def getTypes ( cls ) : for attrName in dir ( cls ) : attrValue = getattr ( cls , attrName ) if ( isinstance ( attrValue , type ) ) : yield attrName
Get the sequence of acceptable model types. Iterates through class attributes and separates the user-defined enumerations from the default attributes implicit to Python classes, i.e., this function returns the names of the attributes explicitly defined above.
10,054
def initialize_dendrites ( self ) : self . dendrites = SM32 ( ) self . dendrites . reshape ( self . dim , self . num_dendrites ) for row in range ( self . num_dendrites ) : synapses = numpy . random . choice ( self . dim , self . dendrite_length , replace = False ) for synapse in synapses : self . dendrites [ synapse , row ] = 1
Initialize all the dendrites of the neuron to a set of random connections
10,055
def calculate_activation ( self , datapoint ) : activations = datapoint * self . dendrites activations = self . nonlinearity ( activations ) return activations . sum ( )
Computes the neuron's total activation (summed over dendrites) for a single datapoint only.
10,056
def HTM_style_initialize_on_data ( self , data , labels ) : current_dendrite = 0 self . dendrites = SM32 ( ) self . dendrites . reshape ( self . dim , self . num_dendrites ) data = copy . deepcopy ( data ) data . deleteRows ( [ i for i , v in enumerate ( labels ) if v != 1 ] ) if data . nRows ( ) > self . num_dendrites : print "Neuron using clustering to initialize dendrites" data = ( data . toDense ( ) ) model = KMeans ( n_clusters = self . num_dendrites , n_jobs = 1 ) clusters = model . fit_predict ( data ) multisets = [ [ Counter ( ) , [ ] ] for i in range ( self . num_dendrites ) ] sparse_data = [ [ i for i , d in enumerate ( datapoint ) if d == 1 ] for datapoint in data ] for datapoint , cluster in zip ( sparse_data , clusters ) : multisets [ cluster ] [ 0 ] = multisets [ cluster ] [ 0 ] + Counter ( datapoint ) multisets [ cluster ] [ 1 ] . append ( set ( datapoint ) ) for i , multiset in enumerate ( multisets ) : shared_elements = set ( map ( lambda x : x [ 0 ] , filter ( lambda x : x [ 1 ] > 1 , multiset [ 0 ] . most_common ( self . dendrite_length ) ) ) ) dendrite_connections = shared_elements while len ( shared_elements ) < self . dendrite_length : most_distant_point = multiset [ 1 ] [ numpy . argmin ( [ len ( dendrite_connections . intersection ( point ) ) for point in multiset [ 1 ] ] ) ] new_connection = random . sample ( most_distant_point - dendrite_connections , 1 ) [ 0 ] dendrite_connections . add ( new_connection ) for synapse in dendrite_connections : self . dendrites [ synapse , current_dendrite ] = 1. current_dendrite += 1 else : for i in range ( data . nRows ( ) ) : ones = data . rowNonZeros ( i ) [ 0 ] dendrite_connections = numpy . random . choice ( ones , size = self . dendrite_length , replace = False ) for synapse in dendrite_connections : self . dendrites [ synapse , current_dendrite ] = 1. current_dendrite += 1 self . initialize_permanences ( )
Uses a style of initialization inspired by the temporal memory . When a new positive example is found a dendrite is chosen and a number of synapses are created to the example .
10,057
def HTM_style_train_on_datapoint ( self , datapoint , label ) : activations = datapoint * self . dendrites self . nonlinearity ( activations ) activation = numpy . sign ( activations . sum ( ) ) if label >= 1 and activation >= 0.5 : strongest_branch = activations . rowMax ( 0 ) [ 0 ] datapoint . transpose ( ) inc_vector = self . dendrites . getSlice ( 0 , self . dim , strongest_branch , strongest_branch + 1 ) * self . permanence_increment inc_vector . elementNZMultiply ( datapoint ) dec_vector = self . dendrites . getSlice ( 0 , self . dim , strongest_branch , strongest_branch + 1 ) * self . permanence_decrement dec_vector . elementNZMultiply ( 1 - datapoint ) self . permanences . setSlice ( 0 , strongest_branch , self . permanences . getSlice ( 0 , self . dim , strongest_branch , strongest_branch + 1 ) + inc_vector - dec_vector ) positions , scores = self . permanences . colNonZeros ( strongest_branch ) [ 0 ] , self . permanences . colNonZeros ( strongest_branch ) [ 1 ] for position , score in zip ( positions , scores ) : if score < self . permanence_threshold : self . dendrites [ position , strongest_branch ] = 0 self . permanences [ position , strongest_branch ] = 0 new_connection = random . sample ( set ( datapoint . colNonZeros ( 0 ) [ 0 ] ) - set ( self . dendrites . colNonZeros ( strongest_branch ) [ 0 ] ) , 1 ) [ 0 ] self . dendrites [ new_connection , strongest_branch ] = 1. self . permanences [ new_connection , strongest_branch ] = self . initial_permanence elif label < 1 and activation >= 0.5 : strongest_branch = activations . rowMax ( 0 ) [ 0 ] dec_vector = self . dendrites . getSlice ( 0 , self . dim , strongest_branch , strongest_branch + 1 ) * self . permanence_decrement datapoint . transpose ( ) dec_vector . elementNZMultiply ( datapoint ) self . permanences . setSlice ( 0 , strongest_branch , self . permanences . getSlice ( 0 , self . dim , strongest_branch , strongest_branch + 1 ) - dec_vector ) elif label >= 1 and activation < 0.5 : weakest_branch = numpy . argmin ( self . permanences . colSums ( ) ) if numpy . median ( self . permanences . getCol ( weakest_branch ) ) < self . permanence_threshold : self . permanences . setColToZero ( weakest_branch ) self . dendrites . setColToZero ( weakest_branch ) ones = datapoint . rowNonZeros ( 0 ) [ 0 ] dendrite_connections = numpy . random . choice ( ones , size = self . dendrite_length , replace = False ) for synapse in dendrite_connections : self . dendrites [ synapse , weakest_branch ] = 1. self . permanences [ synapse , weakest_branch ] = self . initial_permanence
Run a version of permanence-based training on a datapoint. Due to the fixed dendrite count and dendrite length, we are forced to use each synapse more efficiently, deleting synapses and resetting them if they are not found useful.
10,058
def initialize ( self ) : autoArgs = { name : getattr ( self , name ) for name in self . _poolerArgNames } autoArgs [ "inputDimensions" ] = [ self . _inputWidth ] autoArgs [ "columnDimensions" ] = [ self . _columnCount ] autoArgs [ "potentialRadius" ] = self . _inputWidth autoArgs [ "historyLength" ] = self . _historyLength autoArgs [ "minHistory" ] = self . _minHistory self . _pooler = self . _poolerClass ( ** autoArgs )
Initialize an instance of self._poolerClass.
10,059
def compute ( self , inputs , outputs ) : resetSignal = False if 'resetIn' in inputs : if len ( inputs [ 'resetIn' ] ) != 1 : raise Exception ( "resetIn has invalid length" ) if inputs [ 'resetIn' ] [ 0 ] != 0 : resetSignal = True outputs [ "mostActiveCells" ] [ : ] = numpy . zeros ( self . _columnCount , dtype = GetNTAReal ( ) ) if self . _poolerType == "simpleUnion" : self . _pooler . unionIntoArray ( inputs [ "activeCells" ] , outputs [ "mostActiveCells" ] , forceOutput = resetSignal ) else : predictedActiveCells = inputs [ "predictedActiveCells" ] if ( "predictedActiveCells" in inputs ) else numpy . zeros ( self . _inputWidth , dtype = uintDType ) mostActiveCellsIndices = self . _pooler . compute ( inputs [ "activeCells" ] , predictedActiveCells , self . learningMode ) outputs [ "mostActiveCells" ] [ mostActiveCellsIndices ] = 1 if resetSignal : self . reset ( )
Run one iteration of TemporalPoolerRegion's compute.
10,060
def getSpec ( cls ) : spec = cls . getBaseSpec ( ) p , o = _getAdditionalSpecs ( ) spec [ "parameters" ] . update ( p ) spec [ "parameters" ] . update ( o ) return spec
Return the Spec for TemporalPoolerRegion .
10,061
def computeCapacity ( results , threshold ) : closestBelow = None closestAbove = None for numObjects , accuracy in sorted ( results ) : if accuracy >= threshold : if closestAbove is None or closestAbove [ 0 ] < numObjects : closestAbove = ( numObjects , accuracy ) closestBelow = None else : if closestBelow is None : closestBelow = ( numObjects , accuracy ) if closestBelow is None or closestAbove is None : print closestBelow , threshold , closestAbove raise ValueError ( "Results must include a value above and below threshold of {}" . format ( threshold ) ) print " Capacity threshold is between {} and {}" . format ( closestAbove [ 0 ] , closestBelow [ 0 ] ) return closestAbove [ 0 ]
Returns largest number of objects with accuracy above threshold .
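An illustrative call with made-up results, where accuracy stays at or above the 0.95 threshold through 20 objects and drops below it at 30:

results = [(10, 0.99), (20, 0.96), (30, 0.88)]
computeCapacity(results, threshold=0.95)   # prints the bracketing range and returns 20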
10,062
def reset ( self ) : self . activeCells = np . empty ( 0 , dtype = "uint32" ) self . activeDeltaSegments = np . empty ( 0 , dtype = "uint32" ) self . activeFeatureLocationSegments = np . empty ( 0 , dtype = "uint32" )
Deactivate all cells .
10,063
def compute ( self , deltaLocation = ( ) , newLocation = ( ) , featureLocationInput = ( ) , featureLocationGrowthCandidates = ( ) , learn = True ) : prevActiveCells = self . activeCells self . activeDeltaSegments = np . where ( ( self . internalConnections . computeActivity ( prevActiveCells , self . connectedPermanence ) >= self . activationThreshold ) & ( self . deltaConnections . computeActivity ( deltaLocation , self . connectedPermanence ) >= self . activationThreshold ) ) [ 0 ] if len ( deltaLocation ) == 0 : self . activeFeatureLocationSegments = np . where ( self . featureLocationConnections . computeActivity ( featureLocationInput , self . connectedPermanence ) >= self . activationThreshold ) [ 0 ] else : self . activeFeatureLocationSegments = np . empty ( 0 , dtype = "uint32" ) if len ( newLocation ) > 0 : self . activeCells = newLocation if learn : self . _learnTransition ( prevActiveCells , deltaLocation , newLocation ) self . _learnFeatureLocationPair ( newLocation , featureLocationInput , featureLocationGrowthCandidates ) elif len ( prevActiveCells ) > 0 : if len ( deltaLocation ) > 0 : cellsForDeltaSegments = self . internalConnections . mapSegmentsToCells ( self . activeDeltaSegments ) self . activeCells = np . unique ( cellsForDeltaSegments ) else : if len ( self . activeFeatureLocationSegments ) > 0 : cellsForFeatureLocationSegments = ( self . featureLocationConnections . mapSegmentsToCells ( self . activeFeatureLocationSegments ) ) self . activeCells = np . intersect1d ( prevActiveCells , cellsForFeatureLocationSegments ) else : self . activeCells = prevActiveCells elif len ( featureLocationInput ) > 0 : cellsForFeatureLocationSegments = ( self . featureLocationConnections . mapSegmentsToCells ( self . activeFeatureLocationSegments ) ) self . activeCells = np . unique ( cellsForFeatureLocationSegments )
Run one time step of the Location Memory algorithm .
10,064
def initialize ( self ) : if self . _modules is None : self . _modules = [ ] for i in xrange ( self . moduleCount ) : self . _modules . append ( ThresholdedGaussian2DLocationModule ( cellsPerAxis = self . cellsPerAxis , scale = self . scale [ i ] , orientation = self . orientation [ i ] , anchorInputSize = self . anchorInputSize , activeFiringRate = self . activeFiringRate , bumpSigma = self . bumpSigma , activationThreshold = self . activationThreshold , initialPermanence = self . initialPermanence , connectedPermanence = self . connectedPermanence , learningThreshold = self . learningThreshold , sampleSize = self . sampleSize , permanenceIncrement = self . permanenceIncrement , permanenceDecrement = self . permanenceDecrement , maxSynapsesPerSegment = self . maxSynapsesPerSegment , bumpOverlapMethod = self . bumpOverlapMethod , seed = self . seed ) ) if self . dimensions > 2 : self . _projection = [ self . createProjectionMatrix ( dimensions = self . dimensions ) for _ in xrange ( self . moduleCount ) ]
Initialize grid cell modules
10,065
def compute ( self , inputs , outputs ) : if inputs . get ( "resetIn" , False ) : self . reset ( ) if self . learningMode : self . activateRandomLocation ( ) outputs [ "activeCells" ] [ : ] = 0 outputs [ "learnableCells" ] [ : ] = 0 outputs [ "sensoryAssociatedCells" ] [ : ] = 0 return displacement = inputs . get ( "displacement" , np . array ( [ ] ) ) anchorInput = inputs . get ( "anchorInput" , np . array ( [ ] ) ) . nonzero ( ) [ 0 ] anchorGrowthCandidates = inputs . get ( "anchorGrowthCandidates" , np . array ( [ ] ) ) . nonzero ( ) [ 0 ] activeCells = np . array ( [ ] , dtype = np . uint32 ) learnableCells = np . array ( [ ] , dtype = np . uint32 ) sensoryAssociatedCells = np . array ( [ ] , dtype = np . uint32 ) shouldMove = displacement . any ( ) shouldSense = anchorInput . any ( ) or anchorGrowthCandidates . any ( ) if shouldMove and len ( displacement ) != self . dimensions : raise TypeError ( "displacement must have {} dimensions" . format ( self . dimensions ) ) if self . dualPhase : if self . _sensing : shouldMove = False else : shouldSense = False self . _sensing = not self . _sensing for i in xrange ( self . moduleCount ) : module = self . _modules [ i ] if shouldMove : movement = displacement if self . dimensions > 2 : movement = np . matmul ( self . _projection [ i ] , movement ) module . movementCompute ( movement ) if shouldSense : module . sensoryCompute ( anchorInput , anchorGrowthCandidates , self . learningMode ) start = i * self . cellCount activeCells = np . append ( activeCells , module . getActiveCells ( ) + start ) learnableCells = np . append ( learnableCells , module . getLearnableCells ( ) + start ) sensoryAssociatedCells = np . append ( sensoryAssociatedCells , module . getSensoryAssociatedCells ( ) + start ) outputs [ "activeCells" ] [ : ] = 0 outputs [ "activeCells" ] [ activeCells ] = 1 outputs [ "learnableCells" ] [ : ] = 0 outputs [ "learnableCells" ] [ learnableCells ] = 1 outputs [ "sensoryAssociatedCells" ] [ : ] = 0 outputs [ "sensoryAssociatedCells" ] [ sensoryAssociatedCells ] = 1
Compute the location based on the displacement and anchorInput by first applying the movement, if displacement is present in the input array, and then applying the sensation, if anchorInput is present in the input array. The anchorGrowthCandidates input array is used during learning.
10,066
def setParameter ( self , parameterName , index , parameterValue ) : spec = self . getSpec ( ) if parameterName not in spec [ 'parameters' ] : raise Exception ( "Unknown parameter: " + parameterName ) setattr ( self , parameterName , parameterValue )
Set the value of a Spec parameter .
10,067
def getOutputElementCount ( self , name ) : if name in [ "activeCells" , "learnableCells" , "sensoryAssociatedCells" ] : return self . cellCount * self . moduleCount else : raise Exception ( "Invalid output name specified: " + name )
Returns the size of the output array
10,068
def reset ( self ) : self . L4 . reset ( ) for module in self . L6aModules : module . reset ( )
Clear all cell activity .
10,069
def getLocationRepresentation ( self ) : activeCells = np . array ( [ ] , dtype = "uint32" ) totalPrevCells = 0 for module in self . L6aModules : activeCells = np . append ( activeCells , module . getActiveCells ( ) + totalPrevCells ) totalPrevCells += module . numberOfCells ( ) return activeCells
Get the full population representation of the location layer .
10,070
def getLearnableLocationRepresentation ( self ) : learnableCells = np . array ( [ ] , dtype = "uint32" ) totalPrevCells = 0 for module in self . L6aModules : learnableCells = np . append ( learnableCells , module . getLearnableCells ( ) + totalPrevCells ) totalPrevCells += module . numberOfCells ( ) return learnableCells
Get the cells in the location layer that should be associated with the sensory input layer representation. In some models this is identical to the active cells; in others it's a subset.
10,071
def learnObject ( self , objectDescription , randomLocation = False , useNoise = False , noisyTrainingTime = 1 ) : self . reset ( ) self . column . activateRandomLocation ( ) locationsAreUnique = True if randomLocation or useNoise : numIters = noisyTrainingTime else : numIters = 1 for i in xrange ( numIters ) : for iFeature , feature in enumerate ( objectDescription [ "features" ] ) : self . _move ( feature , randomLocation = randomLocation , useNoise = useNoise ) featureSDR = self . features [ feature [ "name" ] ] self . _sense ( featureSDR , learn = True , waitForSettle = False ) locationRepresentation = self . column . getSensoryAssociatedLocationRepresentation ( ) self . locationRepresentations [ ( objectDescription [ "name" ] , iFeature ) ] . append ( locationRepresentation ) self . inputRepresentations [ ( objectDescription [ "name" ] , iFeature , feature [ "name" ] ) ] = ( self . column . L4 . getWinnerCells ( ) ) locationTuple = tuple ( locationRepresentation ) locationsAreUnique = ( locationsAreUnique and locationTuple not in self . representationSet ) self . representationSet . add ( tuple ( locationRepresentation ) ) self . learnedObjects . append ( objectDescription ) return locationsAreUnique
Train the network to recognize the specified object. Move the sensor to one of its features and activate a random location representation in the location layer. Move the sensor over the object, updating the location representation through path integration. At each point on the object, form reciprocal connections between the representation of the location and the representation of the sensory input.
10,072
def _move ( self , feature , randomLocation = False , useNoise = True ) : if randomLocation : locationOnObject = { "top" : feature [ "top" ] + np . random . rand ( ) * feature [ "height" ] , "left" : feature [ "left" ] + np . random . rand ( ) * feature [ "width" ] , } else : locationOnObject = { "top" : feature [ "top" ] + feature [ "height" ] / 2. , "left" : feature [ "left" ] + feature [ "width" ] / 2. } if self . locationOnObject is not None : displacement = { "top" : locationOnObject [ "top" ] - self . locationOnObject [ "top" ] , "left" : locationOnObject [ "left" ] - self . locationOnObject [ "left" ] } if useNoise : params = self . column . movementCompute ( displacement , self . noiseFactor , self . moduleNoiseFactor ) else : params = self . column . movementCompute ( displacement , 0 , 0 ) for monitor in self . monitors . values ( ) : monitor . afterLocationShift ( ** params ) else : for monitor in self . monitors . values ( ) : monitor . afterLocationInitialize ( ) self . locationOnObject = locationOnObject for monitor in self . monitors . values ( ) : monitor . afterLocationChanged ( locationOnObject )
Move the sensor to the center of the specified feature . If the sensor is currently at another location send the displacement into the cortical column so that it can perform path integration .
10,073
def _sense ( self , featureSDR , learn , waitForSettle ) : for monitor in self . monitors . values ( ) : monitor . beforeSense ( featureSDR ) iteration = 0 prevCellActivity = None while True : ( inputParams , locationParams ) = self . column . sensoryCompute ( featureSDR , learn ) if waitForSettle : cellActivity = ( set ( self . column . getSensoryRepresentation ( ) ) , set ( self . column . getLocationRepresentation ( ) ) ) if cellActivity == prevCellActivity : break prevCellActivity = cellActivity for monitor in self . monitors . values ( ) : if iteration > 0 : monitor . beforeSensoryRepetition ( ) monitor . afterInputCompute ( ** inputParams ) monitor . afterLocationAnchor ( ** locationParams ) iteration += 1 if not waitForSettle or iteration >= self . maxSettlingTime : break
Send the sensory input into the network . Optionally send it multiple times until the network settles .
10,074
def createObjectMachine ( machineType , ** kwargs ) : if machineType not in ObjectMachineTypes . getTypes ( ) : raise RuntimeError ( "Unknown model type: " + machineType ) return getattr ( ObjectMachineTypes , machineType ) ( ** kwargs )
Return an object machine of the appropriate type .
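A hedged usage sketch; the machine type name "simple" and the keyword arguments are assumptions about what ObjectMachineTypes registers, not confirmed by this code:

    objects = createObjectMachine(
      "simple",               # assumed registered machine type
      numInputBits=20,
      sensorInputSize=1024,
      externalInputSize=1024,
      seed=42,
    )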
10,075
def getEntropies ( m ) : entropy = 0.0 max_entropy = 0.0 for module in m . children ( ) : e , e_max = getEntropies ( module ) entropy += e max_entropy += e_max e , e_max = getEntropy ( m ) entropy += e max_entropy += e_max return entropy , max_entropy
Recursively get the current and max entropies from every child module
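getEntropy() is not defined in this excerpt; a minimal sketch of how the pair might be used, with a hypothetical stub standing in for it:

    import torch.nn as nn

    # Hypothetical stub: layers that track duty cycles (e.g. KWinners) report
    # their entropy; every other module contributes zero.
    def getEntropy(module):
        if hasattr(module, "entropy") and hasattr(module, "maxEntropy"):
            return module.entropy(), module.maxEntropy()
        return 0.0, 0.0

    model = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 5))
    entropy, maxEntropy = getEntropies(model)  # (0.0, 0.0) for this plain model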
10,076
def updateBoostStrength ( m ) : if isinstance ( m , KWinnersBase ) : if m . training : m . boostStrength = m . boostStrength * m . boostStrengthFactor
Function used to update the boost strength of KWinner modules after each epoch .
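Because updateBoostStrength takes a single module, it is typically broadcast over the whole network with Module.apply(); a sketch of an epoch loop, where trainOneEpoch, model, and numEpochs are hypothetical:

    for epoch in range(numEpochs):
        trainOneEpoch(model)              # hypothetical per-epoch training step
        model.apply(updateBoostStrength)  # visits every submodule, decaying KWinners boost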
10,077
def updateBoostStrength ( self ) : if self . training : self . boostStrength = self . boostStrength * self . boostStrengthFactor
Update boost strength using given strength factor during training
10,078
def entropy ( self ) : if self . k < self . n : _ , entropy = binaryEntropy ( self . dutyCycle ) return entropy else : return 0
Returns the current total entropy of this layer
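binaryEntropy() is not shown here; a hedged sketch of what it is assumed to compute, treating each unit's duty cycle as a Bernoulli probability and returning (per-unit entropy, total):

    import torch

    def binaryEntropySketch(dutyCycle):
        # Assumed behavior only: -p*log2(p) - (1-p)*log2(1-p) per unit, plus the sum.
        p = dutyCycle.clamp(1e-10, 1.0 - 1e-10)
        perUnit = -p * p.log2() - (1 - p) * (1 - p).log2()
        return perUnit, perUnit.sum()

    _, total = binaryEntropySketch(torch.tensor([0.02, 0.02, 0.5]))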
10,079
def greedySensorPositions ( numSensors , numLocations ) : locationViewCounts = [ 0 ] * numLocations locationViewCountsBySensor = [ [ 0 ] * numLocations for _ in xrange ( numSensors ) ] placement = random . sample ( xrange ( numLocations ) , numSensors ) while True : yield tuple ( placement ) for sensor , location in enumerate ( placement ) : locationViewCounts [ location ] += 1 locationViewCountsBySensor [ sensor ] [ location ] += 1 nextLocationsRanked = sorted ( xrange ( numLocations ) , key = lambda x : ( locationViewCounts [ x ] , random . random ( ) ) ) nextLocations = nextLocationsRanked [ : numSensors ] sensors = range ( numSensors ) random . shuffle ( sensors ) for sensor in sensors : viewCount = min ( locationViewCountsBySensor [ sensor ] [ location ] for location in nextLocations ) location = random . choice ( [ x for x in nextLocations if locationViewCountsBySensor [ sensor ] [ x ] == viewCount ] ) nextLocations . remove ( location ) placement [ sensor ] = location
Returns an infinite sequence of sensor placements .
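Since the generator never terminates, callers take a finite prefix; for example:

    import itertools

    # First five placements of 3 sensors over 10 candidate locations.
    for placement in itertools.islice(greedySensorPositions(3, 10), 5):
        print placement  # e.g. (7, 2, 4): one location index per sensor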
10,080
def initialize ( self ) : if self . _tm is None : params = { "columnCount" : self . columnCount , "basalInputSize" : self . basalInputWidth , "apicalInputSize" : self . apicalInputWidth , "cellsPerColumn" : self . cellsPerColumn , "activationThreshold" : self . activationThreshold , "initialPermanence" : self . initialPermanence , "connectedPermanence" : self . connectedPermanence , "minThreshold" : self . minThreshold , "sampleSize" : self . sampleSize , "permanenceIncrement" : self . permanenceIncrement , "permanenceDecrement" : self . permanenceDecrement , "basalPredictedSegmentDecrement" : self . basalPredictedSegmentDecrement , "apicalPredictedSegmentDecrement" : self . apicalPredictedSegmentDecrement , "maxSynapsesPerSegment" : self . maxSynapsesPerSegment , "seed" : self . seed , } if self . implementation == "ApicalTiebreakCPP" : params [ "learnOnOneCell" ] = self . learnOnOneCell params [ "maxSegmentsPerCell" ] = self . maxSegmentsPerCell import htmresearch_core . experimental cls = htmresearch_core . experimental . ApicalTiebreakPairMemory elif self . implementation == "ApicalTiebreak" : params [ "reducedBasalThreshold" ] = self . reducedBasalThreshold import htmresearch . algorithms . apical_tiebreak_temporal_memory cls = htmresearch . algorithms . apical_tiebreak_temporal_memory . ApicalTiebreakPairMemory elif self . implementation == "ApicalDependent" : params [ "reducedBasalThreshold" ] = self . reducedBasalThreshold import htmresearch . algorithms . apical_dependent_temporal_memory cls = htmresearch . algorithms . apical_dependent_temporal_memory . TripleMemory else : raise ValueError ( "Unrecognized implementation %s" % self . implementation ) self . _tm = cls ( ** params )
Initialize the self . _tm if not already initialized .
10,081
def getOutputElementCount ( self , name ) : if name in [ "activeCells" , "predictedCells" , "predictedActiveCells" , "winnerCells" ] : return self . cellsPerColumn * self . columnCount else : raise Exception ( "Invalid output name specified: %s" % name )
Return the number of elements for the given output .
10,082
def learnSequences ( self , sequences ) : sequence_order = range ( len ( sequences ) ) if self . config [ "L2Params" ] [ "onlineLearning" ] : self . _setLearningMode ( l4Learning = True , l2Learning = True ) for _ in xrange ( self . numLearningPoints ) : random . shuffle ( sequence_order ) for i in sequence_order : sequence = sequences [ i ] for s in sequence : self . sensorInputs [ 0 ] . addDataToQueue ( list ( s ) , 0 , 0 ) self . network . run ( 1 ) self . sendReset ( ) else : self . _setLearningMode ( l4Learning = True , l2Learning = False ) for _ in xrange ( self . numLearningPoints ) : random . shuffle ( sequence_order ) for i in sequence_order : sequence = sequences [ i ] for s in sequence : self . sensorInputs [ 0 ] . addDataToQueue ( list ( s ) , 0 , 0 ) self . network . run ( 1 ) self . sendReset ( ) self . _setLearningMode ( l4Learning = False , l2Learning = True ) for i in sequence_order : sequence = sequences [ i ] for s in sequence : self . sensorInputs [ 0 ] . addDataToQueue ( list ( s ) , 0 , 0 ) self . network . run ( 1 ) self . sendReset ( ) self . _setLearningMode ( l4Learning = True , l2Learning = False ) for _ in xrange ( 5 ) : for i in sequence_order : sequence = sequences [ i ] for s in sequence : self . sensorInputs [ 0 ] . addDataToQueue ( list ( s ) , 0 , 0 ) self . network . run ( 1 ) self . sendReset ( ) self . _setLearningMode ( l4Learning = False , l2Learning = False ) self . sendReset ( ) for sequenceNum , sequence in enumerate ( sequences ) : for s in sequence : self . sensorInputs [ 0 ] . addDataToQueue ( list ( s ) , 0 , 0 ) self . network . run ( 1 ) self . objectL2Representations [ sequenceNum ] = self . getL2Representations ( ) self . sendReset ( ) return
Learns all provided sequences . Always resets the network in between sequences .
10,083
def getL4PredictedActiveCells ( self ) : predictedActive = [ ] for i in xrange ( self . numColumns ) : region = self . network . regions [ "L4Column_" + str ( i ) ] predictedActive . append ( region . getOutputData ( "predictedActiveCells" ) . nonzero ( ) [ 0 ] ) return predictedActive
Returns the predicted active cells in each column in L4 .
10,084
def _setLearningMode ( self , l4Learning = False , l2Learning = False ) : for column in self . L4Columns : column . setParameter ( "learn" , 0 , l4Learning ) for column in self . L2Columns : column . setParameter ( "learningMode" , 0 , l2Learning )
Sets the learning mode for L4 and L2 .
10,085
def printDiagnosticsAfterTraining ( exp , verbosity = 0 ) : print "Number of connected synapses per cell" l2 = exp . getAlgorithmInstance ( "L2" ) numConnectedCells = 0 connectedSynapses = 0 for c in range ( 4096 ) : cp = l2 . numberOfConnectedProximalSynapses ( [ c ] ) if cp > 0 : numConnectedCells += 1 connectedSynapses += cp print "Num L2 cells with connected synapses:" , numConnectedCells if numConnectedCells > 0 : print "Avg connected synapses per connected cell:" , float ( connectedSynapses ) / numConnectedCells print
Useful diagnostics of a trained system , for debugging .
10,086
def trainSequences ( sequences , exp , idOffset = 0 ) : for seqId in sequences : iterations = 3 * len ( sequences [ seqId ] ) for p in range ( iterations ) : s = sequences . provideObjectsToLearn ( [ seqId ] ) objectSDRs = dict ( ) objectSDRs [ seqId + idOffset ] = s [ seqId ] exp . learnObjects ( objectSDRs , reset = False ) exp . TMColumns [ 0 ] . reset ( ) exp . sendReset ( )
Train the network on all the sequences
10,087
def trainObjects ( objects , exp , numRepeatsPerObject , experimentIdOffset = 0 ) : objectsToLearn = objects . provideObjectsToLearn ( ) objectTraversals = { } for objectId in objectsToLearn : objectTraversals [ objectId + experimentIdOffset ] = objects . randomTraversal ( objectsToLearn [ objectId ] , numRepeatsPerObject ) exp . learnObjects ( objectTraversals )
Train the network on all the objects by randomly traversing points on each object . We offset the id of each object to avoid confusion with any sequences that might have been learned .
10,088
def inferObject ( exp , objectId , objects , objectName ) : objectSensations = { } objectSensations [ 0 ] = [ ] obj = objects [ objectId ] objectCopy = [ pair for pair in obj ] random . shuffle ( objectCopy ) for pair in objectCopy : objectSensations [ 0 ] . append ( pair ) inferConfig = { "numSteps" : len ( objectSensations [ 0 ] ) , "pairs" : objectSensations , "includeRandomLocation" : False , } inferenceSDRs = objects . provideObjectToInfer ( inferConfig ) exp . infer ( inferenceSDRs , objectName = objectName )
Run inference on the given object . objectName is the name of this object in the experiment .
10,089
def createSuperimposedSensorySDRs ( sequenceSensations , objectSensations ) : assert len ( sequenceSensations ) == len ( objectSensations ) superimposedSensations = [ ] for i , objectSensation in enumerate ( objectSensations ) : newSensation = { 0 : ( objectSensation [ 0 ] [ 0 ] , sequenceSensations [ i ] [ 0 ] [ 1 ] . union ( objectSensation [ 0 ] [ 1 ] ) ) } superimposedSensations . append ( newSensation ) return superimposedSensations
Given two lists of sensations , create a new list where the sensory SDRs are the union of the individual sensory SDRs . Keep the location SDRs from the object .
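A toy example of the sensation format this function implies: each sensation maps column index 0 to a (locationSDR, featureSDR) pair of sets; the SDR values are made up:

    sequenceSensations = [{0: (set(), {1, 2, 3})}]
    objectSensations = [{0: ({10, 11}, {3, 4, 5})}]
    combined = createSuperimposedSensorySDRs(sequenceSensations, objectSensations)
    # combined == [{0: ({10, 11}, {1, 2, 3, 4, 5})}]
    # The location SDR comes from the object; the feature SDRs are unioned.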
10,090
def runExperimentPool ( numSequences , numFeatures , numLocations , numObjects , numWorkers = 7 , nTrials = 1 , seqLength = 10 , figure = "" , numRepetitions = 1 , synPermProximalDecL2 = [ 0.001 ] , minThresholdProximalL2 = [ 10 ] , sampleSizeProximalL2 = [ 15 ] , inputSize = [ 1024 ] , basalPredictedSegmentDecrement = [ 0.0006 ] , resultsName = "convergence_results.pkl" ) : args = [ ] for bd in basalPredictedSegmentDecrement : for i in inputSize : for thresh in minThresholdProximalL2 : for dec in synPermProximalDecL2 : for s in sampleSizeProximalL2 : for o in reversed ( numSequences ) : for l in numLocations : for f in numFeatures : for no in numObjects : for t in range ( nTrials ) : args . append ( { "numSequences" : o , "numFeatures" : f , "numObjects" : no , "trialNum" : t , "seqLength" : seqLength , "numLocations" : l , "sampleSizeProximalL2" : s , "synPermProximalDecL2" : dec , "minThresholdProximalL2" : thresh , "numRepetitions" : numRepetitions , "figure" : figure , "inputSize" : i , "basalPredictedSegmentDecrement" : bd , } ) print "{} experiments to run, {} workers" . format ( len ( args ) , numWorkers ) if numWorkers > 1 : pool = Pool ( processes = numWorkers ) result = pool . map ( runExperiment , args ) else : result = [ ] for arg in args : result . append ( runExperiment ( arg ) ) with open ( resultsName , "wb" ) as f : cPickle . dump ( result , f ) return result
Run a bunch of experiments using a pool of numWorkers processes . For numSequences , numFeatures , and numLocations , pass in a list containing valid values for that parameter . The cross product of everything is run , and each combination is run nTrials times .
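A small illustrative invocation; the parameter values are arbitrary:

    results = runExperimentPool(numSequences=[5],
                                numFeatures=[10, 50],
                                numLocations=[100],
                                numObjects=[5],
                                numWorkers=4,
                                nTrials=2,
                                resultsName="toy_results.pkl")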
10,091
def runExperiment5A ( dirName ) : resultsFilename = os . path . join ( dirName , "sensorimotor_sequence_example.pkl" ) results = runExperiment ( { "numSequences" : 0 , "seqLength" : 10 , "numFeatures" : 100 , "trialNum" : 4 , "numObjects" : 50 , "numLocations" : 100 , } ) with open ( resultsFilename , "wb" ) as f : cPickle . dump ( results , f )
This runs the first experiment in the section Simulations with Sensorimotor Sequences : an example sensorimotor sequence .
10,092
def runExperiment5B ( dirName ) : resultsName = os . path . join ( dirName , "sensorimotor_batch_results_more_objects.pkl" ) numTrials = 10 featureRange = [ 10 , 50 , 100 , 150 , 500 ] objectRange = [ 110 , 130 , 200 , 300 ] locationRange = [ 100 ] runExperimentPool ( numSequences = [ 0 ] , numObjects = objectRange , numFeatures = featureRange , numLocations = locationRange , nTrials = numTrials , numWorkers = cpu_count ( ) - 1 , numRepetitions = 10 , resultsName = resultsName )
This runs the second experiment in the section Simulations with Sensorimotor Sequences . It averages over many parameter combinations . This experiment could take several hours . You can run faster versions by reducing the number of trials .
10,093
def runExperiment6 ( dirName ) : resultsFilename = os . path . join ( dirName , "combined_results.pkl" ) results = runExperiment ( { "numSequences" : 50 , "seqLength" : 10 , "numObjects" : 50 , "numFeatures" : 500 , "trialNum" : 8 , "numLocations" : 100 , "settlingTime" : 1 , "figure" : "6" , "numRepetitions" : 30 , "basalPredictedSegmentDecrement" : 0.001 , "stripStats" : False , } ) with open ( resultsFilename , "wb" ) as f : cPickle . dump ( results , f )
This runs the experiment in the section Simulations with Combined Sequences : an example stream containing a mixture of temporal and sensorimotor sequences .
10,094
def runExperimentS ( dirName ) : resultsFilename = os . path . join ( dirName , "superimposed_training.pkl" ) results = runExperiment ( { "numSequences" : 50 , "numObjects" : 50 , "seqLength" : 10 , "numFeatures" : 100 , "trialNum" : 8 , "numLocations" : 100 , "numRepetitions" : 30 , "sampleSizeProximalL2" : 15 , "minThresholdProximalL2" : 10 , "figure" : "S" , "stripStats" : False , } ) with open ( resultsFilename , "wb" ) as f : cPickle . dump ( results , f ) with open ( resultsFilename , "rb" ) as f : r = cPickle . load ( f ) r . pop ( "objects" ) r . pop ( "sequences" ) stat = r . pop ( "statistics" ) pprint . pprint ( r ) sObject = 0 sSequence = 0 for i in range ( 0 , 50 ) : sObject += sum ( stat [ i ] [ 'L4 PredictedActive C0' ] ) for i in range ( 50 , 100 ) : sSequence += sum ( stat [ i ] [ 'L4 PredictedActive C0' ] ) print sObject , sSequence
This runs an experiment where the network is trained on a stream containing a mixture of temporal and sensorimotor sequences .
10,095
def runExperimentSP ( dirName ) : resultsFilename = os . path . join ( dirName , "superimposed_128mcs.pkl" ) numTrials = 10 featureRange = [ 1000 ] objectRange = [ 50 ] runExperimentPool ( numSequences = objectRange , numObjects = objectRange , numFeatures = featureRange , numLocations = [ 100 ] , nTrials = numTrials , numWorkers = cpu_count ( ) - 1 , resultsName = resultsFilename , figure = "S" , numRepetitions = 30 , sampleSizeProximalL2 = [ 15 ] , minThresholdProximalL2 = [ 10 ] , synPermProximalDecL2 = [ 0.001 ] , basalPredictedSegmentDecrement = [ 0.0 , 0.001 , 0.002 , 0.003 , 0.004 , 0.005 , 0.01 , 0.02 , 0.04 , 0.08 , 0.12 ] , inputSize = [ 128 ] , ) print "Done with experiments"
This runs a pool of experiments where the network is trained on a stream containing a mixture of temporal and sensorimotor sequences .
10,096
def reset ( self , ) : self . activeState [ 't-1' ] . fill ( 0 ) self . activeState [ 't' ] . fill ( 0 ) self . predictedState [ 't-1' ] . fill ( 0 ) self . predictedState [ 't' ] . fill ( 0 ) self . learnState [ 't-1' ] . fill ( 0 ) self . learnState [ 't' ] . fill ( 0 ) self . confidence [ 't-1' ] . fill ( 0 ) self . confidence [ 't' ] . fill ( 0 ) self . segmentUpdates = { } self . _internalStats [ 'nInfersSinceReset' ] = 0 self . _internalStats [ 'curPredictionScore' ] = 0 self . _internalStats [ 'curPredictionScore2' ] = 0 self . _internalStats [ 'curFalseNegativeScore' ] = 0 self . _internalStats [ 'curFalsePositiveScore' ] = 0 self . _internalStats [ 'curMissing' ] = 0 self . _internalStats [ 'curExtra' ] = 0 self . _internalStats [ 'prevSequenceSignature' ] = None if self . collectSequenceStats : if self . _internalStats [ 'confHistogram' ] . sum ( ) > 0 : sig = self . _internalStats [ 'confHistogram' ] . copy ( ) sig = sig . reshape ( self . numberOfCols * self . cellsPerColumn ) self . _internalStats [ 'prevSequenceSignature' ] = sig self . _internalStats [ 'confHistogram' ] . fill ( 0 ) self . resetCalled = True
Reset the state of all cells . This is normally used between sequences while training . All internal states are reset to 0 .
10,097
def printComputeEnd ( self , output , learn = False ) : if self . verbosity >= 3 : print "----- computeEnd summary: " print "numBurstingCols: %s, " % ( self . activeState [ 't' ] . min ( axis = 1 ) . sum ( ) ) , print "curPredScore2: %s, " % ( self . _internalStats [ 'curPredictionScore2' ] ) , print "curFalsePosScore: %s, " % ( self . _internalStats [ 'curFalsePositiveScore' ] ) , print "1-curFalseNegScore: %s, " % ( 1 - self . _internalStats [ 'curFalseNegativeScore' ] ) , print "numPredictedCells[t-1]: %s" % ( self . predictedState [ 't-1' ] . sum ( ) ) , print "numSegments: " , self . getNumSegments ( ) print "----- activeState (%d on) ------" % ( self . activeState [ 't' ] . sum ( ) ) self . printActiveIndices ( self . activeState [ 't' ] ) if self . verbosity >= 5 : self . printState ( self . activeState [ 't' ] ) print "----- predictedState (%d on)-----" % ( self . predictedState [ 't' ] . sum ( ) ) self . printActiveIndices ( self . predictedState [ 't' ] ) if self . verbosity >= 5 : self . printState ( self . predictedState [ 't' ] ) print "----- cell confidence -----" self . printActiveIndices ( self . confidence [ 't' ] , andValues = True ) if self . verbosity >= 5 : self . printConfidence ( self . confidence [ 't' ] ) print "----- confidence[t-1] for currently active cells -----" cc = self . confidence [ 't-1' ] * self . activeState [ 't' ] self . printActiveIndices ( cc , andValues = True ) if self . verbosity == 4 : print "Cells, predicted segments only:" self . printCells ( predictedOnly = True ) elif self . verbosity >= 5 : print "Cells, all segments:" self . printCells ( predictedOnly = False ) print
Called at the end of inference to print out various diagnostic information based on the current verbosity level .
10,098
def computePhase2 ( self , doLearn = False ) : for c in xrange ( self . numberOfCols ) : buPredicted = False for i in xrange ( self . cellsPerColumn ) : maxConfidence = 0 for s in self . cells [ c ] [ i ] : if self . isSegmentActive ( s , self . activeState [ 't' ] ) : self . predictedState [ 't' ] [ c , i ] = 1 buPredicted = True maxConfidence = max ( maxConfidence , s . dutyCycle ( readOnly = True ) ) if doLearn : s . totalActivations += 1 s . lastActiveIteration = self . iterationIdx activeUpdate = self . getSegmentActiveSynapses ( c , i , s , 't' ) activeUpdate . phase1Flag = False self . addToSegmentUpdates ( c , i , activeUpdate ) self . confidence [ 't' ] [ c , i ] = maxConfidence
This is phase 2 of learning , inference , and multistep prediction . During this phase , all the cells with lateral support have their predictedState turned on , and the firing segments are queued up for updates .
10,099
def columnConfidences ( self , cellConfidences = None ) : if cellConfidences is None : cellConfidences = self . confidence [ 't' ] colConfidences = cellConfidences . sum ( axis = 1 ) return colConfidences
Compute the column confidences given the cell confidences . If None is passed in for cellConfidences , it uses the stored cell confidences from the last compute .
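A minimal numpy illustration of the per-column sum; the 2x3 shape (columns x cells per column) is made up:

    import numpy as np

    cellConfidences = np.array([[0.1, 0.2, 0.0],
                                [0.0, 0.4, 0.4]])  # shape: (numberOfCols, cellsPerColumn)
    colConfidences = cellConfidences.sum(axis=1)   # array([0.3, 0.8])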