| idx (int64, 0–63k) | question (string, 61–4.03k chars) | target (string, 6–1.23k chars) |
|---|---|---|
9,900 | def saveData ( self , dataOutputFile , categoriesOutputFile ) : if self . records is None : return False if not dataOutputFile . endswith ( "csv" ) : raise TypeError ( "data output file must be csv." ) if not categoriesOutputFile . endswith ( "json" ) : raise TypeError ( "category output file must be json" ) dataOutput... | Save the processed data and the associated category mapping . |
9,901 | def generateSequence ( self , text , preprocess = False ) : tokens = TextPreprocess ( ) . tokenize ( text ) cat = [ - 1 ] self . sequenceCount += 1 uniqueID = "q" data = self . _formatSequence ( tokens , cat , self . sequenceCount - 1 , uniqueID ) return data | Return a list of lists representing the text sequence in network data format . Does not preprocess the text . |
9,902 | def getSamples ( netDataFile ) : try : with open ( netDataFile ) as f : reader = csv . reader ( f ) header = next ( reader , None ) next ( reader , None ) resetIdx = next ( reader ) . index ( "R" ) tokenIdx = header . index ( "_token" ) catIdx = header . index ( "_category" ) idIdx = header . index ( "ID" ) currentSamp... | Returns samples joined at reset points . |
9,903 | def getClassifications ( networkDataFile ) : try : with open ( networkDataFile ) as f : reader = csv . reader ( f ) next ( reader , None ) next ( reader , None ) specials = next ( reader ) resetIdx = specials . index ( "R" ) classIdx = specials . index ( "C" ) classifications = [ ] for line in reader : if int ( line [ ... | Returns the classifications at the indices where the data sequences reset . |
9,904 | def getNumberOfTokens ( networkDataFile ) : try : with open ( networkDataFile ) as f : reader = csv . reader ( f ) next ( reader , None ) next ( reader , None ) resetIdx = next ( reader ) . index ( "R" ) count = 0 numTokens = [ ] for line in reader : if int ( line [ resetIdx ] ) == 1 : if count != 0 : numTokens . appen... | Returns the number of tokens for each sequence |
9,905 | def getResetsIndices ( networkDataFile ) : try : with open ( networkDataFile ) as f : reader = csv . reader ( f ) next ( reader , None ) next ( reader , None ) resetIdx = next ( reader ) . index ( "R" ) resets = [ ] for i , line in enumerate ( reader ) : if int ( line [ resetIdx ] ) == 1 : resets . append ( i ) return ... | Returns the indices at which the data sequences reset . |
9,906 | def lastNoiseCurve ( expPath , suite , iteration = "last" ) : noiseValues = [ "0.0" , "0.05" , "0.1" , "0.15" , "0.2" , "0.25" , "0.3" , "0.35" , "0.4" , "0.45" , "0.5" ] print ( "\nNOISE CURVE =====" , expPath , "====== ITERATION:" , iteration , "=========" ) try : result = suite . get_value ( expPath , 0 , noiseValue... | Print the noise errors from the last iteration of this experiment |
9,907 | def learningCurve ( expPath , suite ) : print ( "\nLEARNING CURVE ================" , expPath , "=====================" ) try : headers = [ "testResults" , "validation" , "bgResults" , "elapsedTime" ] result = suite . get_value ( expPath , 0 , headers , "all" ) info = [ ] maxValidationAccuracy = - 1.0 maxTestAccuracy =... | Print the test validation and other scores from each iteration of this experiment . We select the test score that corresponds to the iteration with maximum validation accuracy . |
9,908 | def bestScore ( expPath , suite ) : maxValidationAccuracy = - 1.0 maxTestAccuracy = - 1.0 maxTotalAccuracy = - 1.0 maxBGAccuracy = - 1.0 maxIter = - 1 try : headers = [ "testResults" , "validation" , "bgResults" , "elapsedTime" , "totalCorrect" ] result = suite . get_value ( expPath , 0 , headers , "all" ) for i , v in... | Given a single experiment return the test validation and other scores from the iteration with maximum validation accuracy . |
9,909 | def findOptimalResults ( expName , suite , outFile ) : writer = csv . writer ( outFile ) headers = [ "testAccuracy" , "bgAccuracy" , "maxTotalAccuracy" , "experiment path" ] writer . writerow ( headers ) info = [ ] print ( "\n================" , expName , "=====================" ) try : values , params = suite . get_va... | Go through every experiment in the specified folder . For each experiment find the iteration with the best validation score and return the metrics associated with that iteration . |
9,910 | def getErrorBars ( expPath , suite ) : exps = suite . get_exps ( expPath ) testScores = np . zeros ( len ( exps ) ) noiseScores = np . zeros ( len ( exps ) ) for i , e in enumerate ( exps ) : maxTestAccuracy , maxValidationAccuracy , maxBGAccuracy , maxIter , maxTotalAccuracy = bestScore ( e , suite ) testScores [ i ] ... | Go through each experiment in the path . Get the best scores for each experiment based on accuracy on validation set . Print out overall mean and stdev for test accuracy BG accuracy and noise accuracy . |
9,911 | def setCompare ( a , b , aKey = None , bKey = None , leftMinusRight = False , rightMinusLeft = False ) : aKey = aKey if aKey is not None else a bKey = bKey if bKey is not None else b aWithinBMask = np . in1d ( aKey , bKey ) if rightMinusLeft : bWithinAMask = np . in1d ( bKey , aKey ) if leftMinusRight : return ( a [ aW... | Compute the intersection and differences between two arrays comparing elements by their key . |
9,912 | def argmaxMulti ( a , groupKeys , assumeSorted = False ) : if not assumeSorted : sorter = np . argsort ( groupKeys , kind = "mergesort" ) a = a [ sorter ] groupKeys = groupKeys [ sorter ] _ , indices , lengths = np . unique ( groupKeys , return_index = True , return_counts = True ) maxValues = np . maximum . reduceat (... | This is like numpy s argmax but it returns multiple maximums . |
9,913 | def getAllCellsInColumns ( columns , cellsPerColumn ) : return ( ( columns * cellsPerColumn ) . reshape ( ( - 1 , 1 ) ) + np . arange ( cellsPerColumn , dtype = "uint32" ) ) . flatten ( ) | Calculate all cell indices in the specified columns . |
9,914 | def letterSequence ( letters , w = 40 ) : sequence = [ ] for letter in letters : i = ord ( letter ) - ord ( 'A' ) sequence . append ( set ( range ( i * w , ( i + 1 ) * w ) ) ) return sequence | Return a list of input vectors corresponding to sequence of letters . The vector for each letter has w contiguous bits ON and represented as a sequence of non - zero indices . |
9,915 | def getHighOrderSequenceChunk ( it , switchover = 1000 , w = 40 , n = 2048 ) : if it % 10 == 3 : s = numpy . random . randint ( 5 ) if it <= switchover : if s == 0 : label = "XABCDE" elif s == 1 : label = "YCBEAF" elif s == 2 : label = "GHIJKL" elif s == 3 : label = "WABCMN" else : label = "ZDBCAE" else : if s == 0 : l... | Given an iteration index returns a list of vectors to be appended to the input stream as well as a string label identifying the sequence . This version generates a bunch of high order sequences . The first element always provides sufficient context to predict the rest of the elements . |
9,916 | def addNoise ( vecs , percent = 0.1 , n = 2048 ) : noisyVecs = [ ] for vec in vecs : nv = vec . copy ( ) for idx in vec : if numpy . random . random ( ) <= percent : nv . discard ( idx ) nv . add ( numpy . random . randint ( n ) ) noisyVecs . append ( nv ) return noisyVecs | Add noise to the given sequence of vectors and return the modified sequence . A percentage of the on bits are shuffled to other locations . |
9,917 | def killCells ( i , options , tm ) : if options . simulation == "killer" : if i == options . switchover : print "i=" , i , "Killing cells for the first time!" tm . killCells ( percent = options . noise ) if i == options . secondKill : print "i=" , i , "Killing cells again up to" , options . secondNoise tm . killCells (... | Kill cells as appropriate |
9,918 | def printTemporalMemory ( tm , outFile ) : table = PrettyTable ( [ "Parameter name" , "Value" , ] ) table . add_row ( [ "columnDimensions" , tm . getColumnDimensions ( ) ] ) table . add_row ( [ "cellsPerColumn" , tm . getCellsPerColumn ( ) ] ) table . add_row ( [ "activationThreshold" , tm . getActivationThreshold ( ) ... | Given an instance of TemporalMemory print out the relevant parameters |
9,919 | def printOptions ( options , tm , outFile ) : print >> outFile , "TM parameters:" printTemporalMemory ( tm , outFile ) print >> outFile , "Experiment parameters:" for k , v in options . __dict__ . iteritems ( ) : print >> outFile , " %s : %s" % ( k , str ( v ) ) outFile . flush ( ) | Pretty print the set of options |
9,920 | def runBasic ( noiseLevel = None , profile = False ) : exp = L4L2Experiment ( "basic_continuous" , numCorticalColumns = 2 ) objects = createObjectMachine ( machineType = "continuous" , numInputBits = 21 , sensorInputSize = 1024 , externalInputSize = 1024 , numCorticalColumns = 2 , ) objects . addObject ( Sphere ( radiu... | Runs a basic experiment on continuous locations learning a few locations on four basic objects and inferring one of them . |
9,921 | def plotBoostTrace ( sp , inputVectors , columnIndex ) : numInputVector , inputSize = inputVectors . shape columnNumber = np . prod ( sp . getColumnDimensions ( ) ) boostFactorsTrace = np . zeros ( ( columnNumber , numInputVector ) ) activeDutyCycleTrace = np . zeros ( ( columnNumber , numInputVector ) ) minActiveDutyC... | Plot boostfactor for a selected column |
9,922 | def next_epoch ( self ) : epoch = next ( self . _all_epochs ) folder = os . path . join ( self . _root , str ( epoch ) , self . _subset ) self . data = [ ] silence = None gc . disable ( ) for filename in os . listdir ( folder ) : command = os . path . splitext ( os . path . basename ( filename ) ) [ 0 ] with open ( os ... | Load next epoch from disk |
9,923 | def isValid ( folder , epoch = 0 ) : return os . path . exists ( os . path . join ( folder , str ( epoch ) , "train" , "silence.pkl" ) ) | Check if the given folder is a valid preprocessed dataset |
9,924 | def burstColumn ( self , column , columnMatchingSegments , prevActiveCells , prevWinnerCells , learn ) : start = self . cellsPerColumn * column cellsForColumn = [ cellIdx for cellIdx in xrange ( start , start + self . cellsPerColumn ) if cellIdx not in self . deadCells ] return self . _burstColumn ( self . connections ... | Activates all of the cells in an unpredicted active column chooses a winner cell and if learning is turned on learns on one segment growing a new segment if necessary . |
9,925 | def printDeadCells ( self ) : columnCasualties = numpy . zeros ( self . numberOfColumns ( ) ) for cell in self . deadCells : col = self . columnForCell ( cell ) columnCasualties [ col ] += 1 for col in range ( self . numberOfColumns ( ) ) : print col , columnCasualties [ col ] | Print statistics for the dead cells |
9,926 | def reset ( self ) : self . _unionSDR = numpy . zeros ( shape = ( self . _numInputs , ) ) self . _activeCellsHistory = [ ] | Reset Union Pooler clear active cell history |
9,927 | def getSparsity ( self ) : sparsity = numpy . sum ( self . _unionSDR ) / self . _numInputs return sparsity | Return the sparsity of the current union SDR |
9,928 | def plotDataframe ( table , title , plotPath ) : plt . figure ( ) axes = table . T . plot ( subplots = True , sharex = True , grid = True , legend = True , title = title , figsize = ( 8 , 11 ) ) accuracy = next ( ax for ax in axes if ax . lines [ 0 ] . get_label ( ) == 'accuracy' ) accuracy . set_ylim ( 0.0 , 1.0 ) plt... | Plot Panda dataframe . |
9,929 | def getDatetimeAxis ( ) : dataSet = 'nyc_taxi' filePath = './data/' + dataSet + '.csv' data = pd . read_csv ( filePath , header = 0 , skiprows = [ 1 , 2 ] , names = [ 'datetime' , 'value' , 'timeofday' , 'dayofweek' ] ) xaxisDate = pd . to_datetime ( data [ 'datetime' ] ) return xaxisDate | use datetime as x - axis |
9,930 | def encodeDeltas ( self , dx , dy ) : dxe = self . dxEncoder . encode ( dx ) dye = self . dyEncoder . encode ( dy ) ex = numpy . outer ( dxe , dye ) return ex . flatten ( ) . nonzero ( ) [ 0 ] | Return the SDR for dx dy |
9,931 | def encodeThetas ( self , theta1 , theta2 ) : t1e = self . theta1Encoder . encode ( theta1 ) t2e = self . theta2Encoder . encode ( theta2 ) ex = numpy . outer ( t2e , t1e ) return ex . flatten ( ) . nonzero ( ) [ 0 ] | Return the SDR for theta1 and theta2 |
9,932 | def decodeThetas ( self , predictedCells ) : a = numpy . zeros ( self . bottomUpInputSize ) a [ predictedCells ] = 1 a = a . reshape ( ( self . theta1Encoder . getWidth ( ) , self . theta1Encoder . getWidth ( ) ) ) theta1PredictedBits = a . mean ( axis = 0 ) . nonzero ( ) [ 0 ] theta2PredictedBits = a . mean ( axis = 1... | Given the set of predicted cells return the predicted theta1 and theta2 |
9,933 | def inferTM ( self , bottomUp , externalInput ) : self . reset ( ) self . tm . compute ( bottomUp , basalInput = externalInput , learn = False ) return self . tm . getPredictiveCells ( ) | Run inference and return the set of predicted cells |
9,934 | def classify ( self , encoding , num = 1 ) : probDist = numpy . exp ( encoding ) / numpy . sum ( numpy . exp ( encoding ) ) sortIdx = numpy . argsort ( probDist ) return sortIdx [ - num : ] . tolist ( ) | Classify with basic one - hot local incoding |
9,935 | def _seed ( self , seed = - 1 ) : if seed != - 1 : self . _random = np . random . RandomState ( seed ) else : self . _random = np . random . RandomState ( ) | Initialize the random seed |
9,936 | def initialize_weights ( self ) : n = self . _outputSize m = self . _inputSize self . _Q = self . _random . sample ( ( n , m ) ) for i in range ( n ) : self . _Q [ i ] /= np . sqrt ( np . dot ( self . _Q [ i ] , self . _Q [ i ] ) ) | Randomly initializes the visible - to - hidden connections . |
9,937 | def _inhibitColumnsWithLateral ( self , overlaps , lateralConnections ) : n , m = self . shape y = np . zeros ( n ) s = self . sparsity L = lateralConnections desiredWeight = self . codeWeight inhSignal = np . zeros ( n ) sortedIndices = np . argsort ( overlaps , kind = 'mergesort' ) [ : : - 1 ] currentWeight = 0 for i... | Performs an experimentatl local inhibition . Local inhibition is iteratively performed on a column by column basis . |
9,938 | def compute ( self , inputVector , learn , activeArray , applyLateralInhibition = True ) : if not isinstance ( inputVector , np . ndarray ) : raise TypeError ( "Input vector must be a numpy array, not %s" % str ( type ( inputVector ) ) ) if inputVector . size != self . _numInputs : raise ValueError ( "Input vector dime... | This is the primary public method of the LateralPooler class . This function takes a input vector and outputs the indices of the active columns . If learn is set to True this method also updates the permanences of the columns and their lateral inhibitory connection weights . |
9,939 | def feedforward ( self ) : m = self . _numInputs n = self . _numColumns W = np . zeros ( ( n , m ) ) for i in range ( self . _numColumns ) : self . getPermanence ( i , W [ i , : ] ) return W | Soon to be depriciated . Needed to make the SP implementation compatible with some older code . |
9,940 | def learn ( self ) : self . setLearning ( True ) for obj in self . objects : self . sendReset ( ) previousLocation = [ None ] * self . numColumns displacement = [ 0. , 0. ] features = obj [ "features" ] numOfFeatures = len ( features ) touchSequence = np . random . permutation ( numOfFeatures ) for sensation in xrange ... | Learn all objects on every column . Each column will learn all the features of every object and store the the object s L2 representation to be later used in the inference stage |
9,941 | def createL2456Columns ( network , networkConfig ) : numCorticalColumns = networkConfig [ "numCorticalColumns" ] for i in xrange ( numCorticalColumns ) : networkConfigCopy = copy . deepcopy ( networkConfig ) randomSeedBase = networkConfigCopy [ "randomSeedBase" ] networkConfigCopy [ "L2Params" ] [ "seed" ] = randomSeed... | Create a network consisting of multiple L2456 columns as described in the file comments above . |
9,942 | def _mmComputeTransitionTraces ( self ) : if not self . _mmTransitionTracesStale : return self . _mmData [ "predictedActiveCellsForSequence" ] = defaultdict ( set ) self . _mmTraces [ "predictedActiveCells" ] = IndicesTrace ( self , "predicted => active cells (correct)" ) self . _mmTraces [ "predictedInactiveCells" ] =... | Computes the transition traces if necessary . |
9,943 | def get_biased_correlations ( data , threshold = 10 ) : data = data . toDense ( ) correlations = numpy . corrcoef ( data , rowvar = False ) highest_correlations = [ ] for row in correlations : highest_correlations += sorted ( row , reverse = True ) [ 1 : threshold + 1 ] return numpy . mean ( highest_correlations ) | Gets the highest few correlations for each bit across the entirety of the data . Meant to provide a comparison point for the pairwise correlations reported in the literature which are typically between neighboring neurons tuned to the same inputs . We would expect these neurons to be among the most correlated in any re... |
9,944 | def get_pattern_correlations ( data ) : patterns = [ data . rowNonZeros ( i ) [ 0 ] for i in range ( data . nRows ( ) ) ] dense_data = data . toDense ( ) correlations = numpy . corrcoef ( dense_data , rowvar = False ) correlations = numpy . nan_to_num ( correlations ) pattern_correlations = [ ] for pattern in patterns ... | Gets the average correlation between all bits in patterns across the entire dataset . Assumes input is a sparse matrix . Weighted by pattern rather than by bit ; this is the average pairwise correlation for every pattern in the data and is not the average pairwise correlation for all bits that ever cooccur . This is a ... |
9,945 | def generate_correlated_data ( dim = 2000 , num_active = 40 , num_samples = 1000 , num_cells_per_cluster_size = [ 2000 ] * 8 , cluster_sizes = range ( 2 , 10 ) ) : clusters = [ ] cells = set ( range ( dim ) ) for size , num_cells in zip ( cluster_sizes , num_cells_per_cluster_size ) : for i in range ( int ( 1. * num_ce... | Generates a set of data drawn from a uniform distribution but with bits clustered to force correlation between neurons . Clusters are randomly chosen to form an activation pattern in such a way as to maintain sparsity . |
9,946 | def apply_noise ( data , noise ) : if noise >= 1 : noise = noise / 100. for i in range ( data . nRows ( ) ) : ones = data . rowNonZeros ( i ) [ 0 ] replace_indices = numpy . random . choice ( ones , size = int ( len ( ones ) * noise ) , replace = False ) for index in replace_indices : data [ i , index ] = 0 new_indices... | Applies noise to a sparse matrix . Noise can be an integer between 0 and 100 indicating the percentage of ones in the original input to move or a float in [ 0 1 ) indicating the same thing . The input matrix is modified in - place and nothing is returned . This operation does not affect the sparsity of the matrix or of... |
9,947 | def shuffle_sparse_matrix_and_labels ( matrix , labels ) : print "Shuffling data" new_matrix = matrix . toDense ( ) rng_state = numpy . random . get_state ( ) numpy . random . shuffle ( new_matrix ) numpy . random . set_state ( rng_state ) numpy . random . shuffle ( labels ) print "Data shuffled" return SM32 ( new_matr... | Shuffles a sparse matrix and set of labels together . Resorts to densifying and then re - sparsifying the matrix for convenience . Still very fast . |
9,948 | def split_sparse_matrix ( matrix , num_categories ) : if matrix . nRows ( ) < num_categories : return [ matrix . getSlice ( i , i + 1 , 0 , matrix . nCols ( ) ) for i in range ( matrix . nRows ( ) ) ] + [ SM32 ( ) for i in range ( num_categories - matrix . nRows ( ) ) ] else : inc = matrix . nRows ( ) / num_categories ... | An analog of numpy . split for our sparse matrix . If the number of categories does not divide the number of rows in the matrix all overflow is placed in the final bin . |
9,949 | def generate_phase_1 ( dim = 40 ) : phase_1 = numpy . random . normal ( 0 , 1 , dim ) for i in range ( dim - 4 , dim ) : phase_1 [ i ] = 1.0 return phase_1 | The first step in creating datapoints in the Poirazi & Mel model . This returns a vector of dimension dim with the last four values set to 1 and the rest drawn from a normal distribution . |
9,950 | def generate_phase_2 ( phase_1 , dim = 40 ) : phase_2 = [ ] for i in range ( dim ) : indices = [ numpy . random . randint ( 0 , dim ) for i in range ( 4 ) ] phase_2 . append ( numpy . prod ( [ phase_1 [ i ] for i in indices ] ) ) return phase_2 | The second step in creating datapoints in the Poirazi & Mel model . This takes a phase 1 vector and creates a phase 2 vector where each point is the product of four elements of the phase 1 vector randomly drawn with replacement . |
9,951 | def bin_number ( datapoint , intervals ) : index = numpy . searchsorted ( intervals , datapoint ) return [ 0 if index != i else 1 for i in range ( len ( intervals ) + 1 ) ] | Given a datapoint and intervals representing bins returns the number represented in binned form where the bin including the value is set to 1 and all others are 0 . |
9,952 | def bin_data ( data , dim = 40 , num_bins = 10 ) : intervals = generate_RF_bins ( data , dim , num_bins ) binned_data = [ numpy . concatenate ( [ bin_number ( data [ x ] [ i ] , intervals [ i ] ) for i in range ( len ( data [ x ] ) ) ] ) for x in range ( len ( data ) ) ] return binned_data | Fully bins the data generated by generate_data using generate_RF_bins and bin_number . |
9,953 | def killCells ( self , killCellPercent ) : if killCellPercent <= 0 : return numHiddenNeurons = self . net . numHiddenNeurons numDead = round ( killCellPercent * numHiddenNeurons ) zombiePermutation = numpy . random . permutation ( numHiddenNeurons ) deadCells = zombiePermutation [ 0 : numDead ] liveCells = zombiePermut... | kill a fraction of cells from the network |
9,954 | def printFrequencyStatistics ( counts , frequencies , numWords , size ) : avgBits = float ( counts . sum ( ) ) / numWords print "Retina width=128, height=128" print "Total number of words processed=" , numWords print "Average number of bits per word=" , avgBits , print "avg sparsity=" , avgBits / size print "counts mat... | Print interesting statistics regarding the counts and frequency matrices |
9,955 | def countRandomBitFrequencies ( numTerms = 100000 , percentSparsity = 0.01 ) : counts = SparseMatrix ( ) size = 128 * 128 counts . resize ( 1 , size ) sparseBitmap = SparseMatrix ( ) sparseBitmap . resize ( 1 , size ) random . seed ( 42 ) numWords = 0 for term in xrange ( numTerms ) : bitmap = random . sample ( xrange ... | Create a uniformly random counts matrix through sampling . |
9,956 | def plotlyFrequencyHistogram ( counts ) : data = [ go . Histogram ( x = tuple ( count for _ , _ , count in counts . getNonZerosSorted ( ) ) ) ] py . plot ( data , filename = os . environ . get ( "HEATMAP_NAME" , str ( datetime . datetime . now ( ) ) ) ) | x - axis is a count of how many times a bit was active y - axis is number of bits that have that frequency |
9,957 | def getSparseTensor ( numNonzeros , inputSize , outputSize , onlyPositive = False , fixedRange = 1.0 / 24 ) : w = torch . Tensor ( outputSize , inputSize , ) if onlyPositive : w . data . uniform_ ( 0 , fixedRange ) else : w . data . uniform_ ( - fixedRange , fixedRange ) if numNonzeros < inputSize : numZeros = inputSiz... | Return a random tensor that is initialized like a weight matrix Size is outputSize X inputSize where weightSparsity% of each row is non - zero |
9,958 | def getPermutedTensors ( W , kw , n , m2 , noisePct ) : W2 = W . repeat ( m2 , 1 ) nz = W [ 0 ] . nonzero ( ) numberToZero = int ( round ( noisePct * kw ) ) for i in range ( m2 ) : indices = np . random . permutation ( kw ) [ 0 : numberToZero ] for j in indices : W2 [ i , nz [ j ] ] = 0 return W2 | Generate m2 noisy versions of W . Noisy version of W is generated by randomly permuting noisePct of the non - zero components to other components . |
9,959 | def getTheta ( k , nTrials = 100000 ) : theDots = np . zeros ( nTrials ) w1 = getSparseTensor ( k , k , nTrials , fixedRange = 1.0 / k ) for i in range ( nTrials ) : theDots [ i ] = w1 [ i ] . dot ( w1 [ i ] ) dotMean = theDots . mean ( ) print ( "k=" , k , "min/mean/max diag of w dot products" , theDots . min ( ) , do... | Estimate a reasonable value of theta for this k . |
9,960 | def returnFalseNegatives ( kw , noisePct , n , theta ) : W = getSparseTensor ( kw , n , 1 , fixedRange = 1.0 / kw ) m2 = 10 inputVectors = getPermutedTensors ( W , kw , n , m2 , noisePct ) dot = inputVectors . matmul ( W . t ( ) ) numMatches = ( ( dot >= theta ) . sum ( ) ) . item ( ) pctMatches = numMatches / float ( ... | Generate a weight vector W with kw non - zero components . Generate 1000 noisy versions of W and return the match statistics . Noisy version of W is generated by randomly setting noisePct of the non - zero components to zero . |
9,961 | def computeScaledProbabilities ( listOfScales = [ 1.0 , 1.5 , 2.0 , 2.5 , 3.0 , 3.5 , 4.0 ] , listofkValues = [ 64 , 128 , 256 ] , kw = 32 , n = 1000 , numWorkers = 10 , nTrials = 1000 , ) : args = [ ] theta , _ = getTheta ( kw ) for ki , k in enumerate ( listofkValues ) : for si , s in enumerate ( listOfScales ) : arg... | Compute the impact of S on match probabilities for a fixed value of n . |
9,962 | def computeMatchProbabilityOmega ( k , bMax , theta , nTrials = 100 ) : omegaProb = np . zeros ( bMax + 1 ) for b in range ( 1 , bMax + 1 ) : xwb = getSparseTensor ( b , b , nTrials , fixedRange = 1.0 / k ) xib = getSparseTensor ( b , b , nTrials , onlyPositive = True , fixedRange = 2.0 / k ) r = xwb . matmul ( xib . t... | The Omega match probability estimates the probability of matching when both vectors have exactly b components in common . This function computes this probability for b = 1 to bMax . |
9,963 | def plotMatches2 ( listofNValues , errors , listOfScales , scaleErrors , fileName = "images/scalar_matches.pdf" ) : w , h = figaspect ( 0.4 ) fig , ( ax1 , ax2 ) = plt . subplots ( 1 , 2 , figsize = ( w , h ) ) plotMatches ( listofNValues , errors , fileName = None , fig = fig , ax = ax1 ) plotScaledMatches ( listOfSca... | Plot two figures side by side in an aspect ratio appropriate for the paper . |
9,964 | def createPregeneratedGraphs ( ) : listofNValues = [ 250 , 500 , 1000 , 1500 , 2000 , 2500 ] kw = 32 errors = np . array ( [ [ 3.65083333e-03 , 3.06166667e-04 , 1.89166667e-05 , 4.16666667e-06 , 1.50000000e-06 , 9.16666667e-07 ] , [ 2.44633333e-02 , 3.64491667e-03 , 3.16083333e-04 , 6.93333333e-05 , 2.16666667e-05 , 8.... | Creates graphs based on previous runs of the scripts . Useful for editing graph format for writeups . |
9,965 | def _learn ( connections , rng , learningSegments , activeInput , potentialOverlaps , initialPermanence , sampleSize , permanenceIncrement , permanenceDecrement , maxSynapsesPerSegment ) : connections . adjustSynapses ( learningSegments , activeInput , permanenceIncrement , - permanenceDecrement ) if sampleSize == - 1 ... | Adjust synapse permanences grow new synapses and grow new segments . |
9,966 | def compute ( self , sensorToBodyByColumn , sensorToSpecificObjectByColumn ) : votesByCell = np . zeros ( self . cellCount , dtype = "int" ) self . activeSegmentsByColumn = [ ] for ( connections , activeSensorToBodyCells , activeSensorToSpecificObjectCells ) in zip ( self . connectionsByColumn , sensorToBodyByColumn , ... | Compute the body s location relative to a specific object from an array of sensor s location relative to a specific object and an array of sensor s location relative to body |
9,967 | def metricCompute ( self , sensorToBody , bodyToSpecificObject ) : overlaps = self . metricConnections . computeActivity ( { "bodyToSpecificObject" : bodyToSpecificObject , "sensorToBody" : sensorToBody , } ) self . activeMetricSegments = np . where ( overlaps >= 2 ) [ 0 ] self . activeCells = np . unique ( self . metr... | Compute the sensor s location relative to a specific object from the body s location relative to a specific object and the sensor s location relative to body |
9,968 | def anchorCompute ( self , anchorInput , learn ) : if learn : self . _anchorComputeLearningMode ( anchorInput ) else : overlaps = self . anchorConnections . computeActivity ( anchorInput , self . connectedPermanence ) self . activeSegments = np . where ( overlaps >= self . activationThreshold ) [ 0 ] self . activeCells... | Compute the sensor s location relative to a specific object from the feature - location pair . |
9,969 | def compute ( self , egocentricLocation ) : offsetInCellFields = ( np . matmul ( self . rotationMatrix , egocentricLocation ) * self . cellFieldsPerUnitDistance ) np . mod ( offsetInCellFields , self . cellDimensions , out = offsetInCellFields ) self . activeCells = np . unique ( np . ravel_multi_index ( np . floor ( o... | Compute the new active cells from the given sensor location relative to body vector . |
9,970 | def htmresearchCorePrereleaseInstalled ( ) : try : coreDistribution = pkg_resources . get_distribution ( "htmresearch-core" ) if pkg_resources . parse_version ( coreDistribution . version ) . is_prerelease : return True except pkg_resources . DistributionNotFound : pass return False | Make an attempt to determine if a pre - release version of htmresearch - core is installed already . |
9,971 | def infer ( self , sensationList , reset = True , objectName = None ) : self . _unsetLearningMode ( ) statistics = collections . defaultdict ( list ) if objectName is not None : if objectName not in self . objectRepresentationsL2 : raise ValueError ( "The provided objectName was not given during" " learning" ) for sens... | Infer on a given set of sensations for a single object . |
9,972 | def getDefaultParams ( self ) : return { "sensorParams" : { "outputWidth" : self . sensorInputSize , } , "coarseSensorParams" : { "outputWidth" : self . sensorInputSize , } , "locationParams" : { "activeBits" : 41 , "outputWidth" : self . sensorInputSize , "radius" : 2 , "verbosity" : 0 , } , "L4Params" : { "columnCoun... | Returns a good default set of parameters to use in L2456 regions |
9,973 | def _retrieveRegions ( self ) : self . sensors = [ ] self . coarseSensors = [ ] self . locationInputs = [ ] self . L4Columns = [ ] self . L2Columns = [ ] self . L5Columns = [ ] self . L6Columns = [ ] for i in xrange ( self . numColumns ) : self . sensors . append ( self . network . regions [ "sensorInput_" + str ( i ) ... | Retrieve and store Python region instances for each column |
9,974 | def plotSuccessRate_varyNumColumns ( noiseSigma , noiseEverywhere ) : noiseLevels = [ x * 0.01 for x in xrange ( 0 , 101 , 5 ) ] l2Overrides = { "sampleSizeDistal" : 20 } columnCounts = [ 1 , 2 , 3 , 4 ] results = defaultdict ( list ) for trial in xrange ( 1 ) : print "trial" , trial objectDescriptions = createRandomOb... | Run and plot the experiment varying the number of cortical columns . |
9,975 | def randomTraversal ( sensations , numTraversals ) : newSensations = [ ] for _ in range ( numTraversals ) : s = copy . deepcopy ( sensations ) random . shuffle ( s ) newSensations += s return newSensations | Given a list of sensations return the SDRs that would be obtained by numTraversals random traversals of that set of sensations . |
9,976 | def compute ( self , xt1 , yt1 , xt , yt , theta1t1 , theta2t1 , theta1 , theta2 ) : dx = xt - xt1 dy = yt - yt1 if self . numPoints < self . maxPoints : self . dxValues [ self . numPoints , 0 ] = dx self . dxValues [ self . numPoints , 1 ] = dy self . thetaValues [ self . numPoints , 0 ] = theta1 self . thetaValues [ ... | Accumulate the various inputs . |
9,977 | def bind ( cell1 , cell2 , moduleDimensions ) : cell1Coords = np . unravel_index ( cell1 , moduleDimensions ) cell2Coords = np . unravel_index ( cell2 , moduleDimensions ) transformCoords = [ ( c2 - c1 ) % m for c1 , c2 , m in itertools . izip ( cell1Coords , cell2Coords , moduleDimensions ) ] return np . ravel_multi_i... | Return transform index for given cells . |
9,978 | def unbind ( cell1 , transform , moduleDimensions ) : cell1Coords = np . unravel_index ( cell1 , moduleDimensions ) transformCoords = np . unravel_index ( transform , moduleDimensions ) cell2Coords = [ ( t + c1 ) % m for c1 , t , m in itertools . izip ( cell1Coords , transformCoords , moduleDimensions ) ] return np . r... | Return the cell index corresponding to the other half of the transform . |
9,979 | def updatePlaceWeights ( self ) : self . weightsPI += np . outer ( self . activationsI - self . boostTarget , self . activationsP ) * self . dt * self . learnFactorP * self . learningRate self . weightsPEL += np . outer ( self . activationsEL - self . boostTarget , self . activationsP ) * self . dt * self . learnFactor... | We use a simplified version of Hebbian learning to learn place weights . Cells above the boost target are wired to the currently - active places cells below it have their connection strength to them reduced . |
9,980 | def create_union_mnist_dataset ( ) : transform = transforms . Compose ( [ transforms . ToTensor ( ) , transforms . Normalize ( ( 0.1307 , ) , ( 0.3081 , ) ) ] ) mnist1 = datasets . MNIST ( 'data' , train = False , download = True , transform = transform ) data1 = zip ( mnist1 . test_data , mnist1 . test_labels ) mnist2... | Create a UnionDataset composed of two versions of the MNIST datasets where each item in the dataset contains 2 distinct images superimposed |
9,981 | def noisy ( pattern , noiseLevel , totalNumCells ) : n = int ( noiseLevel * len ( pattern ) ) noised = set ( pattern ) noised . difference_update ( random . sample ( noised , n ) ) for _ in xrange ( n ) : while True : v = random . randint ( 0 , totalNumCells - 1 ) if v not in pattern and v not in noised : noised . add ... | Generate a noisy copy of a pattern . |
9,982 | def createRandomObjects ( numObjects , locationsPerObject , featurePoolSize ) : allFeatures = range ( featurePoolSize ) allLocations = range ( locationsPerObject ) objects = dict ( ( name , [ random . choice ( allFeatures ) for _ in xrange ( locationsPerObject ) ] ) for name in xrange ( numObjects ) ) return objects | Generate random objects . |
9,983 | def createL4L2Column ( network , networkConfig , suffix = "" ) : externalInputName = "externalInput" + suffix sensorInputName = "sensorInput" + suffix L4ColumnName = "L4Column" + suffix L2ColumnName = "L2Column" + suffix L4Params = copy . deepcopy ( networkConfig [ "L4Params" ] ) L4Params [ "basalInputWidth" ] = networ... | Create a a single column containing one L4 and one L2 . |
9,984 | def createMultipleL4L2Columns ( network , networkConfig ) : numCorticalColumns = networkConfig [ "numCorticalColumns" ] for i in xrange ( numCorticalColumns ) : networkConfigCopy = copy . deepcopy ( networkConfig ) layerConfig = networkConfigCopy [ "L2Params" ] layerConfig [ "seed" ] = layerConfig . get ( "seed" , 42 )... | Create a network consisting of multiple columns . Each column contains one L4 and one L2 is identical in structure to the network created by createL4L2Column . In addition all the L2 columns are fully connected to each other through their lateral inputs . |
9,985 | def createMultipleL4L2ColumnsWithTopology ( network , networkConfig ) : numCorticalColumns = networkConfig [ "numCorticalColumns" ] output_lateral_connections = [ [ ] for i in xrange ( numCorticalColumns ) ] input_lateral_connections = [ [ ] for i in xrange ( numCorticalColumns ) ] columnPositions = networkConfig . get... | Create a network consisting of multiple columns . Each column contains one L4 and one L2 is identical in structure to the network created by createL4L2Column . In addition the L2 columns are connected to each other through their lateral inputs based on the topological information provided . |
9,986 | def loadExperimentData ( folder , area ) : filename = os . path . join ( folder , "Combo3_{}.mat" . format ( area ) ) contents = sio . loadmat ( filename , variable_names = [ 'data' ] , struct_as_record = False , squeeze_me = True ) return contents [ 'data' ] | Loads the experiment s data from a MATLAB file into a python friendly structure . |
9,987 | def classifierPredict ( testVector , storedVectors ) : numClasses = storedVectors . shape [ 0 ] output = np . zeros ( ( numClasses , ) ) for i in range ( numClasses ) : output [ i ] = np . sum ( np . minimum ( testVector , storedVectors [ i , : ] ) ) return output | Return overlap of the testVector with stored representations for each object . |
9,988 | def run_multiple_column_experiment ( ) : featureRange = [ 5 , 10 , 20 , 30 ] pointRange = 1 objectRange = [ 100 ] numLocations = [ 10 ] numPoints = 10 numTrials = 10 columnRange = [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ] useLocation = 1 resultsDir = os . path . dirname ( os . path . realpath ( __file__ ) ) args = [ ] for c in... | Compare the ideal observer against a multi - column sensorimotor network . |
9,989 | def save ( callLog , logFilename ) : with open ( logFilename , "wb" ) as outp : cPickle . dump ( callLog , outp ) | Save the call log history into this file . |
9,990 | def _getDefaultCombinedL4Params ( self , numInputBits , inputSize , numExternalInputBits , externalInputSize , L2CellCount ) : sampleSize = numExternalInputBits + numInputBits activationThreshold = int ( max ( numExternalInputBits , numInputBits ) * .6 ) minThreshold = activationThreshold return { "columnCount" : input... | Returns a good default set of parameters to use in a combined L4 region . |
9,991 | def findBinomialNsWithExpectedSampleMinimum ( desiredValuesSorted , p , numSamples , nMax ) : actualValues = [ getExpectedValue ( SampleMinimumDistribution ( numSamples , BinomialDistribution ( n , p , cache = True ) ) ) for n in xrange ( nMax + 1 ) ] results = [ ] n = 0 for desiredValue in desiredValuesSorted : while ... | For each desired value find an approximate n for which the sample minimum has a expected value equal to this value . |
9,992 | def findBinomialNsWithLowerBoundSampleMinimum ( confidence , desiredValuesSorted , p , numSamples , nMax ) : def P ( n , numOccurrences ) : return 1 - SampleMinimumDistribution ( numSamples , BinomialDistribution ( n , p ) ) . cdf ( numOccurrences - 1 ) results = [ ] n = 0 for desiredValue in desiredValuesSorted : whil... | For each desired value find an approximate n for which the sample minimum has a probabilistic lower bound equal to this value . |
9,993 | def tempoAdjust1 ( self , tempoFactor ) : if self . apicalIntersect . any ( ) : tempoFactor = tempoFactor * 0.5 else : tempoFactor = tempoFactor * 2 return tempoFactor | Adjust tempo based on recent active apical input only |
9,994 | def tempoAdjust2 ( self , tempoFactor ) : late_votes = ( len ( self . adtm . getNextBasalPredictedCells ( ) ) - len ( self . apicalIntersect ) ) * - 1 early_votes = len ( self . apicalIntersect ) votes = late_votes + early_votes print ( 'vote tally' , votes ) if votes > 0 : tempoFactor = tempoFactor * 0.5 print 'speed ... | Adjust tempo by aggregating active basal cell votes for pre vs . post |
9,995 | def _countWhereGreaterEqualInRows ( sparseMatrix , rows , threshold ) : return sum ( sparseMatrix . countWhereGreaterOrEqual ( row , row + 1 , 0 , sparseMatrix . nCols ( ) , threshold ) for row in rows ) | Like countWhereGreaterOrEqual but for an arbitrary selection of rows and without any column filtering . |
9,996 | def compute ( self , feedforwardInput = ( ) , lateralInputs = ( ) , feedforwardGrowthCandidates = None , learn = True , predictedInput = None , ) : if feedforwardGrowthCandidates is None : feedforwardGrowthCandidates = feedforwardInput if not learn : self . _computeInferenceMode ( feedforwardInput , lateralInputs ) eli... | Runs one time step of the column pooler algorithm . |
9,997 | def numberOfConnectedProximalSynapses ( self , cells = None ) : if cells is None : cells = xrange ( self . numberOfCells ( ) ) return _countWhereGreaterEqualInRows ( self . proximalPermanences , cells , self . connectedPermanenceProximal ) | Returns the number of proximal connected synapses on these cells . |
9,998 | def numberOfProximalSynapses ( self , cells = None ) : if cells is None : cells = xrange ( self . numberOfCells ( ) ) n = 0 for cell in cells : n += self . proximalPermanences . nNonZerosOnRow ( cell ) return n | Returns the number of proximal synapses with permanence > 0 on these cells . |
9,999 | def numberOfDistalSegments ( self , cells = None ) : if cells is None : cells = xrange ( self . numberOfCells ( ) ) n = 0 for cell in cells : if self . internalDistalPermanences . nNonZerosOnRow ( cell ) > 0 : n += 1 for permanences in self . distalPermanences : if permanences . nNonZerosOnRow ( cell ) > 0 : n += 1 ret... | Returns the total number of distal segments for these cells . |
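
Each row above pairs a tokenized Python function (`question`) with its reference docstring (`target`). As a minimal sketch of loading and inspecting such a split with the Hugging Face `datasets` library — the repository identifier below is hypothetical; substitute the actual Hub name for this dataset:

```python
# Minimal sketch, assuming the corpus is hosted on the Hugging Face Hub.
# "user/code-docstring-corpus" is a hypothetical identifier, not the real one.
from datasets import load_dataset

ds = load_dataset("user/code-docstring-corpus", split="train")

row = ds[9900]                # rows are indexed by position; "idx" is also a column
print(row["idx"])             # e.g. 9900
print(row["question"][:120])  # tokenized Python source, truncated for display
print(row["target"])          # reference docstring
```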