def getOptimizationMetricInfo(cls, searchJobParams): """Retrieves the optimization key name and optimization function. Parameters: --------------------------------------------------------- searchJobParams: Parameter for passing as the searchParams arg to the Hypersearch constructor. retval: (optimizationMetricKey, maximize) optimizationMetricKey: which report key to optimize for maximize: True if we should try to maximize the optimizeKey metric. False if we should minimize it. """ if searchJobParams["hsVersion"] == "v2": search = HypersearchV2(searchParams=searchJobParams) else: raise RuntimeError("Unsupported hypersearch version \"%s\"" % \ (searchJobParams["hsVersion"])) info = search.getOptimizationMetricInfo() return info
def getModelDescription(self): """ Parameters: ---------------------------------------------------------------------- retval: Printable description of the model. """ params = self.__unwrapParams() if "experimentName" in params: return params["experimentName"] else: paramSettings = self.getParamLabels() # Form a csv friendly string representation of this model items = [] for key, value in paramSettings.items(): items.append("%s_%s" % (key, value)) return ".".join(items)
def getParamLabels(self): """ Parameters: ---------------------------------------------------------------------- retval: a dictionary of model parameter labels. For each entry the key is the name of the parameter and the value is the value chosen for it. """ params = self.__unwrapParams() # Hypersearch v2 stores the flattened parameter settings in "particleState" if "particleState" in params: retval = dict() queue = [(pair, retval) for pair in params["particleState"]["varStates"].iteritems()] while len(queue) > 0: pair, output = queue.pop() k, v = pair if ("position" in v and "bestPosition" in v and "velocity" in v): output[k] = v["position"] else: if k not in output: output[k] = dict() queue.extend((pair, output[k]) for pair in v.iteritems()) return retval
def __unwrapParams(self): """Unwraps self.__rawInfo.params into the equivalent python dictionary and caches it in self.__cachedParams. Returns the unwrapped params Parameters: ---------------------------------------------------------------------- retval: Model params dictionary corresponding to the json as returned in ClientJobsDAO.modelsInfo()[x].params """ if self.__cachedParams is None: self.__cachedParams = json.loads(self.__rawInfo.params) assert self.__cachedParams is not None, \ "%s resulted in None" % self.__rawInfo.params return self.__cachedParams
def getAllMetrics(self): """Retrieves a dictionary of metrics that combines all report and optimization metrics Parameters: ---------------------------------------------------------------------- retval: a dictionary of report and optimization metrics that were collected for the model; an empty dictionary if there aren't any. """ result = self.getReportMetrics() result.update(self.getOptimizationMetrics()) return result
def __unwrapResults(self): """Unwraps self.__rawInfo.results and caches it in self.__cachedResults; Returns the unwrapped params Parameters: ---------------------------------------------------------------------- retval: ModelResults namedtuple instance """ if self.__cachedResults is None: if self.__rawInfo.results is not None: resultList = json.loads(self.__rawInfo.results) assert len(resultList) == 2, \ "Expected 2 elements, but got %s (%s)." % ( len(resultList), resultList) self.__cachedResults = self.ModelResults( reportMetrics=resultList[0], optimizationMetrics=resultList[1]) else: self.__cachedResults = self.ModelResults( reportMetrics={}, optimizationMetrics={}) return self.__cachedResults
def getData(self, n): """Returns the next n values for the distribution as a list.""" records = [self.getNext() for x in range(n)] return records
def getTerminationCallbacks(self, terminationFunc): """ Returns the periodic checks to see if the model should continue running. Parameters: ----------------------------------------------------------------------- terminationFunc: The function that will be called in the model main loop as a wrapper around this function. Must have a parameter called 'index' Returns: A list of PeriodicActivityRequest objects. """ activities = [None] * len(ModelTerminator._MILESTONES) for index, (iteration, _) in enumerate(ModelTerminator._MILESTONES): cb = functools.partial(terminationFunc, index=index) activities[index] = PeriodicActivityRequest(repeating=False, period=iteration, cb=cb) return activities
def groupby2(*args): """ Like itertools.groupby, with the following additions: - Supports multiple sequences. Instead of returning (k, g), each iteration returns (k, g0, g1, ...), with one `g` for each input sequence. The value of each `g` is either a non-empty iterator or `None`. - It treats the value `None` as an empty sequence. So you can make subsequent calls to groupby2 on any `g` value. .. note:: Read up on groupby here: https://docs.python.org/dev/library/itertools.html#itertools.groupby :param args: (list) Parameters alternating between sorted lists and their respective key functions. The lists should be sorted with respect to their key function. :returns: (tuple) An (n + 1)-dimensional tuple, where the first element is the key of the iteration, and the other n entries are groups of objects that share this key. Each group corresponds to an input sequence. `groupby2` is a generator that returns a tuple for every iteration. If an input sequence has no members with the current key, None is returned in place of a generator. """ generatorList = [] # list of each list's (k, group) tuples if len(args) % 2 == 1: raise ValueError("Must have a key function for every list.") advanceList = [] # populate above lists for i in xrange(0, len(args), 2): listn = args[i] fn = args[i + 1] if listn is not None: generatorList.append(groupby(listn, fn)) advanceList.append(True) # start by advancing everyone. else: generatorList.append(None) advanceList.append(False) n = len(generatorList) nextList = [None] * n # while all lists aren't exhausted walk through each group in order while True: for i in xrange(n): if advanceList[i]: try: nextList[i] = generatorList[i].next() except StopIteration: nextList[i] = None # no more values to process in any of the generators if all(entry is None for entry in nextList): break # the minimum key value in the nextList minKeyVal = min(nextVal[0] for nextVal in nextList if nextVal is not None) # populate the tuple to return based on minKeyVal retGroups = [minKeyVal] for i in xrange(n): if nextList[i] is not None and nextList[i][0] == minKeyVal: retGroups.append(nextList[i][1]) advanceList[i] = True else: advanceList[i] = False retGroups.append(None) yield tuple(retGroups)
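A minimal usage sketch of groupby2 (Python 2, matching the code above): merging two sorted sequences with an identity key function. It assumes groupby2 as defined above is in scope, along with itertools.groupby, which the function relies on.

from itertools import groupby  # required by groupby2 itself

evens = [2, 4, 4, 6]
odds = [3, 3, 5]
# Both sequences are sorted with respect to their key function (identity here).
for key, evenGroup, oddGroup in groupby2(evens, lambda x: x,
                                         odds, lambda x: x):
    evenItems = list(evenGroup) if evenGroup is not None else []
    oddItems = list(oddGroup) if oddGroup is not None else []
    print key, evenItems, oddItems
# 2 [2] []
# 3 [] [3, 3]
# 4 [4, 4] []
# 5 [] [5]
# 6 [6] []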
def _openStream(dataUrl, isBlocking, # pylint: disable=W0613 maxTimeout, # pylint: disable=W0613 bookmark, firstRecordIdx): """Open the underlying file stream This only supports 'file://' prefixed paths. :returns: record stream instance :rtype: FileRecordStream """ filePath = dataUrl[len(FILE_PREF):] if not os.path.isabs(filePath): filePath = os.path.join(os.getcwd(), filePath) return FileRecordStream(streamID=filePath, write=False, bookmark=bookmark, firstRecord=firstRecordIdx)
def getNextRecord(self): """ Returns combined data from all sources (values only). :returns: None on EOF; empty sequence on timeout. """ # Keep reading from the raw input till we get enough for an aggregated # record while True: # Reached EOF due to lastRow constraint? if self._sourceLastRecordIdx is not None and \ self._recordStore.getNextRecordIdx() >= self._sourceLastRecordIdx: preAggValues = None # indicates EOF bookmark = self._recordStore.getBookmark() else: # Get the raw record and bookmark preAggValues = self._recordStore.getNextRecord() bookmark = self._recordStore.getBookmark() if preAggValues == (): # means timeout error occurred if self._eofOnTimeout: preAggValues = None # act as if we got EOF else: return preAggValues # Timeout indicator self._logger.debug('Read source record #%d: %r', self._recordStore.getNextRecordIdx()-1, preAggValues) # Perform aggregation (fieldValues, aggBookmark) = self._aggregator.next(preAggValues, bookmark) # Update the aggregated record bookmark if we got a real record back if fieldValues is not None: self._aggBookmark = aggBookmark # Reached EOF? if preAggValues is None and fieldValues is None: return None # Return it if we have a record if fieldValues is not None: break # Do we need to re-order the fields in the record? if self._needFieldsFiltering: values = [] srcDict = dict(zip(self._recordStoreFieldNames, fieldValues)) for name in self._streamFieldNames: values.append(srcDict[name]) fieldValues = values # Write to debug output? if self._writer is not None: self._writer.appendRecord(fieldValues) self._recordCount += 1 self._logger.debug('Returning aggregated record #%d from getNextRecord(): ' '%r. Bookmark: %r', self._recordCount-1, fieldValues, self._aggBookmark) return fieldValues
def getDataRowCount(self): """ Iterates through stream to calculate total records after aggregation. This will alter the bookmark state. """ inputRowCountAfterAggregation = 0 while True: record = self.getNextRecord() if record is None: return inputRowCountAfterAggregation inputRowCountAfterAggregation += 1 if inputRowCountAfterAggregation > 10000: raise RuntimeError('No end of datastream found.')
def getStats(self): """ TODO: This method needs to be enhanced to get the stats on the *aggregated* records. :returns: stats (like min and max values of the fields). """ # The record store returns a dict of stats, each value in this dict is # a list with one item per field of the record store # { # 'min' : [f1_min, f2_min, f3_min], # 'max' : [f1_max, f2_max, f3_max] # } recordStoreStats = self._recordStore.getStats() # We need to convert each item to represent the fields of the *stream* streamStats = dict() for (key, values) in recordStoreStats.items(): fieldStats = dict(zip(self._recordStoreFieldNames, values)) streamValues = [] for name in self._streamFieldNames: streamValues.append(fieldStats[name]) streamStats[key] = streamValues return streamStats
def get(self, number): """ Return a pattern for a number. @param number (int) Number of pattern @return (set) Indices of on bits """ if number not in self._patterns: raise IndexError("Invalid number") return self._patterns[number]
def addNoise(self, bits, amount): """ Add noise to pattern. @param bits (set) Indices of on bits @param amount (float) Probability of switching an on bit with a random bit @return (set) Indices of on bits in noisy pattern """ newBits = set() for bit in bits: if self._random.getReal64() < amount: newBits.add(self._random.getUInt32(self._n)) else: newBits.add(bit) return newBits
def numbersForBit(self, bit): """ Return the set of pattern numbers that match a bit. @param bit (int) Index of bit @return (set) Indices of numbers """ if bit >= self._n: raise IndexError("Invalid bit") numbers = set() for index, pattern in self._patterns.iteritems(): if bit in pattern: numbers.add(index) return numbers
def numberMapForBits(self, bits): """ Return a map from number to matching on bits, for all numbers that match a set of bits. @param bits (set) Indices of bits @return (dict) Mapping from number => on bits. """ numberMap = dict() for bit in bits: numbers = self.numbersForBit(bit) for number in numbers: if number not in numberMap: numberMap[number] = set() numberMap[number].add(bit) return numberMap
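A short usage sketch for the two lookups above. It assumes a pattern machine instance built from the class these methods belong to; the constructor arguments shown (number of bits, on bits per pattern) follow the attributes used in the code but the exact signature is an assumption.

pm = PatternMachine(1024, 20)           # hypothetical construction: n=1024, w=20
bits = pm.get(0) | pm.get(1)            # union of on bits from two patterns
numberMap = pm.numberMapForBits(bits)   # which patterns contributed which bits
print sorted(numberMap.keys())          # [0, 1], plus any patterns sharing bits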
def prettyPrintPattern(self, bits, verbosity=1): """ Pretty print a pattern. @param bits (set) Indices of on bits @param verbosity (int) Verbosity level @return (string) Pretty-printed text """ numberMap = self.numberMapForBits(bits) text = "" numberList = [] numberItems = sorted(numberMap.iteritems(), key=lambda (number, bits): len(bits), reverse=True) for number, bits in numberItems: if verbosity > 2: strBits = [str(n) for n in bits] numberText = "{0} (bits: {1})".format(number, ",".join(strBits)) elif verbosity > 1: numberText = "{0} ({1} bits)".format(number, len(bits)) else: numberText = str(number) numberList.append(numberText) text += "[{0}]".format(", ".join(numberList)) return text
def _generate(self): """ Generates set of random patterns. """ candidates = np.array(range(self._n), np.uint32) for i in xrange(self._num): self._random.shuffle(candidates) pattern = candidates[0:self._getW()] self._patterns[i] = set(pattern)
def _getW(self): """ Gets a value of `w` for use in generating a pattern. """ w = self._w if type(w) is list: return w[self._random.getUInt32(len(w))] else: return w
def _generate(self): """ Generates set of consecutive patterns. """ n = self._n w = self._w assert type(w) is int, "List for w not supported" for i in xrange(n / w): pattern = set(xrange(i * w, (i+1) * w)) self._patterns[i] = pattern
def compute(self, recordNum, patternNZ, classification, learn, infer): """ Process one input sample. This method is called by outer loop code outside the nupic-engine. We use this instead of the nupic engine compute() because our inputs and outputs aren't fixed size vectors of reals. :param recordNum: Record number of this input pattern. Record numbers normally increase sequentially by 1 each time unless there are missing records in the dataset. Knowing this information ensures that we don't get confused by missing records. :param patternNZ: List of the active indices from the output below. When the input is from TemporalMemory, this list should be the indices of the active cells. :param classification: Dict of the classification information where: - bucketIdx: list of indices of the encoder bucket - actValue: list of actual values going into the encoder Classification could be None for inference mode. :param learn: (bool) if true, learn this sample :param infer: (bool) if true, perform inference :return: Dict containing inference results; there is one entry for each step in self.steps, where the key is the number of steps, and the value is an array containing the relative likelihood for each bucketIdx starting from bucketIdx 0. There is also an entry containing the average actual value to use for each bucket. The key is 'actualValues'. for example: .. code-block:: python {1 : [0.1, 0.3, 0.2, 0.7], 4 : [0.2, 0.4, 0.3, 0.5], 'actualValues': [1.5, 3.5, 5.5, 7.6], } """ if self.verbosity >= 1: print " learn:", learn print " recordNum:", recordNum print " patternNZ (%d):" % len(patternNZ), patternNZ print " classificationIn:", classification # ensures that recordNum increases monotonically if len(self._patternNZHistory) > 0: if recordNum < self._patternNZHistory[-1][0]: raise ValueError("the record number has to increase monotonically") # Store pattern in our history if this is a new record if len(self._patternNZHistory) == 0 or \ recordNum > self._patternNZHistory[-1][0]: self._patternNZHistory.append((recordNum, patternNZ)) # To allow multi-class classification, we need to be able to run learning # without inference being on. So initialize retval outside # of the inference block. 
retval = {} # Update maxInputIdx and augment weight matrix with zero padding if max(patternNZ) > self._maxInputIdx: newMaxInputIdx = max(patternNZ) for nSteps in self.steps: self._weightMatrix[nSteps] = numpy.concatenate(( self._weightMatrix[nSteps], numpy.zeros(shape=(newMaxInputIdx-self._maxInputIdx, self._maxBucketIdx+1))), axis=0) self._maxInputIdx = int(newMaxInputIdx) # Get classification info if classification is not None: if type(classification["bucketIdx"]) is not list: bucketIdxList = [classification["bucketIdx"]] actValueList = [classification["actValue"]] numCategory = 1 else: bucketIdxList = classification["bucketIdx"] actValueList = classification["actValue"] numCategory = len(classification["bucketIdx"]) else: if learn: raise ValueError("classification cannot be None when learn=True") actValueList = None bucketIdxList = None # ------------------------------------------------------------------------ # Inference: # For each active bit in the activationPattern, get the classification # votes if infer: retval = self.infer(patternNZ, actValueList) if learn and classification["bucketIdx"] is not None: for categoryI in range(numCategory): bucketIdx = bucketIdxList[categoryI] actValue = actValueList[categoryI] # Update maxBucketIndex and augment weight matrix with zero padding if bucketIdx > self._maxBucketIdx: for nSteps in self.steps: self._weightMatrix[nSteps] = numpy.concatenate(( self._weightMatrix[nSteps], numpy.zeros(shape=(self._maxInputIdx+1, bucketIdx-self._maxBucketIdx))), axis=1) self._maxBucketIdx = int(bucketIdx) # Update rolling average of actual values if it's a scalar. If it's # not, it must be a category, in which case each bucket only ever # sees one category so we don't need a running average. while self._maxBucketIdx > len(self._actualValues) - 1: self._actualValues.append(None) if self._actualValues[bucketIdx] is None: self._actualValues[bucketIdx] = actValue else: if (isinstance(actValue, int) or isinstance(actValue, float) or isinstance(actValue, long)): self._actualValues[bucketIdx] = ((1.0 - self.actValueAlpha) * self._actualValues[bucketIdx] + self.actValueAlpha * actValue) else: self._actualValues[bucketIdx] = actValue for (learnRecordNum, learnPatternNZ) in self._patternNZHistory: error = self._calculateError(recordNum, bucketIdxList) nSteps = recordNum - learnRecordNum if nSteps in self.steps: for bit in learnPatternNZ: self._weightMatrix[nSteps][bit, :] += self.alpha * error[nSteps] # ------------------------------------------------------------------------ # Verbose print if infer and self.verbosity >= 1: print " inference: combined bucket likelihoods:" print " actual bucket values:", retval["actualValues"] for (nSteps, votes) in retval.items(): if nSteps == "actualValues": continue print " %d steps: " % (nSteps), _pFormatArray(votes) bestBucketIdx = votes.argmax() print (" most likely bucket idx: " "%d, value: %s" % (bestBucketIdx, retval["actualValues"][bestBucketIdx])) print return retval
def infer(self, patternNZ, actValueList): """ Return the inference value from one input sample. The actual learning happens in compute(). :param patternNZ: list of the active indices from the output below :param actValueList: list of actual values from the current classification input, or None when no classification input is available (e.g. during pure inference) :return: dict containing inference results, one entry for each step in self.steps. The key is the number of steps, the value is an array containing the relative likelihood for each bucketIdx starting from bucketIdx 0. for example: .. code-block:: python {'actualValues': [0.0, 1.0, 2.0, 3.0], 1: [0.1, 0.3, 0.2, 0.7], 4: [0.2, 0.4, 0.3, 0.5]} """ # Return value dict. For buckets which we don't have an actual value # for yet, just plug in any valid actual value. It doesn't matter what # we use because that bucket won't have non-zero likelihood anyways. # NOTE: If doing 0-step prediction, we shouldn't use any knowledge # of the classification input during inference. if self.steps[0] == 0 or actValueList is None: defaultValue = 0 else: defaultValue = actValueList[0] actValues = [x if x is not None else defaultValue for x in self._actualValues] retval = {"actualValues": actValues} for nSteps in self.steps: predictDist = self.inferSingleStep(patternNZ, self._weightMatrix[nSteps]) retval[nSteps] = predictDist return retval
def inferSingleStep(self, patternNZ, weightMatrix): """ Perform inference for a single step. Given an SDR input and a weight matrix, return a predicted distribution. :param patternNZ: list of the active indices from the output below :param weightMatrix: numpy array of the weight matrix :return: numpy array of the predicted class label distribution """ outputActivation = weightMatrix[patternNZ].sum(axis=0) # softmax normalization outputActivation = outputActivation - numpy.max(outputActivation) expOutputActivation = numpy.exp(outputActivation) predictDist = expOutputActivation / numpy.sum(expOutputActivation) return predictDist
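A standalone worked example of the softmax normalization used above; the activation values are illustrative, not taken from any model state.

import numpy

outputActivation = numpy.array([2.0, 1.0, 0.1])
# Subtracting the max keeps exp() numerically stable without changing the result
shifted = outputActivation - numpy.max(outputActivation)
expActivation = numpy.exp(shifted)
predictDist = expActivation / numpy.sum(expActivation)
print predictDist   # ~[0.659, 0.242, 0.099]; sums to 1.0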
def _calculateError(self, recordNum, bucketIdxList): """ Calculate error signal :param recordNum: record number of the current input pattern; the step offset for each stored pattern is recordNum minus that pattern's record number :param bucketIdxList: list of encoder buckets :return: dict containing error. The key is the number of steps, the value is a numpy array of error at the output layer """ error = dict() targetDist = numpy.zeros(self._maxBucketIdx + 1) numCategories = len(bucketIdxList) for bucketIdx in bucketIdxList: targetDist[bucketIdx] = 1.0/numCategories for (learnRecordNum, learnPatternNZ) in self._patternNZHistory: nSteps = recordNum - learnRecordNum if nSteps in self.steps: predictDist = self.inferSingleStep(learnPatternNZ, self._weightMatrix[nSteps]) error[nSteps] = targetDist - predictDist return error
def sort(filename, key, outputFile, fields=None, watermark=1024 * 1024 * 100): """Sort a potentially big file filename - the input file (standard File format) key - a list of field names to sort by outputFile - the name of the output file fields - a list of fields that should be included (all fields if None) watermark - when available memory goes below the watermark create a new chunk sort() works by reading records from the file into memory and calling _sortChunk() on each chunk. In the process it gets rid of unneeded fields if any. Once all the chunks have been sorted and written to chunk files it calls _merge() to merge all the chunks into a single sorted file. Note that sort() gets a key that contains field names, which it converts into field indices for _sortChunk() because _sortChunk() doesn't need to know the field name. sort() figures out by itself how many chunk files to use by reading records from the file until the low watermark value of available memory is hit and then it sorts the current records, generates a chunk file, clears the sorted records and starts on a new chunk. The key field names are turned into indices """ if fields is not None: assert set(key).issubset(set([f[0] for f in fields])) with FileRecordStream(filename) as f: # Find the indices of the requested fields if fields: fieldNames = [ff[0] for ff in fields] indices = [f.getFieldNames().index(name) for name in fieldNames] assert len(indices) == len(fields) else: fields = f.getFields() fieldNames = f.getFieldNames() indices = None # turn key fields to key indices key = [fieldNames.index(name) for name in key] chunk = 0 records = [] for i, r in enumerate(f): # Select requested fields only if indices: temp = [] for idx in indices: temp.append(r[idx]) r = temp # Store processed record records.append(r) # Check memory available_memory = psutil.avail_phymem() # If below the watermark create a new chunk, reset and keep going if available_memory < watermark: _sortChunk(records, key, chunk, fields) records = [] chunk += 1 # Sort and write the remainder if len(records) > 0: _sortChunk(records, key, chunk, fields) chunk += 1 # Merge all the files _mergeFiles(key, chunk, outputFile, fields)
def _sortChunk(records, key, chunkIndex, fields): """Sort an in-memory chunk of records records - a list of records read from the original dataset key - a list of indices to sort the records by chunkIndex - the index of the current chunk fields - the field metadata, forwarded to the chunk's FileRecordStream The records contain only the fields requested by the user. _sortChunk() will write the sorted records to a standard File named "chunk_<chunk index>.csv" (chunk_0.csv, chunk_1.csv,...). """ title(additional='(key=%s, chunkIndex=%d)' % (str(key), chunkIndex)) assert len(records) > 0 # Sort the current records records.sort(key=itemgetter(*key)) # Write to a chunk file if chunkIndex is not None: filename = 'chunk_%d.csv' % chunkIndex with FileRecordStream(filename, write=True, fields=fields) as o: for r in records: o.appendRecord(r) assert os.path.getsize(filename) > 0 return records
def _mergeFiles(key, chunkCount, outputFile, fields): """Merge sorted chunk files into a sorted output file chunkCount - the number of available chunk files outputFile - the name of the sorted output file """ title() # Open output file with FileRecordStream(outputFile, write=True, fields=fields) as o: # Open all chunk files files = [FileRecordStream('chunk_%d.csv' % i) for i in range(chunkCount)] records = [f.getNextRecord() for f in files] # This loop will run until all files are exhausted while not all(r is None for r in records): # Close and drop exhausted files, keeping records and files aligned indices = [i for i, r in enumerate(records) if r is not None] for i, f in enumerate(files): if i not in indices: f.close() records = [records[i] for i in indices] files = [files[i] for i in indices] # Find the current record r = min(records, key=itemgetter(*key)) # Write it to the file o.appendRecord(r) # Find the index of the file that produced the current record index = records.index(r) # Read a new record from that file records[index] = files[index].getNextRecord() # Close any remaining open chunk files, then remove all chunk files for f in files: f.close() for i in range(chunkCount): os.remove('chunk_%d.csv' % i)
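A hypothetical invocation of the external sort above; the file names and field name are illustrative only.

# Sort input.csv by the "timestamp" field, keeping all fields, writing the
# result to sorted.csv. Chunk files are created whenever available memory
# drops below the watermark, then merged by _mergeFiles().
sort('input.csv', key=['timestamp'], outputFile='sorted.csv')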
def compute(self, activeColumns, learn=True): """ Feeds input record through TM, performing inference and learning. Updates member variables with new state. @param activeColumns (set) Indices of active columns in `t` """ bottomUpInput = numpy.zeros(self.numberOfCols, dtype=dtype) bottomUpInput[list(activeColumns)] = 1 super(TemporalMemoryShim, self).compute(bottomUpInput, enableLearn=learn, enableInference=True) predictedState = self.getPredictedState() self.predictiveCells = set(numpy.flatnonzero(predictedState))
def read(cls, proto): """Deserialize from proto instance. :param proto: (TemporalMemoryShimProto) the proto instance to read from :returns: (TemporalMemoryShim) the deserialized instance """ tm = super(TemporalMemoryShim, cls).read(proto.baseTM) tm.predictiveCells = set(proto.predictedState) tm.connections = Connections.read(proto.connections) return tm
def write(self, proto): """Populate serialization proto instance. :param proto: (TemporalMemoryShimProto) the proto instance to populate """ super(TemporalMemoryShim, self).write(proto.baseTM) self.connections.write(proto.connections) proto.predictiveCells = list(self.predictiveCells) # capnp list fields take a sequence, not a set
def cPrint(self, level, message, *args, **kw): """Print a message to the console. Prints only if level <= self.consolePrinterVerbosity Printing with level 0 is equivalent to using a print statement, and should normally be avoided. :param level: (int) indicating the urgency of the message with lower values meaning more urgent (messages at level 0 are the most urgent and are always printed) :param message: (string) possibly with format specifiers :param args: specifies the values for any format specifiers in message :param kw: newline is the only keyword argument. True (default) if a newline should be printed """ if level > self.consolePrinterVerbosity: return if len(kw) > 1: raise KeyError("Invalid keywords for cPrint: %s" % str(kw.keys())) newline = kw.get("newline", True) if len(kw) == 1 and 'newline' not in kw: raise KeyError("Invalid keyword for cPrint: %s" % kw.keys()[0]) if len(args) == 0: if newline: print message else: print message, else: if newline: print message % args else: print message % args,
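A usage sketch for cPrint, assuming obj is an instance of a class mixing in the method above, with consolePrinterVerbosity set to 1 (the name obj is a placeholder):

obj.cPrint(1, "processed %d records in %.2f seconds", 500, 1.25)
obj.cPrint(2, "only shown when consolePrinterVerbosity >= 2")
obj.cPrint(1, "no trailing newline here", newline=False)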
def profileTM(tmClass, tmDim, nRuns): """ profiling performance of TemporalMemory (TM) using the python cProfile module and ordered by cumulative time, see how to run on command-line above. @param tmClass implementation of TM (cpp, py, ..) @param tmDim number of columns in TM @param nRuns number of calls of the profiled code (epochs) """ # create TM instance to measure tm = tmClass(numberOfCols=tmDim) # generate input data data = numpy.random.randint(0, 2, [tmDim, nRuns]).astype('float32') for i in xrange(nRuns): # new data every time, this is the worst case performance # real performance would be better, as the input data would not be completely random d = data[:,i] # the actual function to profile! tm.compute(d, True)
def runPermutations(args): """ The main function of the RunPermutations utility. This utility will automatically generate and run multiple prediction framework experiments that are permutations of a base experiment via the Grok engine. For example, if you have an experiment that you want to test with 3 possible values of variable A and 2 possible values of variable B, this utility will automatically generate the experiment directories and description files for each of the 6 different experiments. Here is an example permutations file which is read by this script below. The permutations file must be in the same directory as the description.py for the base experiment that you want to permute. It contains a permutations dict, an optional list of the result items to report on for each experiment, and an optional result item to optimize for. When an 'optimize' entry is provided, this tool will attempt to prioritize the order in which the various permutations are run in order to improve the odds of running the best permutations sooner. It does this by watching the results for various parameter values and putting parameter values that give generally better results at the head of the queue. In addition, when the optimize key is provided, we periodically update the UI with the best results obtained so far on that metric. --------------------------------------------------------------------------- permutations = dict( iterationCount = [1000, 5000], coincCount = [50, 100], trainTP = [False], ) report = ['.*reconstructErrAvg', '.*inputPredScore.*', ] optimize = 'postProc_gym1_baseline:inputPredScore' Parameters: ---------------------------------------------------------------------- args: Command-line args; the equivalent of sys.argv[1:] retval: for the actions 'run', 'pickup', and 'dryRun', returns the Hypersearch job ID (in the ClientJobs table); otherwise returns None """ helpString = ( "\n\n%prog [options] permutationsScript\n" "%prog [options] expDescription.json\n\n" "This script runs permutations of an experiment via Grok engine, as " "defined in a\npermutations.py script or an expGenerator experiment " "description json file.\nIn the expDescription.json form, the json file " "MUST have the file extension\n'.json' and MUST conform to " "expGenerator/experimentDescriptionSchema.json.") parser = optparse.OptionParser(usage=helpString) parser.add_option( "--replaceReport", dest="replaceReport", action="store_true", default=DEFAULT_OPTIONS["replaceReport"], help="Replace existing csv report file if it exists. Default is to " "append to the existing file. [default: %default].") parser.add_option( "--action", dest="action", default=DEFAULT_OPTIONS["action"], choices=["run", "pickup", "report", "dryRun"], help="Which action to perform. Possible actions are run, pickup, " "report, and dryRun. " "run: run a new HyperSearch via Grok. " "pickup: pick up the latest run of a HyperSearch job. " "dryRun: run a single HypersearchWorker inline within the application " "process without the Grok infrastructure to flush out bugs in " "description and permutations scripts; defaults to " "maxPermutations=1: use --maxPermutations to change this; " "report: just print results from the last or current run. " "[default: %default].") parser.add_option( "--maxPermutations", dest="maxPermutations", default=DEFAULT_OPTIONS["maxPermutations"], type="int", help="Maximum number of models to search. Applies only to the 'run' and " "'dryRun' actions. [default: %default].") parser.add_option( "--exports", dest="exports", default=DEFAULT_OPTIONS["exports"], type="string", help="json dump of environment variable settings that should be applied " "for the job before running. [default: %default].") parser.add_option( "--useTerminators", dest="useTerminators", action="store_true", default=DEFAULT_OPTIONS["useTerminators"], help="Use early model terminators in HyperSearch. " "[default: %default].") parser.add_option( "--maxWorkers", dest="maxWorkers", default=DEFAULT_OPTIONS["maxWorkers"], type="int", help="Maximum number of concurrent workers to launch. Applies only to " "the 'run' action. [default: %default].") parser.add_option( "-v", dest="verbosityCount", action="count", default=0, help="Increase verbosity of the output. Specify multiple times for " "increased verbosity. e.g., -vv is more verbose than -v.") parser.add_option( "--timeout", dest="timeout", default=DEFAULT_OPTIONS["timeout"], type="int", help="Time out for this search in minutes. " "[default: %default].") parser.add_option( "--overwrite", default=DEFAULT_OPTIONS["overwrite"], action="store_true", help="If 'yes', overwrite existing description.py and permutations.py" " (in the same directory as the <expDescription.json> file) if they" " already exist. [default: %default].") parser.add_option( "--genTopNDescriptions", dest="genTopNDescriptions", default=DEFAULT_OPTIONS["genTopNDescriptions"], type="int", help="Generate description files for the top N models. Each one will be" " placed into its own subdirectory under the base description file. " "[default: %default].") (options, positionalArgs) = parser.parse_args(args) # Get the permutations script's filepath if len(positionalArgs) != 1: parser.error("You must supply the name of exactly one permutations script " "or JSON description file.") fileArgPath = os.path.expanduser(positionalArgs[0]) fileArgPath = os.path.expandvars(fileArgPath) fileArgPath = os.path.abspath(fileArgPath) permWorkDir = os.path.dirname(fileArgPath) outputLabel = os.path.splitext(os.path.basename(fileArgPath))[0] basename = os.path.basename(fileArgPath) fileExtension = os.path.splitext(basename)[1] optionsDict = vars(options) if fileExtension == ".json": returnValue = permutations_runner.runWithJsonFile( fileArgPath, optionsDict, outputLabel, permWorkDir) else: returnValue = permutations_runner.runWithPermutationsScript( fileArgPath, optionsDict, outputLabel, permWorkDir) return returnValue
def _generateCategory(filename="simple.csv", numSequences=2, elementsPerSeq=1, numRepeats=10, resets=False): """ Generate a simple dataset. This contains a bunch of non-overlapping sequences. Parameters: ---------------------------------------------------- filename: name of the file to produce, including extension. It will be created in a 'datasets' sub-directory within the directory containing this script. numSequences: how many sequences to generate elementsPerSeq: length of each sequence numRepeats: how many times to repeat each sequence in the output resets: if True, turn on reset at start of each sequence """ # Create the output file scriptDir = os.path.dirname(__file__) pathname = os.path.join(scriptDir, 'datasets', filename) print "Creating %s..." % (pathname) fields = [('reset', 'int', 'R'), ('category', 'int', 'C'), ('field1', 'string', '')] outFile = FileRecordStream(pathname, write=True, fields=fields) # Create the sequences sequences = [] for i in range(numSequences): seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)] sequences.append(seq) # Write out the sequences in random order seqIdxs = [] for i in range(numRepeats): seqIdxs += range(numSequences) random.shuffle(seqIdxs) for seqIdx in seqIdxs: reset = int(resets) seq = sequences[seqIdx] for x in seq: outFile.appendRecord([reset, str(seqIdx), str(x)]) reset = 0 outFile.close()
def encodeIntoArray(self, inputData, output): """ See `nupic.encoders.base.Encoder` for more information. :param: inputData (tuple) Contains speed (float), longitude (float), latitude (float), altitude (float) :param: output (numpy.array) Stores encoded SDR in this numpy array """ altitude = None if len(inputData) == 4: (speed, longitude, latitude, altitude) = inputData else: (speed, longitude, latitude) = inputData coordinate = self.coordinateForPosition(longitude, latitude, altitude) radius = self.radiusForSpeed(speed) super(GeospatialCoordinateEncoder, self).encodeIntoArray( (coordinate, radius), output)
def coordinateForPosition(self, longitude, latitude, altitude=None): """ Returns coordinate for given GPS position. :param: longitude (float) Longitude of position :param: latitude (float) Latitude of position :param: altitude (float) Altitude of position :returns: (numpy.array) Coordinate that the given GPS position maps to """ coords = PROJ(longitude, latitude) if altitude is not None: coords = transform(PROJ, geocentric, coords[0], coords[1], altitude) coordinate = numpy.array(coords) coordinate = coordinate / self.scale return coordinate.astype(int)
def radiusForSpeed(self, speed): """ Returns radius for given speed. Tries to get the encodings of consecutive readings to be adjacent with some overlap. :param: speed (float) Speed (in meters per second) :returns: (int) Radius for given speed """ overlap = 1.5 coordinatesPerTimestep = speed * self.timestep / self.scale radius = int(round(float(coordinatesPerTimestep) / 2 * overlap)) minRadius = int(math.ceil((math.sqrt(self.w) - 1) / 2)) return max(radius, minRadius)
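A worked example of the radius arithmetic above, with hypothetical encoder settings (timestep of 1 second, scale of 5 meters, w of 21 on bits):

import math

speed, timestep, scale, w = 10.0, 1, 5, 21   # hypothetical values
overlap = 1.5
coordinatesPerTimestep = speed * timestep / scale                 # 2.0
radius = int(round(float(coordinatesPerTimestep) / 2 * overlap))  # round(1.5) -> 2
minRadius = int(math.ceil((math.sqrt(w) - 1) / 2))                # ceil(1.79...) -> 2
print max(radius, minRadius)                                      # 2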
def getSearch(rootDir): """ This method returns search description. See the following file for the schema of the dictionary this method returns: py/nupic/swarming/exp_generator/experimentDescriptionSchema.json The streamDef element defines the stream for this model. The schema for this element can be found at: py/nupicengine/cluster/database/StreamDef.json """ # Form the stream definition dataPath = os.path.abspath(os.path.join(rootDir, 'datasets', 'scalar_1.csv')) streamDef = dict( version = 1, info = "testSpatialClassification", streams = [ dict(source="file://%s" % (dataPath), info="scalar_1.csv", columns=["*"], ), ], ) # Generate the experiment description expDesc = { "environment": 'nupic', "inferenceArgs":{ "predictedField":"classification", "predictionSteps": [0], }, "inferenceType": "MultiStep", "streamDef": streamDef, "includedFields": [ { "fieldName": "field1", "fieldType": "float", }, { "fieldName": "classification", "fieldType": "string", }, { "fieldName": "randomData", "fieldType": "float", }, ], "iterationCount": -1, } return expDesc
def encodeIntoArray(self, value, output): """ See method description in base.py """ denseInput = numpy.zeros(output.shape) try: denseInput[value] = 1 except IndexError: if isinstance(value, numpy.ndarray): raise ValueError( "Numpy array must have integer dtype but got {}".format( value.dtype)) raise super(SparsePassThroughEncoder, self).encodeIntoArray(denseInput, output)
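A standalone sketch of the index-to-dense conversion performed above; the array sizes are illustrative.

import numpy

value = numpy.array([1, 3, 4])        # active input indices
output = numpy.zeros(8)               # encoder output width of 8 (illustrative)
denseInput = numpy.zeros(output.shape)
denseInput[value] = 1                 # same conversion as encodeIntoArray()
print denseInput                      # [ 0. 1. 0. 1. 1. 0. 0. 0.]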
def readFromFile(cls, f, packed=True): """ Read serialized object from file. :param f: input file :param packed: If true, will assume content is packed :return: first-class instance initialized from proto obj """ # Get capnproto schema from instance schema = cls.getSchema() # Read from file if packed: proto = schema.read_packed(f) else: proto = schema.read(f) # Return first-class instance initialized from proto obj return cls.read(proto)
def writeToFile(self, f, packed=True): """ Write serialized object to file. :param f: output file :param packed: If true, will pack contents. """ # Get capnproto schema from instance schema = self.getSchema() # Construct new message, otherwise referred to as `proto` proto = schema.new_message() # Populate message w/ `write()` instance method self.write(proto) # Finally, write to file if packed: proto.write_packed(f) else: proto.write(f)
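A hypothetical round-trip using the two helpers above; `model` and `MyModel` are placeholders for any class providing getSchema(), read(), and write(), not names from this codebase.

with open('model.bin', 'wb') as f:
    model.writeToFile(f, packed=True)      # serialize to a packed capnp message

with open('model.bin', 'rb') as f:
    restored = MyModel.readFromFile(f, packed=True)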
def read(cls, proto): """ :param proto: capnp TwoGramModelProto message reader """ instance = object.__new__(cls) super(TwoGramModel, instance).__init__(proto=proto.modelBase) instance._logger = opf_utils.initLogger(instance) instance._reset = proto.reset instance._hashToValueDict = {x.hash: x.value for x in proto.hashToValueDict} instance._learningEnabled = proto.learningEnabled instance._encoder = encoders.MultiEncoder.read(proto.encoder) instance._fieldNames = instance._encoder.getScalarNames() instance._prevValues = list(proto.prevValues) instance._twoGramDicts = [dict() for _ in xrange(len(proto.twoGramDicts))] for idx, field in enumerate(proto.twoGramDicts): for entry in field: prev = None if entry.value == -1 else entry.value instance._twoGramDicts[idx][prev] = collections.defaultdict(int) for bucket in entry.buckets: instance._twoGramDicts[idx][prev][bucket.index] = bucket.count return instance
def write(self, proto): """ :param proto: capnp TwoGramModelProto message builder """ super(TwoGramModel, self).writeBaseToProto(proto.modelBase) proto.reset = self._reset proto.learningEnabled = self._learningEnabled proto.prevValues = self._prevValues self._encoder.write(proto.encoder) proto.hashToValueDict = [{"hash": h, "value": v} for h, v in self._hashToValueDict.items()] twoGramDicts = [] for items in self._twoGramDicts: twoGramArr = [] for prev, values in items.iteritems(): buckets = [{"index": index, "count": count} for index, count in values.iteritems()] if prev is None: prev = -1 twoGramArr.append({"value": prev, "buckets": buckets}) twoGramDicts.append(twoGramArr) proto.twoGramDicts = twoGramDicts
def requireAnomalyModel(func): """ Decorator for functions that require anomaly models. """ @wraps(func) def _decorator(self, *args, **kwargs): if self.getInferenceType() != InferenceType.TemporalAnomaly: raise RuntimeError("Method requires a TemporalAnomaly model.") if self._getAnomalyClassifier() is None: raise RuntimeError("Model does not support this command. Model must " "be an active anomalyDetector model.") return func(self, *args, **kwargs) return _decorator
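A sketch of how the decorator guards methods like the anomaly label accessors that follow; SomeModel is a placeholder class, not from this codebase.

class SomeModel(object):   # placeholder; the real class defines the helpers used below

  @requireAnomalyModel
  def anomalyGetLabels(self, start, end):
    # Only reachable for active TemporalAnomaly models; otherwise the
    # decorator raises RuntimeError before this body runs.
    return self._getAnomalyClassifier().getSelf().getLabels(start, end)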
def anomalyRemoveLabels(self, start, end, labelFilter): """ Remove labels from the anomaly classifier within this model. Removes all records if ``labelFilter==None``, otherwise only removes the labels equal to ``labelFilter``. :param start: (int) index to start removing labels :param end: (int) index to end removing labels :param labelFilter: (string) If specified, only removes records that match """ self._getAnomalyClassifier().getSelf().removeLabels(start, end, labelFilter)
def anomalyAddLabel(self, start, end, labelName): """ Add a label to the anomaly classifier within this model. :param start: (int) index to start label :param end: (int) index to end label :param labelName: (string) name of label """ self._getAnomalyClassifier().getSelf().addLabel(start, end, labelName)
def anomalyGetLabels(self, start, end): """ Get labels from the anomaly classifier within this model. :param start: (int) index to start getting labels :param end: (int) index to end getting labels """ return self._getAnomalyClassifier().getSelf().getLabels(start, end)
def _getSensorInputRecord(self, inputRecord): """ inputRecord - dict containing the input to the sensor Return a 'SensorInput' object, which represents the 'parsed' representation of the input record """ sensor = self._getSensorRegion() dataRow = copy.deepcopy(sensor.getSelf().getOutputValues('sourceOut')) dataDict = copy.deepcopy(inputRecord) inputRecordEncodings = sensor.getSelf().getOutputValues('sourceEncodings') inputRecordCategory = int(sensor.getOutputData('categoryOut')[0]) resetOut = sensor.getOutputData('resetOut')[0] return SensorInput(dataRow=dataRow, dataDict=dataDict, dataEncodings=inputRecordEncodings, sequenceReset=resetOut, category=inputRecordCategory)
def _getClassifierInputRecord(self, inputRecord): """ inputRecord - dict containing the input to the sensor Return a 'ClassifierInput' object, which contains the mapped bucket index for the input record """ absoluteValue = None bucketIdx = None if self._predictedFieldName is not None and self._classifierInputEncoder is not None: absoluteValue = inputRecord[self._predictedFieldName] bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0] return ClassifierInput(dataRow=absoluteValue, bucketIndex=bucketIdx)
def _anomalyCompute(self): """ Compute Anomaly score, if required """ inferenceType = self.getInferenceType() inferences = {} sp = self._getSPRegion() score = None if inferenceType == InferenceType.NontemporalAnomaly: score = sp.getOutputData("anomalyScore")[0] #TODO move from SP to Anomaly ? elif inferenceType == InferenceType.TemporalAnomaly: tm = self._getTPRegion() if sp is not None: activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0] else: sensor = self._getSensorRegion() activeColumns = sensor.getOutputData('dataOut').nonzero()[0] if not self._predictedFieldName in self._input: raise ValueError( "Expected predicted field '%s' in input row, but was not found!" % self._predictedFieldName ) # Calculate the anomaly score using the active columns # and previous predicted columns. score = tm.getOutputData("anomalyScore")[0] # Calculate the classifier's output and use the result as the anomaly # label. Stores as string of results. # TODO: make labels work with non-SP models if sp is not None: self._getAnomalyClassifier().setParameter( "activeColumnCount", len(activeColumns)) self._getAnomalyClassifier().prepareInputs() self._getAnomalyClassifier().compute() labels = self._getAnomalyClassifier().getSelf().getLabelResults() inferences[InferenceElement.anomalyLabel] = "%s" % labels inferences[InferenceElement.anomalyScore] = score return inferences
def _handleSDRClassifierMultiStep(self, patternNZ, inputTSRecordIdx, rawInput): """ Handle the CLA Classifier compute logic when implementing multi-step prediction. This is where the patternNZ is associated with one of the other fields from the dataset 0 to N steps in the future. This method is used by each type of network (encoder only, SP only, SP +TM) to handle the compute logic through the CLA Classifier. It fills in the inference dict with the results of the compute. Parameters: ------------------------------------------------------------------- patternNZ: The input to the CLA Classifier as a list of active input indices inputTSRecordIdx: The index of the record as computed from the timestamp and aggregation interval. This normally increments by 1 each time unless there are missing records. If there is no aggregation interval or timestamp in the data, this will be None. rawInput: The raw input to the sensor, as a dict. """ inferenceArgs = self.getInferenceArgs() predictedFieldName = inferenceArgs.get('predictedField', None) if predictedFieldName is None: raise ValueError( "No predicted field was enabled! Did you call enableInference()?" ) self._predictedFieldName = predictedFieldName classifier = self._getClassifierRegion() if not self._hasCL or classifier is None: # No classifier so return an empty dict for inferences. return {} sensor = self._getSensorRegion() minLikelihoodThreshold = self._minLikelihoodThreshold maxPredictionsPerStep = self._maxPredictionsPerStep needLearning = self.isLearningEnabled() inferences = {} # Get the classifier input encoder, if we don't have it already if self._classifierInputEncoder is None: if predictedFieldName is None: raise RuntimeError("This experiment description is missing " "the 'predictedField' in its config, which is required " "for multi-step prediction inference.") encoderList = sensor.getSelf().encoder.getEncoderList() self._numFields = len(encoderList) # This is getting index of predicted field if being fed to CLA. fieldNames = sensor.getSelf().encoder.getScalarNames() if predictedFieldName in fieldNames: self._predictedFieldIdx = fieldNames.index(predictedFieldName) else: # Predicted field was not fed into the network, only to the classifier self._predictedFieldIdx = None # In a multi-step model, the classifier input encoder is separate from # the other encoders and always disabled from going into the bottom of # the network. if sensor.getSelf().disabledEncoder is not None: encoderList = sensor.getSelf().disabledEncoder.getEncoderList() else: encoderList = [] if len(encoderList) >= 1: fieldNames = sensor.getSelf().disabledEncoder.getScalarNames() self._classifierInputEncoder = encoderList[fieldNames.index( predictedFieldName)] else: # Legacy multi-step networks don't have a separate encoder for the # classifier, so use the one that goes into the bottom of the network encoderList = sensor.getSelf().encoder.getEncoderList() self._classifierInputEncoder = encoderList[self._predictedFieldIdx] # Get the actual value and the bucket index for this sample. The # predicted field may not be enabled for input to the network, so we # explicitly encode it outside of the sensor # TODO: All this logic could be simpler if in the encoder itself if not predictedFieldName in rawInput: raise ValueError("Input row does not contain a value for the predicted " "field configured for this model. 
Missing value for '%s'" % predictedFieldName) absoluteValue = rawInput[predictedFieldName] bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0] # Convert the absolute values to deltas if necessary # The bucket index should be handled correctly by the underlying delta encoder if isinstance(self._classifierInputEncoder, DeltaEncoder): # Make the delta before any values have been seen 0 so that we do not mess up the # range for the adaptive scalar encoder. if not hasattr(self,"_ms_prevVal"): self._ms_prevVal = absoluteValue prevValue = self._ms_prevVal self._ms_prevVal = absoluteValue actualValue = absoluteValue - prevValue else: actualValue = absoluteValue if isinstance(actualValue, float) and math.isnan(actualValue): actualValue = SENTINEL_VALUE_FOR_MISSING_DATA # Pass this information to the classifier's custom compute method # so that it can assign the current classification to possibly # multiple patterns from the past and current, and also provide # the expected classification for some time step(s) in the future. classifier.setParameter('inferenceMode', True) classifier.setParameter('learningMode', needLearning) classificationIn = {'bucketIdx': bucketIdx, 'actValue': actualValue} # Handle missing records if inputTSRecordIdx is not None: recordNum = inputTSRecordIdx else: recordNum = self.__numRunCalls clResults = classifier.getSelf().customCompute(recordNum=recordNum, patternNZ=patternNZ, classification=classificationIn) # --------------------------------------------------------------- # Get the prediction for every step ahead learned by the classifier predictionSteps = classifier.getParameter('steps') predictionSteps = [int(x) for x in predictionSteps.split(',')] # We will return the results in this dict. The top level keys # are the step number, the values are the relative likelihoods for # each classification value in that time step, represented as # another dict where the keys are the classification values and # the values are the relative likelihoods. inferences[InferenceElement.multiStepPredictions] = dict() inferences[InferenceElement.multiStepBestPredictions] = dict() inferences[InferenceElement.multiStepBucketLikelihoods] = dict() # ====================================================================== # Plug in the predictions for each requested time step. for steps in predictionSteps: # From the clResults, compute the predicted actual value. The # SDRClassifier classifies the bucket index and returns a list of # relative likelihoods for each bucket. Let's find the max one # and then look up the actual value from that bucket index likelihoodsVec = clResults[steps] bucketValues = clResults['actualValues'] # Create a dict of value:likelihood pairs. We can't simply use # dict(zip(bucketValues, likelihoodsVec)) because there might be # duplicate bucketValues (this happens early on in the model when # it doesn't have actual values for each bucket so it returns # multiple buckets with the same default actual value). likelihoodsDict = dict() bestActValue = None bestProb = None for (actValue, prob) in zip(bucketValues, likelihoodsVec): if actValue in likelihoodsDict: likelihoodsDict[actValue] += prob else: likelihoodsDict[actValue] = prob # Keep track of best if bestProb is None or likelihoodsDict[actValue] > bestProb: bestProb = likelihoodsDict[actValue] bestActValue = actValue # Remove entries with 0 likelihood or likelihood less than # minLikelihoodThreshold, but don't leave an empty dict. 
likelihoodsDict = HTMPredictionModel._removeUnlikelyPredictions( likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep) # calculate likelihood for each bucket bucketLikelihood = {} for k in likelihoodsDict.keys(): bucketLikelihood[self._classifierInputEncoder.getBucketIndices(k)[0]] = ( likelihoodsDict[k]) # --------------------------------------------------------------------- # If we have a delta encoder, we have to shift our predicted output value # by the sum of the deltas if isinstance(self._classifierInputEncoder, DeltaEncoder): # Get the prediction history for this number of timesteps. # The prediction history is a store of the previous best predicted values. # This is used to get the final shift from the current absolute value. if not hasattr(self, '_ms_predHistories'): self._ms_predHistories = dict() predHistories = self._ms_predHistories if not steps in predHistories: predHistories[steps] = deque() predHistory = predHistories[steps] # Find the sum of the deltas for the steps and use this to generate # an offset from the current absolute value sumDelta = sum(predHistory) offsetDict = dict() for (k, v) in likelihoodsDict.iteritems(): if k is not None: # Reconstruct the absolute value based on the current actual value, # the best predicted values from the previous iterations, # and the current predicted delta offsetDict[absoluteValue+float(k)+sumDelta] = v # calculate likelihood for each bucket bucketLikelihoodOffset = {} for k in offsetDict.keys(): bucketLikelihoodOffset[self._classifierInputEncoder.getBucketIndices(k)[0]] = ( offsetDict[k]) # Push the current best delta to the history buffer for reconstructing the final delta if bestActValue is not None: predHistory.append(bestActValue) # If we don't need any more values in the predictionHistory, pop off # the earliest one. if len(predHistory) >= steps: predHistory.popleft() # Provide the offsetDict as the return value if len(offsetDict)>0: inferences[InferenceElement.multiStepPredictions][steps] = offsetDict inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodOffset else: inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDict inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihood if bestActValue is None: inferences[InferenceElement.multiStepBestPredictions][steps] = None else: inferences[InferenceElement.multiStepBestPredictions][steps] = ( absoluteValue + sumDelta + bestActValue) # --------------------------------------------------------------------- # Normal case, no delta encoder. Just plug in all our multi-step predictions # with likelihoods as well as our best prediction else: # The multiStepPredictions element holds the probabilities for each # bucket inferences[InferenceElement.multiStepPredictions][steps] = ( likelihoodsDict) inferences[InferenceElement.multiStepBestPredictions][steps] = ( bestActValue) inferences[InferenceElement.multiStepBucketLikelihoods][steps] = ( bucketLikelihood) return inferences
def _removeUnlikelyPredictions(cls, likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep): """Remove entries with 0 likelihood or likelihood less than minLikelihoodThreshold, but don't leave an empty dict. """ maxVal = (None, None) for (k, v) in likelihoodsDict.items(): if len(likelihoodsDict) <= 1: break if maxVal[0] is None or v >= maxVal[1]: if maxVal[0] is not None and maxVal[1] < minLikelihoodThreshold: del likelihoodsDict[maxVal[0]] maxVal = (k, v) elif v < minLikelihoodThreshold: del likelihoodsDict[k] # Limit the number of predictions to include. likelihoodsDict = dict(sorted(likelihoodsDict.iteritems(), key=itemgetter(1), reverse=True)[:maxPredictionsPerStep]) return likelihoodsDict
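A worked sketch of the pruning above, with illustrative values; it assumes the method is reachable as a classmethod on HTMPredictionModel, as in the call site earlier.

likelihoods = {1.5: 0.01, 3.5: 0.60, 5.5: 0.39}
pruned = HTMPredictionModel._removeUnlikelyPredictions(
    likelihoods, minLikelihoodThreshold=0.05, maxPredictionsPerStep=8)
# The 0.01 entry falls below the threshold and is dropped; the others survive,
# and the dict is never emptied below one entry.
print pruned   # {3.5: 0.6, 5.5: 0.39} (key order may vary)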
def getRuntimeStats(self): """ Returns runtime statistics for this model. :return: (dict) always contains ``numRunCalls``; for temporal models it also contains the stats gathered by the network's stats collectors. """ ret = {"numRunCalls" : self.__numRunCalls} #-------------------------------------------------- # Query temporal network stats temporalStats = dict() if self._hasTP: for stat in self._netInfo.statsCollectors: sdict = stat.getStats() temporalStats.update(sdict) ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats return ret
def _getClassifierRegion(self): """ Returns reference to the network's Classifier region """ if (self._netInfo.net is not None and "Classifier" in self._netInfo.net.regions): return self._netInfo.net.regions["Classifier"] else: return None
def __createHTMNetwork(self, sensorParams, spEnable, spParams, tmEnable,
                       tmParams, clEnable, clParams, anomalyParams):
  """ Create a CLA network and return it.

  sensorParams:   sensor region parameters, including the 'encoders' dict
  spEnable:       True to include an SPRegion
  spParams:       SPRegion parameters
  tmEnable:       True to include a TMRegion
  tmParams:       TMRegion parameters
  clEnable:       True to include a Classifier region
  clParams:       Classifier region parameters (including 'regionName')
  anomalyParams:  anomaly classifier parameters (TemporalAnomaly models only)

  Returns: NetworkInfo instance
  """
  #--------------------------------------------------
  # Create the network
  n = Network()

  #--------------------------------------------------
  # Add the Sensor
  n.addRegion("sensor", "py.RecordSensor",
              json.dumps(dict(verbosity=sensorParams['verbosity'])))
  sensor = n.regions['sensor'].getSelf()

  enabledEncoders = copy.deepcopy(sensorParams['encoders'])
  for name, params in enabledEncoders.items():
    if params is not None:
      classifierOnly = params.pop('classifierOnly', False)
      if classifierOnly:
        enabledEncoders.pop(name)

  # Disabled encoders are encoders that are fed to SDRClassifierRegion but
  # not SP or TM Regions. This is to handle the case where the predicted
  # field is not fed through the SP/TM. We typically just have one of these
  # now.
  disabledEncoders = copy.deepcopy(sensorParams['encoders'])
  for name, params in disabledEncoders.items():
    if params is None:
      disabledEncoders.pop(name)
    else:
      classifierOnly = params.pop('classifierOnly', False)
      if not classifierOnly:
        disabledEncoders.pop(name)

  encoder = MultiEncoder(enabledEncoders)

  sensor.encoder = encoder
  sensor.disabledEncoder = MultiEncoder(disabledEncoders)
  sensor.dataSource = DataBuffer()

  prevRegion = "sensor"
  prevRegionWidth = encoder.getWidth()

  # SP is not enabled for spatial classification network
  if spEnable:
    spParams = spParams.copy()
    spParams['inputWidth'] = prevRegionWidth
    self.__logger.debug("Adding SPRegion; spParams: %r" % spParams)
    n.addRegion("SP", "py.SPRegion", json.dumps(spParams))

    # Link SP region
    n.link("sensor", "SP", "UniformLink", "")
    n.link("sensor", "SP", "UniformLink", "", srcOutput="resetOut",
           destInput="resetIn")
    n.link("SP", "sensor", "UniformLink", "", srcOutput="spatialTopDownOut",
           destInput="spatialTopDownIn")
    n.link("SP", "sensor", "UniformLink", "", srcOutput="temporalTopDownOut",
           destInput="temporalTopDownIn")

    prevRegion = "SP"
    prevRegionWidth = spParams['columnCount']

  if tmEnable:
    tmParams = tmParams.copy()
    if prevRegion == 'sensor':
      tmParams['inputWidth'] = tmParams['columnCount'] = prevRegionWidth
    else:
      assert tmParams['columnCount'] == prevRegionWidth
      tmParams['inputWidth'] = tmParams['columnCount']

    self.__logger.debug("Adding TMRegion; tmParams: %r" % tmParams)
    n.addRegion("TM", "py.TMRegion", json.dumps(tmParams))

    # Link TM region
    n.link(prevRegion, "TM", "UniformLink", "")
    if prevRegion != "sensor":
      n.link("TM", prevRegion, "UniformLink", "", srcOutput="topDownOut",
             destInput="topDownIn")
    else:
      n.link("TM", prevRegion, "UniformLink", "", srcOutput="topDownOut",
             destInput="temporalTopDownIn")
    n.link("sensor", "TM", "UniformLink", "", srcOutput="resetOut",
           destInput="resetIn")

    prevRegion = "TM"
    prevRegionWidth = tmParams['inputWidth']

  if clEnable and clParams is not None:
    clParams = clParams.copy()
    clRegionName = clParams.pop('regionName')
    self.__logger.debug("Adding %s; clParams: %r" % (clRegionName, clParams))
    n.addRegion("Classifier", "py.%s" % str(clRegionName),
                json.dumps(clParams))

    # SDR Classifier-specific links
    if str(clRegionName) == "SDRClassifierRegion":
      n.link("sensor", "Classifier", "UniformLink", "",
             srcOutput="actValueOut", destInput="actValueIn")
      n.link("sensor", "Classifier", "UniformLink", "",
             srcOutput="bucketIdxOut", destInput="bucketIdxIn")

    # This applies to all (SDR and KNN) classifiers
    n.link("sensor", "Classifier", "UniformLink", "",
           srcOutput="categoryOut", destInput="categoryIn")
    n.link(prevRegion, "Classifier", "UniformLink", "")

  if self.getInferenceType() == InferenceType.TemporalAnomaly:
    anomalyClParams = dict(
        trainRecords=anomalyParams.get('autoDetectWaitRecords', None),
        cacheSize=anomalyParams.get('anomalyCacheRecords', None)
    )
    self._addAnomalyClassifierRegion(n, anomalyClParams, spEnable, tmEnable)

  #--------------------------------------------------
  # NuPIC doesn't initialize the network until you try to run it
  # but users may want to access components in a setup callback
  n.initialize()

  return NetworkInfo(net=n, statsCollectors=[])
def write(self, proto): """ :param proto: capnp HTMPredictionModelProto message builder """ super(HTMPredictionModel, self).writeBaseToProto(proto.modelBase) proto.numRunCalls = self.__numRunCalls proto.minLikelihoodThreshold = self._minLikelihoodThreshold proto.maxPredictionsPerStep = self._maxPredictionsPerStep self._netInfo.net.write(proto.network) proto.spLearningEnabled = self.__spLearningEnabled proto.tpLearningEnabled = self.__tpLearningEnabled if self._predictedFieldIdx is None: proto.predictedFieldIdx.none = None else: proto.predictedFieldIdx.value = self._predictedFieldIdx if self._predictedFieldName is None: proto.predictedFieldName.none = None else: proto.predictedFieldName.value = self._predictedFieldName if self._numFields is None: proto.numFields.none = None else: proto.numFields.value = self._numFields proto.trainSPNetOnlyIfRequested = self.__trainSPNetOnlyIfRequested proto.finishedLearning = self.__finishedLearning
def read(cls, proto):
  """
  :param proto: capnp HTMPredictionModelProto message reader
  """
  obj = object.__new__(cls)

  # model.capnp
  super(HTMPredictionModel, obj).__init__(proto=proto.modelBase)

  # HTMPredictionModelProto.capnp
  obj._minLikelihoodThreshold = round(proto.minLikelihoodThreshold,
                                      EPSILON_ROUND)
  obj._maxPredictionsPerStep = proto.maxPredictionsPerStep

  network = Network.read(proto.network)
  obj._hasSP = ("SP" in network.regions)
  obj._hasTP = ("TM" in network.regions)
  obj._hasCL = ("Classifier" in network.regions)
  obj._netInfo = NetworkInfo(net=network, statsCollectors=[])

  obj.__spLearningEnabled = bool(proto.spLearningEnabled)
  obj.__tpLearningEnabled = bool(proto.tpLearningEnabled)
  obj.__numRunCalls = proto.numRunCalls

  obj._classifierInputEncoder = None

  if proto.predictedFieldIdx.which() == "none":
    obj._predictedFieldIdx = None
  else:
    obj._predictedFieldIdx = proto.predictedFieldIdx.value

  if proto.predictedFieldName.which() == "none":
    obj._predictedFieldName = None
  else:
    obj._predictedFieldName = proto.predictedFieldName.value

  if proto.numFields.which() == "none":
    obj._numFields = None
  else:
    obj._numFields = proto.numFields.value

  obj.__trainSPNetOnlyIfRequested = proto.trainSPNetOnlyIfRequested
  obj.__finishedLearning = proto.finishedLearning
  obj._input = None

  sensor = network.regions['sensor'].getSelf()
  sensor.dataSource = DataBuffer()
  network.initialize()

  obj.__logger = initLogger(obj)
  obj.__logger.debug("Instantiating %s." % obj.__myClassName)

  # Mark end of restoration from state
  obj.__restoringFromState = False
  obj.__restoringFromV1 = False

  return obj
def _serializeExtraData(self, extraDataDir): """ [virtual method override] This method is called during serialization with an external directory path that can be used to bypass pickle for saving large binary states. extraDataDir: Model's extra data directory path """ makeDirectoryFromAbsolutePath(extraDataDir) #-------------------------------------------------- # Save the network outputDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir) self.__logger.debug("Serializing network...") self._netInfo.net.save(outputDir) self.__logger.debug("Finished serializing network") return
def _deSerializeExtraData(self, extraDataDir):
  """ [virtual method override] This method is called during deserialization
  (after __setstate__) with an external directory path that can be used to
  bypass pickle for loading large binary states.

  extraDataDir: Model's extra data directory path
  """
  assert self.__restoringFromState

  #--------------------------------------------------
  # Check to make sure that our Network member wasn't restored from
  # serialized data
  assert (self._netInfo.net is None), "Network was already unpickled"

  #--------------------------------------------------
  # Restore the network
  stateDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)

  self.__logger.debug(
    "(%s) De-serializing network...", self)

  self._netInfo.net = Network(stateDir)

  self.__logger.debug(
    "(%s) Finished de-serializing network", self)

  # NuPIC doesn't initialize the network until you try to run it
  # but users may want to access components in a setup callback
  self._netInfo.net.initialize()

  # Used for backwards compatibility for anomaly classification models.
  # Previous versions used the HTMPredictionModelClassifierHelper class for
  # utilizing the KNN classifier. Current version uses
  # KNNAnomalyClassifierRegion to encapsulate all the classifier
  # functionality.
  if self.getInferenceType() == InferenceType.TemporalAnomaly:
    classifierType = self._getAnomalyClassifier().getSelf().__class__.__name__
    if classifierType == 'KNNClassifierRegion':

      anomalyClParams = dict(
        trainRecords=self._classifier_helper._autoDetectWaitRecords,
        cacheSize=self._classifier_helper._history_length,
      )

      spEnable = (self._getSPRegion() is not None)
      tmEnable = True

      # Store original KNN region
      knnRegion = self._getAnomalyClassifier().getSelf()

      # Add new KNNAnomalyClassifierRegion
      self._addAnomalyClassifierRegion(self._netInfo.net, anomalyClParams,
                                       spEnable, tmEnable)

      # Restore state
      self._getAnomalyClassifier().getSelf()._iteration = self.__numRunCalls
      self._getAnomalyClassifier().getSelf()._recordsCache = (
          self._classifier_helper.saved_states)
      self._getAnomalyClassifier().getSelf().saved_categories = (
          self._classifier_helper.saved_categories)
      self._getAnomalyClassifier().getSelf()._knnclassifier = knnRegion

      # Set TM to output necessary information
      self._getTPRegion().setParameter('anomalyMode', True)

      # Remove old classifier_helper
      del self._classifier_helper

      self._netInfo.net.initialize()

  #--------------------------------------------------
  # Mark end of restoration from state
  self.__restoringFromState = False

  self.__logger.debug("(%s) Finished restoring from state", self)

  return
def _addAnomalyClassifierRegion(self, network, params, spEnable, tmEnable):
  """ Attaches an 'AnomalyClassifier' region to the network. Will remove the
  current 'AnomalyClassifier' region if it exists.

  Parameters
  -----------
  network - network to add the AnomalyClassifier region to
  params - parameters to pass to the region
  spEnable - True if network has an SP region
  tmEnable - True if network has a TM region; Currently requires True
  """

  allParams = copy.deepcopy(params)
  knnParams = dict(k=1,
                   distanceMethod='rawOverlap',
                   distanceNorm=1,
                   doBinarization=1,
                   replaceDuplicates=0,
                   maxStoredPatterns=1000)
  allParams.update(knnParams)

  # Set defaults if not set
  if allParams['trainRecords'] is None:
    allParams['trainRecords'] = DEFAULT_ANOMALY_TRAINRECORDS

  if allParams['cacheSize'] is None:
    allParams['cacheSize'] = DEFAULT_ANOMALY_CACHESIZE

  # Remove current instance if already created (used for deserializing)
  if self._netInfo is not None and self._netInfo.net is not None \
      and self._getAnomalyClassifier() is not None:
    self._netInfo.net.removeRegion('AnomalyClassifier')

  network.addRegion("AnomalyClassifier",
                    "py.KNNAnomalyClassifierRegion",
                    json.dumps(allParams))

  # Attach link to SP
  if spEnable:
    network.link("SP", "AnomalyClassifier", "UniformLink", "",
                 srcOutput="bottomUpOut", destInput="spBottomUpOut")
  else:
    network.link("sensor", "AnomalyClassifier", "UniformLink", "",
                 srcOutput="dataOut", destInput="spBottomUpOut")

  # Attach link to TM
  if tmEnable:
    network.link("TM", "AnomalyClassifier", "UniformLink", "",
                 srcOutput="topDownOut", destInput="tpTopDownOut")
    network.link("TM", "AnomalyClassifier", "UniformLink", "",
                 srcOutput="lrnActiveStateT", destInput="tpLrnActiveStateT")
  else:
    raise RuntimeError("TemporalAnomaly models require a TM region.")
def __getNetworkStateDirectory(self, extraDataDir):
  """
  extraDataDir: Model's extra data directory path

  Returns: Absolute directory path for saving CLA Network
  """
  if self.__restoringFromV1:
    if self.getInferenceType() == InferenceType.TemporalNextStep:
      leafName = 'temporal' + "-network.nta"
    else:
      leafName = 'nonTemporal' + "-network.nta"
  else:
    leafName = InferenceType.getLabel(self.getInferenceType()) + \
               "-network.nta"
  path = os.path.join(extraDataDir, leafName)
  path = os.path.abspath(path)
  return path
def __manglePrivateMemberName(self, privateMemberName, skipCheck=False):
  """ Mangles the given private member name; a private member name is one
  that begins with two or more underscores and ends with at most one
  underscore.

  privateMemberName:
                The private member name (e.g., "__logger")

  skipCheck:    Pass True to skip the test for presence of the mangled
                member in our instance.

  Returns:      The mangled member name (e.g., "_HTMPredictionModel__logger")
  """

  assert privateMemberName.startswith("__"), \
         "%r doesn't start with __" % privateMemberName
  assert not privateMemberName.startswith("___"), \
         "%r starts with ___" % privateMemberName
  assert not privateMemberName.endswith("__"), \
         "%r ends with more than one underscore" % privateMemberName

  realName = "_" + (self.__myClassName).lstrip("_") + privateMemberName

  if not skipCheck:
    # This will throw an exception if the member is missing
    getattr(self, realName)

  return realName
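# A small standalone demonstration (not part of the original module) of
# the CPython name-mangling rule that __manglePrivateMemberName()
# reproduces: inside class C, an attribute "__x" (two leading underscores,
# at most one trailing) is actually stored as "_C__x".
class _MangleDemo(object):
  def __init__(self):
    self.__secret = 42   # stored as "_MangleDemo__secret"

_demo = _MangleDemo()
assert '_MangleDemo__secret' in _demo.__dict__
assert getattr(_demo, '_MangleDemo__secret') == 42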
def _setEncoderParams(self): """ Set the radius, resolution and range. These values are updated when minval and/or maxval change. """ self.rangeInternal = float(self.maxval - self.minval) self.resolution = float(self.rangeInternal) / (self.n - self.w) self.radius = self.w * self.resolution self.range = self.rangeInternal + self.resolution # nInternal represents the output area excluding the possible padding on each side self.nInternal = self.n - 2 * self.padding # Invalidate the bucket values cache so that they get recomputed self._bucketValues = None
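# A worked example (hypothetical values, not part of the original module)
# of the arithmetic in _setEncoderParams() above. With n=100 output bits,
# w=21 active bits, and a [0, 79] value range:
#   resolution = 79 / (100 - 21) = 1.0
#   radius     = 21 * 1.0        = 21.0
#   range      = 79 + 1.0        = 80.0
def _encoderParamsExample(minval=0.0, maxval=79.0, n=100, w=21):
  rangeInternal = float(maxval - minval)     # 79.0
  resolution = rangeInternal / (n - w)       # 1.0
  radius = w * resolution                    # 21.0
  valueRange = rangeInternal + resolution    # 80.0
  return resolution, radius, valueRange

assert _encoderParamsExample() == (1.0, 21.0, 80.0)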
def setFieldStats(self, fieldName, fieldStats):
  """ Initialize minval and maxval from the given field statistics. If the
  stats are not fully formed (min or max is None), they are ignored.
  """
  # If the stats are not fully formed, ignore.
  if (fieldStats[fieldName]['min'] is None or
      fieldStats[fieldName]['max'] is None):
    return

  self.minval = fieldStats[fieldName]['min']
  self.maxval = fieldStats[fieldName]['max']
  if self.minval == self.maxval:
    self.maxval += 1
  self._setEncoderParams()
def _setMinAndMax(self, input, learn):
  """ Potentially change minval and maxval using the input value.
  **The learn flag is currently not supported by cla regions.**
  """

  self.slidingWindow.next(input)

  if self.minval is None and self.maxval is None:
    # When min and max are unspecified and only one record has been encoded
    self.minval = input
    self.maxval = input + 1
    self._setEncoderParams()

  elif learn:
    window = self.slidingWindow.getSlidingWindow()
    window.sort()

    minOverWindow = window[0]
    maxOverWindow = window[-1]

    if minOverWindow < self.minval:
      #initialBump = abs(self.minval-minOverWindow)*(1-(min(self.recordNum, 200.0)/200.0))*2
      # decrease minval more aggressively in the beginning
      if self.verbosity >= 2:
        print "Input %s=%.2f smaller than minval %.2f. Adjusting minval to %.2f" \
              % (self.name, input, self.minval, minOverWindow)
      self.minval = minOverWindow  #-initialBump
      self._setEncoderParams()

    if maxOverWindow > self.maxval:
      #initialBump = abs(self.maxval-maxOverWindow)*(1-(min(self.recordNum, 200.0)/200.0))*2
      # increase maxval more aggressively in the beginning
      if self.verbosity >= 2:
        print "Input %s=%.2f greater than maxval %.2f. Adjusting maxval to %.2f" \
              % (self.name, input, self.maxval, maxOverWindow)
      self.maxval = maxOverWindow  #+initialBump
      self._setEncoderParams()
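# A simplified sketch (not the original SlidingWindow class; helper names
# are hypothetical) of the adaptation rule in _setMinAndMax() above:
# minval/maxval only ever widen, and only when the min/max over the recent
# window escapes the current range.
def _adaptRangeExample(values, windowSize=10):
  window, minval, maxval = [], None, None
  for v in values:
    window.append(v)
    window = window[-windowSize:]          # keep only the recent window
    if minval is None:
      minval, maxval = v, v + 1            # first record seeds the range
    else:
      minval = min(minval, min(window))    # widen downward only
      maxval = max(maxval, max(window))    # widen upward only
  return minval, maxval

assert _adaptRangeExample([5, 3, 9, 2, 12]) == (2, 12)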
def getBucketIndices(self, input, learn=None):
  """ [overrides nupic.encoders.scalar.ScalarEncoder.getBucketIndices] """

  self.recordNum += 1
  if learn is None:
    learn = self._learningEnabled

  if type(input) is float and math.isnan(input):
    input = SENTINEL_VALUE_FOR_MISSING_DATA

  if input == SENTINEL_VALUE_FOR_MISSING_DATA:
    return [None]
  else:
    self._setMinAndMax(input, learn)
    return super(AdaptiveScalarEncoder, self).getBucketIndices(input)
def encodeIntoArray(self, input, output, learn=None):
  """ [overrides nupic.encoders.scalar.ScalarEncoder.encodeIntoArray] """

  self.recordNum += 1
  if learn is None:
    learn = self._learningEnabled
  if input == SENTINEL_VALUE_FOR_MISSING_DATA:
    output[0:self.n] = 0
  elif not math.isnan(input):
    self._setMinAndMax(input, learn)
    super(AdaptiveScalarEncoder, self).encodeIntoArray(input, output)
def getBucketInfo(self, buckets): """ [overrides nupic.encoders.scalar.ScalarEncoder.getBucketInfo] """ if self.minval is None or self.maxval is None: return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))] return super(AdaptiveScalarEncoder, self).getBucketInfo(buckets)
def topDownCompute(self, encoded): """ [overrides nupic.encoders.scalar.ScalarEncoder.topDownCompute] """ if self.minval is None or self.maxval is None: return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))] return super(AdaptiveScalarEncoder, self).topDownCompute(encoded)
def recordDataPoint(self, swarmId, generation, errScore):
  """Record the best score for a swarm's generation index (x).

  Returns a list of swarmIds to terminate.
  """
  terminatedSwarms = []

  # Append score to existing swarm.
  if swarmId in self.swarmScores:
    entry = self.swarmScores[swarmId]
    assert (len(entry) == generation)
    entry.append(errScore)

    entry = self.swarmBests[swarmId]
    entry.append(min(errScore, entry[-1]))

    assert (len(self.swarmBests[swarmId]) == len(self.swarmScores[swarmId]))

  else:
    # Create list of scores for a new swarm
    assert (generation == 0)
    self.swarmScores[swarmId] = [errScore]
    self.swarmBests[swarmId] = [errScore]

  # If the current swarm hasn't completed at least MATURITY_WINDOW
  # generations, it should not be a candidate for maturation or termination.
  # This prevents the initial allocation of particles in PSO from killing
  # off a field combination too early.
  if generation + 1 < self.MATURITY_WINDOW:
    return terminatedSwarms

  # If the swarm has completed more than MAX_GENERATIONS, it should be
  # marked as mature, regardless of how its value is changing.
  if self.MAX_GENERATIONS is not None and generation > self.MAX_GENERATIONS:
    self._logger.info(
        'Swarm %s has matured (more than %d generations). Stopping' %
        (swarmId, self.MAX_GENERATIONS))
    terminatedSwarms.append(swarmId)

  if self._isTerminationEnabled:
    terminatedSwarms.extend(self._getTerminatedSwarms(generation))

  # If there has been no change in the swarm's best score for
  # MATURITY_WINDOW generations, mark it as terminated.
  cumulativeBestScores = self.swarmBests[swarmId]
  if cumulativeBestScores[-1] == cumulativeBestScores[-self.MATURITY_WINDOW]:
    self._logger.info('Swarm %s has matured (no change in %d generations). '
                      'Stopping...' % (swarmId, self.MATURITY_WINDOW))
    terminatedSwarms.append(swarmId)

  self.terminatedSwarms = self.terminatedSwarms.union(terminatedSwarms)
  return terminatedSwarms
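# A toy illustration (hypothetical scores, not part of the original module)
# of the MATURITY_WINDOW rule used in recordDataPoint() above: a swarm is
# terminated once its cumulative best error has not improved over the last
# MATURITY_WINDOW generations.
def _isMatureExample(cumulativeBests, maturityWindow):
  if len(cumulativeBests) < maturityWindow:
    return False
  # Flat over the window means no improvement since generation -window
  return cumulativeBests[-1] == cumulativeBests[-maturityWindow]

_bests = [0.90, 0.40, 0.35, 0.35, 0.35]
assert not _isMatureExample(_bests, maturityWindow=5)  # improved within window
assert _isMatureExample(_bests[1:], maturityWindow=3)  # flat over last 3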
def getState(self): """See comments in base class.""" return dict(_position = self._position, position = self.getPosition(), velocity = self._velocity, bestPosition = self._bestPosition, bestResult = self._bestResult)
def setState(self, state): """See comments in base class.""" self._position = state['_position'] self._velocity = state['velocity'] self._bestPosition = state['bestPosition'] self._bestResult = state['bestResult']
def getPosition(self): """See comments in base class.""" if self.stepSize is None: return self._position # Find nearest step numSteps = (self._position - self.min) / self.stepSize numSteps = int(round(numSteps)) position = self.min + (numSteps * self.stepSize) position = max(self.min, position) position = min(self.max, position) return position
def agitate(self):
  """See comments in base class."""
  # Increase velocity enough that it will be higher the next time
  # newPosition() is called. We know that newPosition multiplies by inertia,
  # so take that into account.
  self._velocity *= 1.5 / self._inertia

  # Clip velocity
  maxV = (self.max - self.min) / 2
  if self._velocity > maxV:
    self._velocity = maxV
  elif self._velocity < -maxV:
    self._velocity = -maxV

  # If we are at the max or min, reverse direction
  if self._position == self.max and self._velocity > 0:
    self._velocity *= -1
  if self._position == self.min and self._velocity < 0:
    self._velocity *= -1
def newPosition(self, globalBestPosition, rng): """See comments in base class.""" # First, update the velocity. The new velocity is given as: # v = (inertia * v) + (cogRate * r1 * (localBest-pos)) # + (socRate * r2 * (globalBest-pos)) # # where r1 and r2 are random numbers between 0 and 1.0 lb=float(Configuration.get("nupic.hypersearch.randomLowerBound")) ub=float(Configuration.get("nupic.hypersearch.randomUpperBound")) self._velocity = (self._velocity * self._inertia + rng.uniform(lb, ub) * self._cogRate * (self._bestPosition - self.getPosition())) if globalBestPosition is not None: self._velocity += rng.uniform(lb, ub) * self._socRate * ( globalBestPosition - self.getPosition()) # update position based on velocity self._position += self._velocity # Clip it self._position = max(self.min, self._position) self._position = min(self.max, self._position) # Return it return self.getPosition()
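# A self-contained sketch (not part of the original module) of the
# canonical PSO update that newPosition() implements above. The
# inertia/cogRate/socRate values here are illustrative defaults, not the
# configured nupic.hypersearch settings.
import random

def _psoStepExample(pos, vel, localBest, globalBest,
                    inertia=0.25, cogRate=0.25, socRate=1.0,
                    lb=0.0, ub=1.0, rng=random):
  # v = (inertia * v) + (cogRate * r1 * (localBest - pos))
  #                   + (socRate * r2 * (globalBest - pos))
  vel = (vel * inertia
         + rng.uniform(lb, ub) * cogRate * (localBest - pos)
         + rng.uniform(lb, ub) * socRate * (globalBest - pos))
  return pos + vel, vel

# Usage sketch: one step pulls the particle toward both bests.
_newPos, _newVel = _psoStepExample(pos=5.0, vel=1.0, localBest=3.0,
                                   globalBest=8.0)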
def pushAwayFrom(self, otherPositions, rng):
  """See comments in base class."""
  # If min and max are the same, nothing to do
  if self.max == self.min:
    return

  # How many potential other positions to evaluate?
  numPositions = len(otherPositions) * 4
  if numPositions == 0:
    return

  # Lay out the candidate positions evenly over the range. Note that
  # numpy.arange() may include one extra element past the endpoint, so
  # recompute the actual count.
  stepSize = float(self.max - self.min) / numPositions
  positions = numpy.arange(self.min, self.max + stepSize, stepSize)
  numPositions = len(positions)

  weights = numpy.zeros(numPositions)

  # Assign a weight to each potential position, based on a gaussian falloff
  # from each existing variable. The weight of a variable to each potential
  # position is given as:
  #     e ^ -(dist^2/stepSize^2)
  maxDistanceSq = -1 * (stepSize ** 2)
  for pos in otherPositions:
    distances = pos - positions
    varWeights = numpy.exp(numpy.power(distances, 2) / maxDistanceSq)
    weights += varWeights

  # Put this particle at the position with smallest weight.
  positionIdx = weights.argmin()
  self._position = positions[positionIdx]

  # Set its best position to this.
  self._bestPosition = self.getPosition()

  # Give it a random direction.
  self._velocity *= rng.choice([1, -1])
def resetVelocity(self, rng): """See comments in base class.""" maxVelocity = (self.max - self.min) / 5.0 self._velocity = maxVelocity #min(abs(self._velocity), maxVelocity) self._velocity *= rng.choice([1, -1])
def getPosition(self): """See comments in base class.""" position = super(PermuteInt, self).getPosition() position = int(round(position)) return position
def getState(self): """See comments in base class.""" return dict(_position = self.getPosition(), position = self.getPosition(), velocity = None, bestPosition = self.choices[self._bestPositionIdx], bestResult = self._bestResult)
def setState(self, state): """See comments in base class.""" self._positionIdx = self.choices.index(state['_position']) self._bestPositionIdx = self.choices.index(state['bestPosition']) self._bestResult = state['bestResult']
def setResultsPerChoice(self, resultsPerChoice):
  """Setup our resultsPerChoice history based on the passed in
  resultsPerChoice.

  For example, if this variable has the following choices:
    ['a', 'b', 'c']

  resultsPerChoice will have up to 3 elements, each element is a tuple
  containing (choiceValue, errors) where errors is the list of errors
  received from models that used the specific choice:
    retval:
      [('a', [0.1, 0.2, 0.3]), ('b', [0.5, 0.1, 0.6]), ('c', [0.2])]
  """
  # Keep track of the results obtained for each choice. Use a list
  # comprehension rather than "[[]] * n" so that each choice gets its own
  # list instead of n references to one shared list.
  self._resultsPerChoice = [[] for _ in self.choices]
  for (choiceValue, values) in resultsPerChoice:
    choiceIndex = self.choices.index(choiceValue)
    self._resultsPerChoice[choiceIndex] = list(values)
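# A standalone demonstration (not part of the original module) of the
# aliasing pitfall that the list comprehension above avoids: "[[]] * n"
# creates n references to a single list.
_shared = [[]] * 3
_shared[0].append('x')
assert _shared == [['x'], ['x'], ['x']]   # one list, three references

_independent = [[] for _ in range(3)]
_independent[0].append('x')
assert _independent == [['x'], [], []]    # three distinct lists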
def newPosition(self, globalBestPosition, rng):
  """See comments in base class."""
  # Compute the mean score per choice.
  numChoices = len(self.choices)
  meanScorePerChoice = []
  overallSum = 0
  numResults = 0

  for i in range(numChoices):
    if len(self._resultsPerChoice[i]) > 0:
      data = numpy.array(self._resultsPerChoice[i])
      meanScorePerChoice.append(data.mean())
      overallSum += data.sum()
      numResults += data.size
    else:
      meanScorePerChoice.append(None)

  if numResults == 0:
    overallSum = 1.0
    numResults = 1

  # For any choices we don't have a result for yet, set to the overall mean.
  for i in range(numChoices):
    if meanScorePerChoice[i] is None:
      meanScorePerChoice[i] = overallSum / numResults

  # Now, pick a new choice based on the above probabilities. Note that the
  # best result is the lowest result. We want to make it more likely to
  # pick the choice that produced the lowest results. So, we need to invert
  # the scores (someLargeNumber - score).
  meanScorePerChoice = numpy.array(meanScorePerChoice)

  # Invert meaning.
  meanScorePerChoice = (1.1 * meanScorePerChoice.max()) - meanScorePerChoice

  # If you want the scores to quickly converge to the best choice, raise the
  # results to a power. This will cause lower scores to become lower
  # probability as you see more results, until it eventually should
  # asymptote to only choosing the best choice.
  if self._fixEarly:
    meanScorePerChoice **= (numResults * self._fixEarlyFactor / numChoices)

  # Normalize.
  total = meanScorePerChoice.sum()
  if total == 0:
    total = 1.0
  meanScorePerChoice /= total

  # Get distribution and choose one based on those probabilities.
  distribution = meanScorePerChoice.cumsum()
  r = rng.random() * distribution[-1]
  choiceIdx = numpy.where(r <= distribution)[0][0]

  self._positionIdx = choiceIdx
  return self.getPosition()
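# A compact numpy sketch (hypothetical error values, not part of the
# original module) of the selection scheme in newPosition() above: lower
# errors are better, so scores are inverted before being normalized into
# selection probabilities.
import numpy

def _choiceProbabilitiesExample(meanErrors):
  scores = numpy.array(meanErrors, dtype=float)
  scores = (1.1 * scores.max()) - scores   # invert: low error -> high score
  return scores / scores.sum()             # normalize to probabilities

_probs = _choiceProbabilitiesExample([0.2, 0.5, 0.9])
assert _probs.argmax() == 0                # lowest error is most likely
assert abs(_probs.sum() - 1.0) < 1e-9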
def pushAwayFrom(self, otherPositions, rng): """See comments in base class.""" # Get the count of how many in each position positions = [self.choices.index(x) for x in otherPositions] positionCounts = [0] * len(self.choices) for pos in positions: positionCounts[pos] += 1 self._positionIdx = numpy.array(positionCounts).argmin() self._bestPositionIdx = self._positionIdx
def getDict(self, encoderName, flattenedChosenValues):
  """ Return a dict that can be used to construct this encoder. This dict
  can be passed directly to the addMultipleEncoders() method of the multi
  encoder.

  Parameters:
  ----------------------------------------------------------------------
  encoderName:            name of the encoder

  flattenedChosenValues:  dict of the flattened permutation variables. Any
                          variables within this dict whose key starts with
                          encoderName will be substituted for encoder
                          constructor args which are being permuted over.
  """
  encoder = dict(fieldname=self.fieldName, name=self.name)

  # Get the chosen value of each encoder argument
  for encoderArg, value in self.kwArgs.iteritems():
    # If a permuted variable, get its chosen value.
    if isinstance(value, PermuteVariable):
      value = flattenedChosenValues["%s:%s" % (encoderName, encoderArg)]
    encoder[encoderArg] = value

  # Special treatment for DateEncoder timeOfDay and dayOfWeek stuff. In the
  # permutations file, the class can be one of:
  #   DateEncoder.timeOfDay
  #   DateEncoder.dayOfWeek
  #   DateEncoder.season
  # If one of these, we need to intelligently set the constructor args.
  if '.' in self.encoderClass:
    (encoder['type'], argName) = self.encoderClass.split('.')
    argValue = (encoder['w'], encoder['radius'])
    encoder[argName] = argValue
    encoder.pop('w')
    encoder.pop('radius')
  else:
    encoder['type'] = self.encoderClass

  return encoder
def _translateMetricsToJSON(self, metrics, label):
  """ Translates the given metrics value to a JSON string.

  metrics:        A list of dictionaries per OPFTaskDriver.getMetrics()

  Returns:        JSON string representing the given metrics object.
  """

  # The default= hook below transcodes any numpy values embedded in the
  # metrics into a JSON-compatible structure.
  metricsDict = metrics

  def _mapNumpyValues(obj):
    """Convert a numpy scalar/array to its native Python equivalent."""
    import numpy

    if isinstance(obj, numpy.float32):
      return float(obj)
    elif isinstance(obj, numpy.bool_):
      return bool(obj)
    elif isinstance(obj, numpy.ndarray):
      return obj.tolist()
    else:
      raise TypeError("UNEXPECTED OBJ: %s; class=%s" % (obj, obj.__class__))

  # Convert the structure to a display-friendly JSON string
  jsonString = json.dumps(metricsDict, indent=4, default=_mapNumpyValues)

  return jsonString
def __openDatafile(self, modelResult):
  """Open the data file and write the header row"""

  # Write reset bit
  resetFieldMeta = FieldMetaInfo(
    name="reset",
    type=FieldMetaType.integer,
    special=FieldMetaSpecial.reset)

  self.__outputFieldsMeta.append(resetFieldMeta)

  # -----------------------------------------------------------------------
  # Write each of the raw inputs that go into the encoders
  rawInput = modelResult.rawInput
  rawFields = rawInput.keys()
  rawFields.sort()
  for field in rawFields:
    if field.startswith('_') or field == 'reset':
      continue
    meta = FieldMetaInfo(name=field, type=FieldMetaType.string,
                         special=FieldMetaSpecial.none)
    self.__outputFieldsMeta.append(meta)
    self._rawInputNames.append(field)

  # -----------------------------------------------------------------------
  # Handle each of the inference elements
  for inferenceElement, value in modelResult.inferences.iteritems():
    inferenceLabel = InferenceElement.getLabel(inferenceElement)

    # TODO: Right now we assume list inferences are associated with
    # the input field metadata
    if type(value) in (list, tuple):
      # Append input and prediction field meta-info
      self.__outputFieldsMeta.extend(self.__getListMetaInfo(inferenceElement))

    elif isinstance(value, dict):
      self.__outputFieldsMeta.extend(self.__getDictMetaInfo(inferenceElement,
                                                            value))
    else:
      if InferenceElement.getInputElement(inferenceElement):
        self.__outputFieldsMeta.append(FieldMetaInfo(
            name=inferenceLabel + ".actual",
            type=FieldMetaType.string, special=''))
      self.__outputFieldsMeta.append(FieldMetaInfo(
          name=inferenceLabel,
          type=FieldMetaType.string, special=''))

  if self.__metricNames:
    for metricName in self.__metricNames:
      metricField = FieldMetaInfo(
        name=metricName,
        type=FieldMetaType.float,
        special=FieldMetaSpecial.none)

      self.__outputFieldsMeta.append(metricField)

  # Create the inference directory for our experiment
  inferenceDir = _FileUtils.createExperimentInferenceDir(self.__experimentDir)

  # Construct the prediction dataset file path
  filename = (self.__label + "." +
              opf_utils.InferenceType.getLabel(self.__inferenceType) +
              ".predictionLog.csv")
  self.__datasetPath = os.path.join(inferenceDir, filename)

  # Create the output dataset
  print "OPENING OUTPUT FOR PREDICTION WRITER AT: %r" % self.__datasetPath
  print "Prediction field-meta: %r" % (
      [tuple(i) for i in self.__outputFieldsMeta],)
  self.__dataset = FileRecordStream(streamID=self.__datasetPath, write=True,
                                    fields=self.__outputFieldsMeta)

  # Copy data from checkpoint cache
  if self.__checkpointCache is not None:
    self.__checkpointCache.seek(0)

    reader = csv.reader(self.__checkpointCache, dialect='excel')

    # Skip header row
    try:
      header = reader.next()
    except StopIteration:
      print "Empty record checkpoint initializer for %r" % (
          self.__datasetPath,)
    else:
      assert tuple(self.__dataset.getFieldNames()) == tuple(header), \
        "dataset.getFieldNames(): %r; predictionCheckpointFieldNames: %r" % (
          tuple(self.__dataset.getFieldNames()), tuple(header))

      # Copy the rows from checkpoint
      numRowsCopied = 0
      while True:
        try:
          row = reader.next()
        except StopIteration:
          break

        #print "DEBUG: restoring row from checkpoint: %r" % (row,)
        self.__dataset.appendRecord(row)
        numRowsCopied += 1

      self.__dataset.flush()

      print "Restored %d rows from checkpoint for %r" % (
        numRowsCopied, self.__datasetPath)

    # Dispose of our checkpoint cache
    self.__checkpointCache.close()
    self.__checkpointCache = None

  return
def setLoggedMetrics(self, metricNames):
  """ Tell the writer which metrics should be written

  Parameters:
  -----------------------------------------------------------------------
  metricNames:      A list of metric labels to be written
  """
  if metricNames is None:
    self.__metricNames = set([])
  else:
    self.__metricNames = set(metricNames)
def __getListMetaInfo(self, inferenceElement): """ Get field metadata information for inferences that are of list type TODO: Right now we assume list inferences are associated with the input field metadata """ fieldMetaInfo = [] inferenceLabel = InferenceElement.getLabel(inferenceElement) for inputFieldMeta in self.__inputFieldsMeta: if InferenceElement.getInputElement(inferenceElement): outputFieldMeta = FieldMetaInfo( name=inputFieldMeta.name + ".actual", type=inputFieldMeta.type, special=inputFieldMeta.special ) predictionField = FieldMetaInfo( name=inputFieldMeta.name + "." + inferenceLabel, type=inputFieldMeta.type, special=inputFieldMeta.special ) fieldMetaInfo.append(outputFieldMeta) fieldMetaInfo.append(predictionField) return fieldMetaInfo
def __getDictMetaInfo(self, inferenceElement, inferenceDict):
  """Get field metadata information for inferences that are of dict type"""
  fieldMetaInfo = []
  inferenceLabel = InferenceElement.getLabel(inferenceElement)

  if InferenceElement.getInputElement(inferenceElement):
    fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel + ".actual",
                                       type=FieldMetaType.string,
                                       special=''))

  keys = sorted(inferenceDict.keys())
  for key in keys:
    fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel + "." + str(key),
                                       type=FieldMetaType.string,
                                       special=''))

  return fieldMetaInfo
def append(self, modelResult):
  """ [virtual method override] Emits a single prediction as input versus
  predicted.

  modelResult:    An opf_utils.ModelResult object that contains the model
                  input and output for the current timestep.
  """
  #print "DEBUG: _BasicPredictionWriter: writing modelResult: %r" % (modelResult,)

  # If there are no inferences, don't write anything
  inferences = modelResult.inferences
  hasInferences = False
  if inferences is not None:
    for value in inferences.itervalues():
      hasInferences = hasInferences or (value is not None)

  if not hasInferences:
    return

  if self.__dataset is None:
    self.__openDatafile(modelResult)

  inputData = modelResult.sensorInput

  sequenceReset = int(bool(inputData.sequenceReset))
  outputRow = [sequenceReset]

  # -----------------------------------------------------------------------
  # Write out the raw inputs
  rawInput = modelResult.rawInput
  for field in self._rawInputNames:
    outputRow.append(str(rawInput[field]))

  # -----------------------------------------------------------------------
  # Write out the inference element info
  for inferenceElement, outputVal in inferences.iteritems():
    inputElement = InferenceElement.getInputElement(inferenceElement)
    if inputElement:
      inputVal = getattr(inputData, inputElement)
    else:
      inputVal = None

    if type(outputVal) in (list, tuple):
      # Membership test needs the NoneType type object, not None itself
      assert type(inputVal) in (list, tuple, type(None))
      for iv, ov in zip(inputVal, outputVal):
        # Write actual
        outputRow.append(str(iv))
        # Write inferred
        outputRow.append(str(ov))
    elif isinstance(outputVal, dict):
      if inputVal is not None:
        # If we have a predicted field, include only that in the actuals
        if modelResult.predictedFieldName is not None:
          outputRow.append(str(inputVal[modelResult.predictedFieldName]))
        else:
          outputRow.append(str(inputVal))
      for key in sorted(outputVal.keys()):
        outputRow.append(str(outputVal[key]))
    else:
      if inputVal is not None:
        outputRow.append(str(inputVal))
      outputRow.append(str(outputVal))

  metrics = modelResult.metrics
  for metricName in self.__metricNames:
    outputRow.append(metrics.get(metricName, 0.0))

  #print "DEBUG: _BasicPredictionWriter: writing outputRow: %r" % (outputRow,)

  self.__dataset.appendRecord(outputRow)

  self.__dataset.flush()

  return
def checkpoint(self, checkpointSink, maxRows):
  """ [virtual method override] Save a checkpoint of the prediction output
  stream. The checkpoint comprises up to maxRows of the most recent
  inference records.

  Parameters:
  ----------------------------------------------------------------------
  checkpointSink:     A File-like object where predictions checkpoint data,
                      if any, will be stored.
  maxRows:            Maximum number of most recent inference rows to
                      checkpoint.
  """

  checkpointSink.truncate()

  if self.__dataset is None:
    if self.__checkpointCache is not None:
      self.__checkpointCache.seek(0)
      shutil.copyfileobj(self.__checkpointCache, checkpointSink)
      checkpointSink.flush()
      return
    else:
      # Nothing to checkpoint
      return

  self.__dataset.flush()
  totalDataRows = self.__dataset.getDataRowCount()

  if totalDataRows == 0:
    # Nothing to checkpoint
    return

  # Open reader of prediction file (suppress missingValues conversion)
  reader = FileRecordStream(self.__datasetPath, missingValues=[])

  # Create CSV writer for writing checkpoint rows
  writer = csv.writer(checkpointSink)

  # Write the header row to checkpoint sink -- just field names
  writer.writerow(reader.getFieldNames())

  # Determine number of rows to checkpoint
  numToWrite = min(maxRows, totalDataRows)

  # Skip initial rows to get to the rows that we actually need to checkpoint
  numRowsToSkip = totalDataRows - numToWrite
  for i in xrange(numRowsToSkip):
    reader.next()

  # Write the data rows to checkpoint sink
  numWritten = 0
  while True:
    row = reader.getNextRecord()
    if row is None:
      break

    row = [str(element) for element in row]

    #print "DEBUG: _BasicPredictionWriter: checkpointing row: %r" % (row,)

    writer.writerow(row)

    numWritten += 1

  assert numWritten == numToWrite, \
    "numWritten (%s) != numToWrite (%s)" % (numWritten, numToWrite)

  checkpointSink.flush()

  return
def update(self, modelResult): """ Queue up the T(i+1) prediction value and emit a T(i) input/prediction pair, if possible. E.g., if the previous T(i-1) iteration was learn-only, then we would not have a T(i) prediction in our FIFO and would not be able to emit a meaningful input/prediction pair. modelResult: An opf_utils.ModelResult object that contains the model input and output for the current timestep. """ self.__writer.append(self.__inferenceShifter.shift(modelResult))
def createExperimentInferenceDir(cls, experimentDir): """ Creates the inference output directory for the given experiment experimentDir: experiment directory path that contains description.py Returns: path of the inference output directory """ path = cls.getExperimentInferenceDirPath(experimentDir) cls.makeDirectory(path) return path
def _generateModel0(numCategories): """ Generate the initial, first order, and second order transition probabilities for 'model0'. For this model, we generate the following set of sequences: 1-2-3 (4X) 1-2-4 (1X) 5-2-3 (1X) 5-2-4 (4X) Parameters: ---------------------------------------------------------------------- numCategories: Number of categories retval: (initProb, firstOrder, secondOrder, seqLen) initProb: Initial probability for each category. This is a vector of length len(categoryList). firstOrder: A dictionary of the 1st order probabilities. The key is the 1st element of the sequence, the value is the probability of each 2nd element given the first. secondOrder: A dictionary of the 2nd order probabilities. The key is the first 2 elements of the sequence, the value is the probability of each possible 3rd element given the first two. seqLen: Desired length of each sequence. The 1st element will be generated using the initProb, the 2nd element by the firstOrder table, and the 3rd and all successive elements by the secondOrder table. Here is an example of some return values: initProb: [0.7, 0.2, 0.1] firstOrder: {'[0]': [0.3, 0.3, 0.4], '[1]': [0.3, 0.3, 0.4], '[2]': [0.3, 0.3, 0.4]} secondOrder: {'[0,0]': [0.3, 0.3, 0.4], '[0,1]': [0.3, 0.3, 0.4], '[0,2]': [0.3, 0.3, 0.4], '[1,0]': [0.3, 0.3, 0.4], '[1,1]': [0.3, 0.3, 0.4], '[1,2]': [0.3, 0.3, 0.4], '[2,0]': [0.3, 0.3, 0.4], '[2,1]': [0.3, 0.3, 0.4], '[2,2]': [0.3, 0.3, 0.4]} """ # =============================================================== # Let's model the following: # a-b-c (4X) # a-b-d (1X) # e-b-c (1X) # e-b-d (4X) # -------------------------------------------------------------------- # Initial probabilities, 'a' and 'e' equally likely initProb = numpy.zeros(numCategories) initProb[0] = 0.5 initProb[4] = 0.5 # -------------------------------------------------------------------- # 1st order transitions # both 'a' and 'e' should lead to 'b' firstOrder = dict() for catIdx in range(numCategories): key = str([catIdx]) probs = numpy.ones(numCategories) / numCategories if catIdx == 0 or catIdx == 4: probs.fill(0) probs[1] = 1.0 # lead only to b firstOrder[key] = probs # -------------------------------------------------------------------- # 2nd order transitions # a-b should lead to c 80% and d 20% # e-b should lead to c 20% and d 80% secondOrder = dict() for firstIdx in range(numCategories): for secondIdx in range(numCategories): key = str([firstIdx, secondIdx]) probs = numpy.ones(numCategories) / numCategories if key == str([0,1]): probs.fill(0) probs[2] = 0.80 # 'ab' leads to 'c' 80% of the time probs[3] = 0.20 # 'ab' leads to 'd' 20% of the time elif key == str([4,1]): probs.fill(0) probs[2] = 0.20 # 'eb' leads to 'c' 20% of the time probs[3] = 0.80 # 'eb' leads to 'd' 80% of the time secondOrder[key] = probs return (initProb, firstOrder, secondOrder, 3)
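# A minimal sampler (not the original generator; the helper name is
# hypothetical) showing how the (initProb, firstOrder, secondOrder) tables
# returned above can drive sequence generation via cumulative
# probabilities, as _generateFile() does further below.
import numpy

def _sampleSequenceExample(initProb, firstOrder, secondOrder, seqLen, rng):
  # First element from the initial distribution; cast to int so that
  # str([...]) matches the tables' string keys.
  seq = [int(numpy.searchsorted(initProb.cumsum(), rng.rand()))]
  # Second element from the 1st order table, keyed by str([first])
  seq.append(int(numpy.searchsorted(
      firstOrder[str(seq)].cumsum(), rng.rand())))
  # Remaining elements from the 2nd order table, keyed by the last two
  while len(seq) < seqLen:
    seq.append(int(numpy.searchsorted(
        secondOrder[str(seq[-2:])].cumsum(), rng.rand())))
  return seq

# Usage sketch, assuming at least 5 categories as model0 requires:
#   initProb, firstOrder, secondOrder, seqLen = _generateModel0(6)
#   print _sampleSequenceExample(initProb, firstOrder, secondOrder, seqLen,
#                                numpy.random.RandomState(42))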
def _generateModel1(numCategories):
  """ Generate the initial, first order, and second order transition
  probabilities for 'model1'. For this model, we generate the following set
  of sequences:
    0-10-15 (1X)
    0-11-16 (1X)
    0-12-17 (1X)
    0-13-18 (1X)
    0-14-19 (1X)

    1-10-20 (1X)
    1-11-21 (1X)
    1-12-22 (1X)
    1-13-23 (1X)
    1-14-24 (1X)


  Parameters:
  ----------------------------------------------------------------------
  numCategories:      Number of categories
  retval: (initProb, firstOrder, secondOrder, seqLen)
            initProb:     Initial probability for each category. This is a
                          vector of length len(categoryList).
            firstOrder:   A dictionary of the 1st order probabilities. The
                          key is the 1st element of the sequence, the value
                          is the probability of each 2nd element given the
                          first.
            secondOrder:  A dictionary of the 2nd order probabilities. The
                          key is the first 2 elements of the sequence, the
                          value is the probability of each possible 3rd
                          element given the first two.
            seqLen:       Desired length of each sequence. The 1st element
                          will be generated using the initProb, the 2nd
                          element by the firstOrder table, and the 3rd and
                          all successive elements by the secondOrder table.


  Here is an example of some return values:
  initProb:         [0.7, 0.2, 0.1]

  firstOrder:       {'[0]': [0.3, 0.3, 0.4],
                     '[1]': [0.3, 0.3, 0.4],
                     '[2]': [0.3, 0.3, 0.4]}

  secondOrder:      {'[0,0]': [0.3, 0.3, 0.4],
                     '[0,1]': [0.3, 0.3, 0.4],
                     '[0,2]': [0.3, 0.3, 0.4],
                     '[1,0]': [0.3, 0.3, 0.4],
                     '[1,1]': [0.3, 0.3, 0.4],
                     '[1,2]': [0.3, 0.3, 0.4],
                     '[2,0]': [0.3, 0.3, 0.4],
                     '[2,1]': [0.3, 0.3, 0.4],
                     '[2,2]': [0.3, 0.3, 0.4]}
  """

  # --------------------------------------------------------------------
  # Initial probabilities, 0 and 1 equally likely
  initProb = numpy.zeros(numCategories)
  initProb[0] = 0.5
  initProb[1] = 0.5


  # --------------------------------------------------------------------
  # 1st order transitions
  # both 0 and 1 should lead to 10,11,12,13,14 with equal probability
  firstOrder = dict()
  for catIdx in range(numCategories):
    key = str([catIdx])
    probs = numpy.ones(numCategories) / numCategories
    if catIdx == 0 or catIdx == 1:
      indices = numpy.array([10, 11, 12, 13, 14])
      probs.fill(0)
      probs[indices] = 1.0   # lead only to 10..14
      probs /= probs.sum()
    firstOrder[key] = probs

  # --------------------------------------------------------------------
  # 2nd order transitions
  # 0-10 should lead to 15
  # 0-11 to 16
  # ...
  # 1-10 should lead to 20
  # 1-11 should lead to 21
  # ...
  secondOrder = dict()
  for firstIdx in range(numCategories):
    for secondIdx in range(numCategories):
      key = str([firstIdx, secondIdx])
      probs = numpy.ones(numCategories) / numCategories
      if key == str([0, 10]):
        probs.fill(0)
        probs[15] = 1
      elif key == str([0, 11]):
        probs.fill(0)
        probs[16] = 1
      elif key == str([0, 12]):
        probs.fill(0)
        probs[17] = 1
      elif key == str([0, 13]):
        probs.fill(0)
        probs[18] = 1
      elif key == str([0, 14]):
        probs.fill(0)
        probs[19] = 1
      elif key == str([1, 10]):
        probs.fill(0)
        probs[20] = 1
      elif key == str([1, 11]):
        probs.fill(0)
        probs[21] = 1
      elif key == str([1, 12]):
        probs.fill(0)
        probs[22] = 1
      elif key == str([1, 13]):
        probs.fill(0)
        probs[23] = 1
      elif key == str([1, 14]):
        probs.fill(0)
        probs[24] = 1

      secondOrder[key] = probs

  return (initProb, firstOrder, secondOrder, 3)
def _generateModel2(numCategories, alpha=0.25):
  """ Generate the initial, first order, and second order transition
  probabilities for 'model2'. For this model, we generate peaked random
  transitions using dirichlet distributions.

  Parameters:
  ----------------------------------------------------------------------
  numCategories:      Number of categories
  alpha:              Determines the peakedness of the transitions. Low
                      alpha values (alpha=0.01) place the entire weight on
                      a single transition. Large alpha values (alpha=10)
                      distribute the weight evenly among all transitions.
                      Intermediate values (alpha=0.5) give moderately
                      peaked transitions.
  retval: (initProb, firstOrder, secondOrder, seqLen)
            initProb:     Initial probability for each category. This is a
                          vector of length len(categoryList).
            firstOrder:   A dictionary of the 1st order probabilities. The
                          key is the 1st element of the sequence, the value
                          is the probability of each 2nd element given the
                          first.
            secondOrder:  A dictionary of the 2nd order probabilities. The
                          key is the first 2 elements of the sequence, the
                          value is the probability of each possible 3rd
                          element given the first two.
            seqLen:       Desired length of each sequence. The 1st element
                          will be generated using the initProb, the 2nd
                          element by the firstOrder table, and the 3rd and
                          all successive elements by the secondOrder table.
                          None means infinite length.

  Here is an example of some return values for an intermediate alpha value:
  initProb:         [0.33, 0.33, 0.33]

  firstOrder:       {'[0]': [0.2, 0.7, 0.1],
                     '[1]': [0.1, 0.1, 0.8],
                     '[2]': [0.1, 0.0, 0.9]}

  secondOrder:      {'[0,0]': [0.1, 0.0, 0.9],
                     '[0,1]': [0.0, 0.2, 0.8],
                     '[0,2]': [0.1, 0.8, 0.1],
                     ...
                     '[2,2]': [0.8, 0.2, 0.0]}
  """

  # --------------------------------------------------------------------
  # All initial probabilities are equally likely
  initProb = numpy.ones(numCategories) / numCategories

  def generatePeakedProbabilities(lastIdx,
                                  numCategories=numCategories,
                                  alpha=alpha):
    probs = numpy.random.dirichlet(alpha=[alpha] * numCategories)
    probs[lastIdx] = 0.0
    probs /= probs.sum()
    return probs

  # --------------------------------------------------------------------
  # 1st order transitions
  firstOrder = dict()
  for catIdx in range(numCategories):
    key = str([catIdx])
    probs = generatePeakedProbabilities(catIdx)
    firstOrder[key] = probs

  # --------------------------------------------------------------------
  # 2nd order transitions
  secondOrder = dict()
  for firstIdx in range(numCategories):
    for secondIdx in range(numCategories):
      key = str([firstIdx, secondIdx])
      probs = generatePeakedProbabilities(secondIdx)
      secondOrder[key] = probs

  return (initProb, firstOrder, secondOrder, None)
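# A quick illustration (not part of the original module) of how alpha
# controls peakedness in _generateModel2() above; numpy.random.dirichlet
# is the same primitive the generator uses. The sampled values are random,
# so only the tendency is shown.
import numpy

_rng = numpy.random.RandomState(0)
_peaked = _rng.dirichlet([0.05] * 5)  # low alpha: nearly all mass on one bin
_flat = _rng.dirichlet([10.0] * 5)    # high alpha: near-uniform transitions
print "low alpha  -> peaked:", numpy.round(_peaked, 3)
print "high alpha -> flat:  ", numpy.round(_flat, 3)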
def _generateFile(filename, numRecords, categoryList, initProb,
                  firstOrderProb, secondOrderProb, seqLen,
                  numNoise=0, resetsEvery=None):
  """ Generate a set of records reflecting a set of probabilities.

  Parameters:
  ----------------------------------------------------------------
  filename:         name of .csv file to generate
  numRecords:       number of records to generate
  categoryList:     list of category names
  initProb:         Initial probability for each category. This is a vector
                    of length len(categoryList).
  firstOrderProb:   A dictionary of the 1st order probabilities. The key is
                    the 1st element of the sequence, the value is the
                    probability of each 2nd element given the first.
  secondOrderProb:  A dictionary of the 2nd order probabilities. The key is
                    the first 2 elements of the sequence, the value is the
                    probability of each possible 3rd element given the
                    first two.
  seqLen:           Desired length of each sequence. The 1st element will
                    be generated using the initProb, the 2nd element by the
                    firstOrder table, and the 3rd and all successive
                    elements by the secondOrder table. None means infinite
                    length.
  numNoise:         Number of noise elements to place between each
                    sequence. The noise elements are evenly distributed
                    from all categories.
  resetsEvery:      If not None, generate a reset every N records


  Here is an example of some parameters:

  categoryList:     ['cat1', 'cat2', 'cat3']

  initProb:         [0.7, 0.2, 0.1]

  firstOrderProb:   {'[0]': [0.3, 0.3, 0.4],
                     '[1]': [0.3, 0.3, 0.4],
                     '[2]': [0.3, 0.3, 0.4]}

  secondOrderProb:  {'[0,0]': [0.3, 0.3, 0.4],
                     '[0,1]': [0.3, 0.3, 0.4],
                     '[0,2]': [0.3, 0.3, 0.4],
                     '[1,0]': [0.3, 0.3, 0.4],
                     '[1,1]': [0.3, 0.3, 0.4],
                     '[1,2]': [0.3, 0.3, 0.4],
                     '[2,0]': [0.3, 0.3, 0.4],
                     '[2,1]': [0.3, 0.3, 0.4],
                     '[2,2]': [0.3, 0.3, 0.4]}
  """

  # Create the file
  print "Creating %s..." % (filename)
  fields = [('reset', 'int', 'R'), ('name', 'string', '')]
  outFile = FileRecordStream(filename, write=True, fields=fields)

  # --------------------------------------------------------------------
  # Convert the probability tables into cumulative probabilities
  initCumProb = initProb.cumsum()

  firstOrderCumProb = dict()
  for (key, value) in firstOrderProb.iteritems():
    firstOrderCumProb[key] = value.cumsum()

  secondOrderCumProb = dict()
  for (key, value) in secondOrderProb.iteritems():
    secondOrderCumProb[key] = value.cumsum()

  # --------------------------------------------------------------------
  # Write out the sequences
  elementsInSeq = []
  numElementsSinceReset = 0
  maxCatIdx = len(categoryList) - 1
  for i in xrange(numRecords):
    # Generate a reset?
    if numElementsSinceReset == 0:
      reset = 1
    else:
      reset = 0

    # Pick the next element, based on how far we are into the 2nd order
    # sequence.
    rand = numpy.random.rand()
    if len(elementsInSeq) == 0:
      catIdx = numpy.searchsorted(initCumProb, rand)
    elif len(elementsInSeq) == 1:
      catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq)],
                                  rand)
    elif (len(elementsInSeq) >= 2) and \
         (seqLen is None or len(elementsInSeq) < seqLen - numNoise):
      catIdx = numpy.searchsorted(
          secondOrderCumProb[str(elementsInSeq[-2:])], rand)
    else:   # random "noise"
      catIdx = numpy.random.randint(len(categoryList))

    # Write out the record
    catIdx = min(maxCatIdx, catIdx)
    outFile.appendRecord([reset, categoryList[catIdx]])
    #print categoryList[catIdx]

    # ------------------------------------------------------------
    # Increment counters
    elementsInSeq.append(catIdx)
    numElementsSinceReset += 1

    # Generate another reset?
    if (resetsEvery is not None and
        numElementsSinceReset == resetsEvery):
      numElementsSinceReset = 0
      elementsInSeq = []

    # Start another 2nd order sequence?
    if seqLen is not None and (len(elementsInSeq) == seqLen + numNoise):
      elementsInSeq = []

  outFile.close()
def _allow_new_attributes(f):
  """A decorator that maintains the attribute lock state of an object

  It cooperates with the LockAttributesMetaclass (see below) that replaces
  the __setattr__ method with a custom one that checks the _canAddAttributes
  counter and allows setting new attributes only if _canAddAttributes > 0.

  New attributes can be set only from methods decorated with this decorator
  (should be only __init__ and __setstate__ normally)

  The decorator is reentrant (e.g. if from inside a decorated function
  another decorated function is invoked). Before invoking the target
  function it increments the counter (or sets it to 1). After invoking the
  target function it decrements the counter and if it's 0 it removes the
  counter.
  """
  def decorated(self, *args, **kw):
    """The decorated function that replaces __init__() or __setstate__()

    """
    # Bump the counter before running the original function
    if not hasattr(self, '_canAddAttributes'):
      self.__dict__['_canAddAttributes'] = 1
    else:
      self._canAddAttributes += 1
    assert self._canAddAttributes >= 1

    # Save add attribute counter
    count = self._canAddAttributes
    f(self, *args, **kw)

    # Restore _canAddAttributes if deleted from dict (can happen in
    # __setstate__)
    if hasattr(self, '_canAddAttributes'):
      self._canAddAttributes -= 1
    else:
      self._canAddAttributes = count - 1

    assert self._canAddAttributes >= 0
    if self._canAddAttributes == 0:
      del self._canAddAttributes

  decorated.__doc__ = f.__doc__
  decorated.__name__ = f.__name__
  return decorated
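# A self-contained sketch (simplified: no metaclass, no __setattr__
# enforcement, and a try/finally the original does not use) of the counter
# discipline _allow_new_attributes() maintains: attribute creation is
# permitted only while at least one decorated frame is active, and nested
# decorated calls keep the counter balanced.
def _allowDemo(f):
  def decorated(self, *args, **kw):
    # Bypass any custom __setattr__ by writing straight into __dict__
    self.__dict__['_canAddAttributes'] = \
        self.__dict__.get('_canAddAttributes', 0) + 1
    try:
      return f(self, *args, **kw)
    finally:
      self._canAddAttributes -= 1
      if self._canAddAttributes == 0:
        del self._canAddAttributes   # counter removed when outermost exits
  return decorated

class _LockedDemo(object):
  @_allowDemo
  def __init__(self):
    self.x = 1   # allowed: counter is >= 1 while __init__ runs

_obj = _LockedDemo()
assert _obj.x == 1
assert not hasattr(_obj, '_canAddAttributes')  # counter removed on exit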
def _simple_init(self, *args, **kw): """trivial init method that just calls base class's __init__() This method is attached to classes that don't define __init__(). It is needed because LockAttributesMetaclass must decorate the __init__() method of its target class. """ type(self).__base__.__init__(self, *args, **kw)