Search is not available for this dataset
text
stringlengths
75
104k
def MultiArgMax(x):
  """
  Yield the index of every occurrence of the maximum value of array x.

  Requires that x have a max() method, as x.max() (in the case of NumPy) is
  much faster than max(x). If only the first index of a single maximum is
  needed, NumPy's argmax() is simpler and faster.

  :param x: Any sequence that has a max() method.
  :returns: Generator over the indices where the max value occurs.
  """
  peak = x.max()
  return (index for index, value in enumerate(x) if value == peak)
def Any(sequence):
  """
  Return True if any element of the sequence is truthy.

  Uses the builtin ``any()``, which short-circuits on the first truthy
  element (the original ``reduce``-based form always consumed the whole
  sequence and relied on ``reduce`` being a builtin, which is Python 2 only).

  :param sequence: Any sequence whose elements can be evaluated as booleans.
  :returns: (bool) True if any element of the sequence satisfies True.
  """
  return any(sequence)
def All(sequence):
  """
  Return True if all elements of the sequence are truthy.

  Uses the builtin ``all()``, which short-circuits on the first falsy
  element (the original ``reduce``-based form always consumed the whole
  sequence and relied on ``reduce`` being a builtin, which is Python 2 only).

  :param sequence: Any sequence whose elements can be evaluated as booleans.
  :returns: (bool) True if all elements of the sequence satisfy True.
  """
  return all(sequence)
def MultiIndicator(pos, size, dtype):
  """
  Build an indicator array: zeros everywhere except ones at the given
  position(s).

  :param pos: A single integer or sequence of integers that specify
      the position of ones to be set.
  :param size: The total size of the array to be returned.
  :param dtype: The element type (compatible with NumPy array()) of the
      array to be returned.
  :returns: An array of length size and element type dtype.
  """
  indicator = numpy.zeros(size, dtype=dtype)
  # Normalize the scalar case to a one-element list so one code path suffices.
  positions = pos if hasattr(pos, '__iter__') else [pos]
  for index in positions:
    indicator[index] = 1
  return indicator
def Distribution(pos, size, counts, dtype):
  """
  Build an array of length ``size`` that is zero everywhere except at the
  indices in ``pos``, where it holds a probability distribution normalized
  from ``counts``.

  :param pos: A single integer or sequence of integers that specify
      the position of ones to be set.
  :param size: The total size of the array to be returned.
  :param counts: The number of times we have observed each index.
  :param dtype: The element type (compatible with NumPy array()) of the
      array to be returned.
  :returns: An array of length size and element type dtype.
  """
  dist = numpy.zeros(size, dtype=dtype)
  if hasattr(pos, '__iter__'):
    # Normalize the selected counts so the included positions sum to 1.
    total = float(sum(counts[index] for index in pos))
    for index in pos:
      dist[index] = counts[index] / total
  else:
    # A single position gets all of the probability mass.
    dist[pos] = 1
  return dist
def grow(self, rows, cols):
  """
  Grows the histogram to have rows rows and cols columns.
  Must not have been initialized before, or already have the same
  number of columns.
  If rows is smaller than the current number of rows,
  does not shrink.
  Also updates the sizes of the row and column sums.

  :param rows: Integer number of rows.
  :param cols: Integer number of columns.
  """
  # NOTE(review): `dtype` below is not defined in this method — presumably a
  # module-level global naming the histogram element type; confirm at module scope.
  if not self.hist_:
    # First call: allocate the sparse histogram and zeroed marginals.
    self.hist_ = SparseMatrix(rows, cols)
    self.rowSums_ = numpy.zeros(rows, dtype=dtype)
    self.colSums_ = numpy.zeros(cols, dtype=dtype)
    self.hack_ = None
  else:
    # Grow-only resize: never shrink below the current dimensions.
    oldRows = self.hist_.nRows()
    oldCols = self.hist_.nCols()
    nextRows = max(oldRows, rows)
    nextCols = max(oldCols, cols)
    if (oldRows < nextRows) or (oldCols < nextCols):
      self.hist_.resize(nextRows, nextCols)
      if oldRows < nextRows:
        # Preserve existing row sums in the front of the larger array.
        oldSums = self.rowSums_
        self.rowSums_ = numpy.zeros(nextRows, dtype=dtype)
        self.rowSums_[0:len(oldSums)] = oldSums
        # Invalidate the cached clean_outcpd() result.
        self.hack_ = None
      if oldCols < nextCols:
        # Preserve existing column sums in the front of the larger array.
        oldSums = self.colSums_
        self.colSums_ = numpy.zeros(nextCols, dtype=dtype)
        self.colSums_[0:len(oldSums)] = oldSums
        # Invalidate the cached clean_outcpd() result.
        self.hack_ = None
def updateRow(self, row, distribution):
  """
  Add distribution to row row.
  Distribution should be an array of probabilities or counts.

  :param row: Integer index of the row to add to.
      May be larger than the current number of rows, in which case the
      histogram grows.
  :param distribution: Array of length equal to the number of columns.
  """
  # Ensure the histogram is large enough for this row / these columns.
  self.grow(row+1, len(distribution))
  # hist_[row] = 1*hist_[row] + 1*distribution (element-wise accumulate).
  self.hist_.axby(row, 1, 1, distribution)
  # Keep the row/column marginal sums in step with the histogram.
  self.rowSums_[row] += distribution.sum()
  self.colSums_ += distribution
  # Invalidate the cached clean_outcpd() result; it must be rebuilt.
  self.hack_ = None
def inferRowCompat(self, distribution):
  """
  Equivalent to the category inference of zeta1.TopLevel.
  Computes the max_prod (maximum component of a component-wise multiply)
  between the rows of the histogram and the incoming distribution.
  May be slow if the result of clean_outcpd() is not valid.

  :param distribution: Array of length equal to the number of columns.
  :returns: array of length equal to the number of rows.
  """
  if self.hack_ is None:
    # Lazily rebuild the cleaned CPD; updateRow()/grow() set hack_ to None.
    self.clean_outcpd()
  return self.hack_.vecMaxProd(distribution)
def clean_outcpd(self):
  """Hack to act like clean_outcpd on zeta1.TopLevelNode.

  Take the max element in each to column, set it to 1, and set all the
  other elements to 0.
  Only called by inferRowMaxProd() and only needed if an updateRow()
  has been called since the last clean_outcpd().
  """
  # Work on a dense copy; the histogram itself is left untouched.
  m = self.hist_.toDense()
  for j in xrange(m.shape[1]): # For each column.
    cmax = m[:,j].max()
    if cmax:
      # One-hot the column at its max value(s); ties all become 1.
      # NOTE(review): `dtype` is presumably a module-level global — confirm.
      m[:,j] = numpy.array(m[:,j] == cmax, dtype=dtype)
  # Rebuild the cached sparse result row by row.
  self.hack_ = SparseMatrix(0, self.hist_.nCols())
  for i in xrange(m.shape[0]):
    self.hack_.addRow(m[i,:])
def importAndRunFunction(
  path,
  moduleName,
  funcName,
  **keywords
  ):
  """
  Run a named function specified by a filesystem path, module name
  and function name.

  Returns the value returned by the imported function.

  Use this when access is needed to code that has not been added to a
  package accessible from the ordinary Python path. Encapsulates the
  multiple lines usually needed to safely manipulate and restore the
  Python path.

  Parameters
  ----------
  path: filesystem path
    Path to the directory where the desired module is stored. This will be
    used to temporarily augment the Python path.

  moduleName: basestring
    Name of the module, without trailing extension, where the desired
    function is stored. This module should be in the directory specified
    with path.

  funcName: basestring
    Name of the function to import and call.

  keywords:
    Keyword arguments to be passed to the imported function.
  """
  import sys
  originalPath = sys.path
  # try/finally guarantees path restoration on any exception, replacing the
  # original duplicated restore + bare `except:` re-raise pattern.
  try:
    sys.path = [path] + sys.path
    func = getattr(__import__(moduleName, fromlist=[funcName]), funcName)
  finally:
    sys.path = originalPath
  # Call outside the guarded region: the path is already restored, exactly
  # as in the original control flow.
  return func(**keywords)
def transferCoincidences(network, fromElementName, toElementName):
  """
  Gets the coincidence matrix from one element and sets it on another
  element (using locked handles, a la nupic.bindings.research.lockHandle).

  TODO: Generalize to more node types, parameter name pairs, etc.

  Does not work across processes.

  :param network: network object containing both named elements
  :param fromElementName: name of the element to read the coincidence
      matrix from
  :param toElementName: name of the element whose "coincidencesAbove"
      parameter receives the locked handle
  """
  # Grab a locked handle onto the source element's coincidence matrix.
  coincidenceHandle = getLockedHandle(
    runtimeElement=network.getElement(fromElementName),
    # TODO: Re-purpose for use with nodes other than PMXClassifierNode.
    expression="self._cd._W"
  )
  # Hand the same in-process handle to the destination element.
  network.getElement(toElementName).setParameter("coincidencesAbove",
    coincidenceHandle)
def compute(slidingWindow, total, newVal, windowSize):
  """Routine for computing a moving average.

  @param slidingWindow a list of previous values to use in the computation;
         it is modified in place and also returned
  @param total the sum of the values currently in slidingWindow
  @param newVal the new value to fold into the windowed average
  @param windowSize how many values to keep in the moving window
  @returns an updated windowed average, the modified slidingWindow list,
           and the new total sum of the sliding window
  """
  # When the window is full, evict the oldest value before appending.
  windowFull = len(slidingWindow) == windowSize
  if windowFull:
    evicted = slidingWindow.pop(0)
    total -= evicted
  slidingWindow.append(newVal)
  total += newVal
  average = float(total) / len(slidingWindow)
  return average, slidingWindow, total
def next(self, newValue):
  """Instance method wrapper around compute.

  Folds newValue into this instance's sliding window and returns the
  updated windowed average.
  """
  average, window, runningTotal = self.compute(
    self.slidingWindow, self.total, newValue, self.windowSize)
  self.slidingWindow = window
  self.total = runningTotal
  return average
def getModule(metricSpec):
  """
  Factory method returning the :class:`MetricsIface` implementation named
  by ``metricSpec.metric``.

  Supported metric names and classes:
  rmse (:class:`MetricRMSE`), nrmse (:class:`MetricNRMSE`),
  aae (:class:`MetricAAE`), acc (:class:`MetricAccuracy`),
  avg_err (:class:`MetricAveError`), trivial (:class:`MetricTrivial`),
  two_gram (:class:`MetricTwoGram`), moving_mean (:class:`MetricMovingMean`),
  moving_mode (:class:`MetricMovingMode`), neg_auc (:class:`MetricNegAUC`),
  custom_error_metric (:class:`CustomErrorMetric`),
  multiStep (:class:`MetricMultiStep`),
  multiStepProbability (:class:`MetricMultiStepProbability`),
  ms_aae (:class:`MetricMultiStepAAE`),
  ms_avg_err (:class:`MetricMultiStepAveError`),
  passThruPrediction (:class:`MetricPassThruPrediction`),
  altMAPE (:class:`MetricAltMAPE`), MAPE (:class:`MetricMAPE`),
  multi (:class:`MetricMulti`),
  negativeLogLikelihood (:class:`MetricNegativeLogLikelihood`)

  :param metricSpec: (:class:`MetricSpec`) metric to find module for.
      ``metricSpec.metric`` must be one of the names above.
  :returns: (:class:`AggregateMetric`) an appropriate metric module
  :raises Exception: if the metric name is not supported
  """
  # Name -> class dispatch table replaces the long if/elif chain.
  registry = {
    'rmse': MetricRMSE,
    'nrmse': MetricNRMSE,
    'aae': MetricAAE,
    'acc': MetricAccuracy,
    'avg_err': MetricAveError,
    'trivial': MetricTrivial,
    'two_gram': MetricTwoGram,
    'moving_mean': MetricMovingMean,
    'moving_mode': MetricMovingMode,
    'neg_auc': MetricNegAUC,
    'custom_error_metric': CustomErrorMetric,
    'multiStep': MetricMultiStep,
    'multiStepProbability': MetricMultiStepProbability,
    'ms_aae': MetricMultiStepAAE,
    'ms_avg_err': MetricMultiStepAveError,
    'passThruPrediction': MetricPassThruPrediction,
    'altMAPE': MetricAltMAPE,
    'MAPE': MetricMAPE,
    'multi': MetricMulti,
    'negativeLogLikelihood': MetricNegativeLogLikelihood,
  }
  metricName = metricSpec.metric
  if metricName not in registry:
    raise Exception("Unsupported metric type: %s" % metricName)
  return registry[metricName](metricSpec)
def getLabel(self, inferenceType=None):
  """
  Helper method that generates a unique label for a :class:`MetricSpec` /
  :class:`~nupic.frameworks.opf.opf_utils.InferenceType` pair. The label is
  formatted as follows:

  ::

      <predictionKind>:<metric type>:(paramName=value)*:field=<fieldname>

  For example:

  ::

      classification:aae:paramA=10.2:paramB=20:window=100:field=pounds

  :returns: (string) label for inference type
  """
  parts = []
  if inferenceType is not None:
    parts.append(InferenceType.getLabel(inferenceType))
  parts.append(self.inferenceElement)
  parts.append(self.metric)

  params = self.params
  if params is not None:
    for param in sorted(params):
      # Don't include the customFuncSource - it is too long an unwieldy
      if param in ('customFuncSource', 'customFuncDef', 'customExpr'):
        continue
      value = params[param]
      if isinstance(value, str):
        parts.append("%s='%s'" % (param, value))
      else:
        parts.append("%s=%s" % (param, value))

  if self.field:
    parts.append("field=%s" % (self.field))

  return self._LABEL_SEPARATOR.join(parts)
def getInferenceTypeFromLabel(cls, label):
  """
  Extracts the PredictionKind (temporal vs. nontemporal) from the given
  metric label.

  :param label: (string) for a metric spec generated by
         :meth:`getMetricLabel`
  :returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`) or
         None if the prefix is not a valid inference type
  """
  # The inference type is the prefix before the first separator.
  infType, _, _ = label.partition(cls._LABEL_SEPARATOR)
  if InferenceType.validate(infType):
    return infType
  return None
def _getShiftedGroundTruth(self, groundTruth): """ Utility function that saves the passed in groundTruth into a local history buffer, and returns the groundTruth from self._predictionSteps ago, where self._predictionSteps is defined by the 'steps' parameter. This can be called from the beginning of a derived class's addInstance() before it passes groundTruth and prediction onto accumulate(). """ # Save this ground truth into our input history self._groundTruthHistory.append(groundTruth) # This is only supported when _predictionSteps has one item in it assert (len(self._predictionSteps) == 1) # Return the one from N steps ago if len(self._groundTruthHistory) > self._predictionSteps[0]: return self._groundTruthHistory.popleft() else: if hasattr(groundTruth, '__iter__'): return [None] * len(groundTruth) else: return None
def addInstance(self, groundTruth, prediction, record = None, result = None):
  """Compute and store metric value.

  Only the prediction is used; groundTruth, record, and result are accepted
  for interface compatibility but ignored here.
  """
  # Delegate to this metric's averaging function and cache the result.
  self.value = self.avg(prediction)
def mostLikely(self, pred):
  """Helper function to return a scalar value representing the most
  likely outcome given a probability distribution.

  :param pred: dict mapping outcomes to probabilities
  :returns: the outcome with the highest probability; None if all
      probabilities are <= 0 (preserving the original behavior)
  """
  if len(pred) == 1:
    # next(iter(...)) works on both Python 2 and 3; the original
    # pred.keys()[0] fails on Python 3 dict views.
    return next(iter(pred))

  mostLikelyOutcome = None
  maxProbability = 0
  for prediction, probability in pred.items():
    if probability > maxProbability:
      mostLikelyOutcome = prediction
      maxProbability = probability

  return mostLikelyOutcome
def expValue(self, pred):
  """Helper function to return a scalar value representing the expected
  value of a probability distribution.

  :param pred: dict mapping scalar outcomes to probabilities
  :returns: the single outcome if there is only one entry, else the
      probability-weighted sum of the outcomes
  """
  if len(pred) == 1:
    # next(iter(...)) works on both Python 2 and 3; the original
    # pred.keys()[0] fails on Python 3 dict views.
    return next(iter(pred))
  return sum(x * p for x, p in pred.items())
def encode(self, inputData):
  """Convenience wrapper for :meth:`.encodeIntoArray`.

  This may be less efficient because it allocates a new numpy array every
  call.

  :param inputData: input data to be encoded
  :return: a numpy array with the encoded representation of inputData
  """
  # Fresh zeroed buffer sized to this encoder's full output width.
  # NOTE(review): `defaultDtype` is presumably a module-level global — confirm.
  output = numpy.zeros((self.getWidth(),), dtype=defaultDtype)
  # Delegate the actual encoding to the subclass implementation.
  self.encodeIntoArray(inputData, output)
  return output
def getScalarNames(self, parentFieldName=''):
  """
  Return the field names for each of the scalar values returned by
  getScalars.

  :param parentFieldName: The name of the encoder which is our parent.
         This name is prefixed to each of the field names within this
         encoder to form the keys of the dict() in the retval.

  :return: array of field names
  """
  names = []

  if self.encoders is not None:
    # Composite: collect (and optionally prefix) each child's names.
    for (childName, childEncoder, _) in self.encoders:
      childNames = childEncoder.getScalarNames(parentFieldName=childName)
      if parentFieldName != '':
        childNames = ['%s.%s' % (parentFieldName, n) for n in childNames]
      names.extend(childNames)
  else:
    # Leaf: a single name, preferring the parent's if one was given.
    if parentFieldName != '':
      names.append(parentFieldName)
    else:
      names.append(self.name)

  return names
def getDecoderOutputFieldTypes(self):
  """
  Returns a sequence of field types corresponding to the elements in the
  decoded output field array. The types are defined by
  :class:`~nupic.data.field_meta.FieldMetaType`.

  :return: list of :class:`~nupic.data.field_meta.FieldMetaType` objects
  """
  # Return the memoized flattened list when it exists.
  cached = getattr(self, '_flattenedFieldTypeList', None)
  if cached is not None:
    return cached

  # NOTE: we take care of the composites, but leaf encoders must override
  # this method and return a list of one field_meta.FieldMetaType.XXXX
  # element corresponding to the encoder's decoder output field type
  fieldTypes = []
  for (childName, childEncoder, _) in self.encoders:
    fieldTypes.extend(childEncoder.getDecoderOutputFieldTypes())

  self._flattenedFieldTypeList = fieldTypes
  return fieldTypes
def _getInputValue(self, obj, fieldName): """ Gets the value of a given field from the input record """ if isinstance(obj, dict): if not fieldName in obj: knownFields = ", ".join( key for key in obj.keys() if not key.startswith("_") ) raise ValueError( "Unknown field name '%s' in input record. Known fields are '%s'.\n" "This could be because input headers are mislabeled, or because " "input data rows do not contain a value for '%s'." % ( fieldName, knownFields, fieldName ) ) return obj[fieldName] else: return getattr(obj, fieldName)
def getEncoderList(self):
  """
  :return: a reference to each sub-encoder in this encoder. They are
           returned in the same order as they are for
           :meth:`.getScalarNames` and :meth:`.getScalars`.
  """
  # Return the memoized flattened list when it exists.
  cached = getattr(self, '_flattenedEncoderList', None)
  if cached is not None:
    return cached

  flattened = []
  if self.encoders is not None:
    # Composite: flatten each child's encoder list in order.
    for (childName, childEncoder, _) in self.encoders:
      flattened.extend(childEncoder.getEncoderList())
  else:
    # Leaf: just this encoder.
    flattened.append(self)

  self._flattenedEncoderList = flattened
  return flattened
def getScalars(self, inputData):
  """
  Returns a numpy array containing the sub-field scalar value(s) for
  each sub-field of the ``inputData``. To get the associated field names
  for each of the scalar values, call :meth:`.getScalarNames()`.

  For a simple scalar encoder, the scalar value is simply the input
  unmodified. For category encoders, it is the scalar representing the
  category string that is passed in. For the datetime encoder, the scalar
  value is the the number of seconds since epoch.

  The intent of the scalar representation of a sub-field is to provide a
  baseline for measuring error differences. You can compare the scalar value
  of the inputData with the scalar value returned from
  :meth:`.topDownCompute` on a top-down representation to evaluate
  prediction accuracy, for example.

  :param inputData: The data from the source. This is typically an object
         with members
  :return: array of scalar values
  """
  retVals = numpy.array([])

  if self.encoders is None:
    # Leaf fallback: the input itself is the scalar value.
    return numpy.hstack((retVals, inputData))

  # Composite: concatenate each child's scalars in order.
  for (childName, childEncoder, _) in self.encoders:
    childValues = childEncoder.getScalars(
        self._getInputValue(inputData, childName))
    retVals = numpy.hstack((retVals, childValues))
  return retVals
def getEncodedValues(self, inputData):
  """
  Returns the input in the same format as is returned by
  :meth:`.topDownCompute`. For most encoder types, this is the same as the
  input data. For instance, for scalar and category types, this
  corresponds to the numeric and string values, respectively, from the
  inputs. For datetime encoders, this returns the list of scalars for each
  of the sub-fields (timeOfDay, dayOfWeek, etc.)

  This method is essentially the same as :meth:`.getScalars` except that it
  returns strings.

  :param inputData: The input data in the format it is received from the
         data source
  :return: A list of values, in the same format and in the same order as
           they are returned by :meth:`.topDownCompute`.
  """
  collected = []

  def _accumulate(values):
    # Sequences are flattened into the result; scalars appended as-is.
    if _isSequence(values):
      collected.extend(values)
    else:
      collected.append(values)

  if self.encoders is not None:
    # Composite: gather each child's encoded values in order.
    for (childName, childEncoder, _) in self.encoders:
      _accumulate(
          childEncoder.getEncodedValues(
              self._getInputValue(inputData, childName)))
  else:
    # Leaf: the input itself is the encoded value.
    _accumulate(inputData)

  return tuple(collected)
def getBucketIndices(self, inputData):
  """
  Returns an array containing the sub-field bucket indices for each
  sub-field of the inputData. To get the associated field names for each of
  the buckets, call :meth:`.getScalarNames`.

  :param inputData: The data from the source. This is typically an object
         with members.
  :return: array of bucket indices
  """
  if self.encoders is None:
    # Leaf encoders must provide their own implementation.
    assert False, "Should be implemented in base classes that are not " \
                  "containers for other encoders"

  # Composite: concatenate each child's bucket indices in order.
  indices = []
  for (childName, childEncoder, _) in self.encoders:
    indices.extend(
        childEncoder.getBucketIndices(
            self._getInputValue(inputData, childName)))
  return indices
def scalarsToStr(self, scalarValues, scalarNames=None):
  """
  Return a pretty print string representing the return values from
  :meth:`.getScalars` and :meth:`.getScalarNames`.

  :param scalarValues: input values to encode to string
  :param scalarNames: optional input of scalar names to convert. If None,
         gets scalar names from :meth:`.getScalarNames`
  :return: string representation of scalar values
  """
  if scalarNames is None:
    scalarNames = self.getScalarNames()

  # "name:value" pairs, two decimal places, comma-separated.
  pieces = ["%s:%.2f" % (label, value)
            for (label, value) in zip(scalarNames, scalarValues)]
  return ", ".join(pieces)
def getFieldDescription(self, fieldName):
  """
  Return the offset and length of a given field within the encoded output.

  :param fieldName: Name of the field
  :return: tuple(``offset``, ``width``) of the field within the encoded
           output
  :raises RuntimeError: if the field is not part of this encoder
  """
  # Append a sentinel so the width of the last real field can be computed.
  description = self.getDescription() + [("end", self.getWidth())]

  # Locate the requested field.
  for index, (name, offset) in enumerate(description):
    if name == fieldName:
      break

  # Falling off the end (or matching only the sentinel) means "not found".
  if index >= len(description) - 1:
    raise RuntimeError("Field name %s not found in this encoder" % fieldName)

  # Width is the gap to the next entry's offset.
  return (offset, description[index + 1][1] - offset)
def encodedBitDescription(self, bitOffset, formatted=False):
  """
  Return a description of the given bit in the encoded output. This
  includes the field name and the offset within the field.

  :param bitOffset: Offset of the bit to get the description of
  :param formatted: If True, the bitOffset is w.r.t. formatted output,
         which includes separators
  :return: tuple(``fieldName``, ``offsetWithinField``)
  :raises IndexError: if the bit is outside the allowable range
  """
  # Scan the fields, remembering the last field starting at or before the
  # requested bit.
  prevFieldName = None
  prevFieldOffset = None
  description = self.getDescription()
  for index, (name, offset) in enumerate(description):
    if formatted:
      # Each prior field contributes one separator column.
      offset = offset + index
      if bitOffset == offset - 1:
        prevFieldName = "separator"
        prevFieldOffset = bitOffset
        break
    if bitOffset < offset:
      break
    prevFieldName, prevFieldOffset = name, offset

  width = self.getDisplayWidth() if formatted else self.getWidth()

  if prevFieldOffset is None or bitOffset > self.getWidth():
    raise IndexError("Bit is outside of allowable range: [0 - %d]" % width)

  return (prevFieldName, bitOffset - prevFieldOffset)
def pprintHeader(self, prefix=""):
  """
  Pretty-print a header that labels the sub-fields of the encoded
  output. This can be used in conjuction with :meth:`.pprint`.

  :param prefix: printed before the header if specified
  """
  print prefix,
  # Sentinel entry lets the last real field's width be computed.
  description = self.getDescription() + [("end", self.getWidth())]
  for i in xrange(len(description) - 1):
    name = description[i][0]
    width = description[i+1][1] - description[i][1]
    # Left-justify the name inside the field's bit-width, then " |".
    formatStr = "%%-%ds |" % width
    if len(name) > width:
      # Truncate names wider than their field.
      pname = name[0:width]
    else:
      pname = name
    print formatStr % pname,
  print
  # Underline: total bits plus 3 characters (" | ") per separator.
  print prefix, "-" * (self.getWidth() + (len(description) - 1)*3 - 1)
def pprint(self, output, prefix=""):
  """
  Pretty-print the encoded output using ascii art.

  :param output: to print
  :param prefix: printed before the header if specified
  """
  print prefix,
  # Sentinel entry gives the end offset of the last field.
  description = self.getDescription() + [("end", self.getWidth())]
  for i in xrange(len(description) - 1):
    offset = description[i][1]
    nextoffset = description[i+1][1]
    # One field's bits rendered as a string, fields separated by " |".
    print "%s |" % bitsToString(output[offset:nextoffset]),
  print
def decode(self, encoded, parentFieldName=''):
  """
  Takes an encoded output and does its best to work backwards and generate
  the input that would have generated it.

  In cases where the encoded output contains more ON bits than an input
  would have generated, this routine will return one or more ranges of
  inputs which, if their encoded outputs were ORed together, would produce
  the target output. This makes it suitable for describing a learned
  coincidence in the SP, which may be a union of one or more inputs. To get
  the *most likely* single input instead, use :meth:`.topDownCompute`; to
  pretty print the return value, use :meth:`.decodedToStr`.

  :param encoded: The encoded output that you want decode
  :param parentFieldName: The name of the encoder which is our parent. This
         name is prefixed to each of the field names within this encoder to
         form the keys of the dict() in the retval.

  :return: tuple(``fieldsDict``, ``fieldOrder``)

           ``fieldsDict`` maps each decoded field name (parent name joined
           to child encoder name with '.') to (ranges, desc), where ranges
           is a list of one or more (minVal, maxVal) input ranges that
           would generate bits in the encoded output and desc is a pretty
           print description of those ranges (for category encoders, the
           category names for the scalar values in the ranges). Fields with
           no contributing bits are omitted.

           ``fieldOrder`` is the keys of ``fieldsDict`` in the order the
           fields appear in the encoded output.
           TODO: when we switch to Python 2.7 or 3.x, use OrderedDict

           Example retvals:

           .. code-block:: python

              {'amount':  ( [[1,3], [7,10]], '1-3, 7-10' )}
              {'country': ( [[1,1], [5,6]], 'US, GB, ES' )}
              {'amount':  ( [[2.5,2.5]], '2.5' ),
               'country': ( [[1,1], [5,6]], 'US, GB, ES' )}
  """
  fieldsDict = dict()
  fieldsOrder = []

  # Effective prefix for the child field names.
  if parentFieldName == '':
    parentName = self.name
  else:
    parentName = "%s.%s" % (parentFieldName, self.name)

  if self.encoders is not None:
    # Merge decodings of all child encoders together.
    lastIndex = len(self.encoders) - 1
    for i, (childName, childEncoder, childOffset) in enumerate(self.encoders):
      # Each child owns the bits from its offset up to the next child's
      # offset (or the end of this encoder's output for the last child).
      sliceEnd = self.encoders[i + 1][2] if i < lastIndex else self.width
      (childDict, childOrder) = childEncoder.decode(
          encoded[childOffset:sliceEnd], parentFieldName=parentName)
      fieldsDict.update(childDict)
      fieldsOrder.extend(childOrder)

  return (fieldsDict, fieldsOrder)
def decodedToStr(self, decodeResults):
  """
  Return a pretty print string representing the return value from
  :meth:`.decode`.
  """
  (fieldsDict, fieldsOrder) = decodeResults

  # "name:[ranges]" per field, comma-separated, in encoded-output order.
  parts = []
  for fieldName in fieldsOrder:
    (_, rangesStr) = fieldsDict[fieldName]
    parts.append("%s:[%s]" % (fieldName, rangesStr))
  return ", ".join(parts)
def getBucketInfo(self, buckets):
  """
  Returns a list of :class:`.EncoderResult` namedtuples describing the
  inputs for each sub-field that correspond to the bucket indices passed in
  ``buckets``. To get the associated field names for each of the values,
  call :meth:`.getScalarNames`.

  :param buckets: The list of bucket indices, one for each sub-field
         encoder. These bucket indices for example may have been retrieved
         from the :meth:`.getBucketIndices` call.
  :return: A list of :class:`.EncoderResult`.
  """
  # Leaf encoders must provide their own implementation.
  if self.encoders is None:
    raise RuntimeError("Must be implemented in sub-class")

  # Concatenate the results from bucketInfo on each child encoder; each
  # composite child consumes one bucket per grandchild, a leaf consumes one.
  retVals = []
  bucketOffset = 0
  for (childName, childEncoder, _) in self.encoders:
    if childEncoder.encoders is not None:
      nextBucketOffset = bucketOffset + len(childEncoder.encoders)
    else:
      nextBucketOffset = bucketOffset + 1
    retVals.extend(
        childEncoder.getBucketInfo(buckets[bucketOffset:nextBucketOffset]))
    bucketOffset = nextBucketOffset
  return retVals
def topDownCompute(self, encoded):
  """
  Returns a list of :class:`.EncoderResult` namedtuples describing the
  top-down best guess inputs for each sub-field given the encoded output.
  These are the values which are most likely to generate the given encoded
  output. To get the associated field names for each of the values, call
  :meth:`.getScalarNames`.

  :param encoded: The encoded output. Typically received from the topDown
         outputs from the spatial pooler just above us.
  :return: A list of :class:`.EncoderResult`
  """
  # Leaf encoders must provide their own implementation.
  if self.encoders is None:
    raise RuntimeError("Must be implemented in sub-class")

  # Concatenate the results from topDownCompute on each child encoder.
  retVals = []
  lastIndex = len(self.encoders) - 1
  for i, (childName, childEncoder, childOffset) in enumerate(self.encoders):
    # Each child owns the bits from its offset up to the next child's
    # offset (or the end of this encoder's output for the last child).
    sliceEnd = self.encoders[i + 1][2] if i < lastIndex else self.width
    values = childEncoder.topDownCompute(encoded[childOffset:sliceEnd])
    if _isSequence(values):
      retVals.extend(values)
    else:
      retVals.append(values)
  return retVals
def closenessScores(self, expValues, actValues, fractional=True):
  """
  Compute closeness scores between the expected scalar value(s) and actual
  scalar value(s). The expected scalar values are typically those obtained
  from the :meth:`.getScalars` method. The actual scalar values are
  typically those returned from :meth:`.topDownCompute`.

  This method returns one closeness score for each value in expValues (or
  actValues which must be the same length). With ``fractional=True`` the
  score ranges from 0 to 1.0, 1.0 being a perfect match; otherwise the raw
  absolute error is returned. Each encoder type can define its own metric —
  a category encoder may return 1 or 0, a scalar encoder a percentage
  match, etc.

  :param expValues: Array of expected scalar values, typically obtained
         from :meth:`.getScalars`
  :param actValues: Array of actual values, typically obtained from
         :meth:`.topDownCompute`

  :return: Array of closeness scores, one per item in expValues (or
           actValues).
  """
  if self.encoders is None:
    # Leaf fallback: closeness is a percentage match on the first value.
    err = abs(expValues[0] - actValues[0])
    if fractional:
      denom = max(expValues[0], actValues[0])
      if denom == 0:
        denom = 1.0
      score = 1.0 - float(err) / denom
      if score < 0:
        score = 0
    else:
      score = err
    return numpy.array([score])

  # Composite: concatenate the scores from each child encoder; each child
  # consumes as many leading scalars as it returns scores for.
  scalarIdx = 0
  allScores = numpy.array([])
  for (childName, childEncoder, _) in self.encoders:
    childScores = childEncoder.closenessScores(
        expValues[scalarIdx:], actValues[scalarIdx:], fractional=fractional)
    scalarIdx += len(childScores)
    allScores = numpy.hstack((allScores, childScores))
  return allScores
def POST(self, name):
  """
  /models/{name}

  schema:
    {
      "modelParams": dict containing model parameters
      "predictedFieldName": str
    }

  returns: {"success":name}
  """
  global g_models

  payload = json.loads(web.data())
  modelParams = payload["modelParams"]
  predictedField = payload["predictedFieldName"]

  # Reject duplicate model names.
  if name in g_models.keys():
    raise web.badrequest("Model with name <%s> already exists" % name)

  newModel = ModelFactory.create(modelParams)
  newModel.enableInference({'predictedField': predictedField})
  g_models[name] = newModel
  return json.dumps({"success": name})
def POST(self, name):
  """
  /models/{name}/run

  schema:
    {
      predictedFieldName: value
      timestamp: %m/%d/%y %H:%M
    }

  NOTE: predictedFieldName MUST be the same name specified when
        creating the model.

  returns:
    {
      "predictionNumber":<number of record>,
      "anomalyScore":anomalyScore
    }
  """
  global g_models

  payload = json.loads(web.data())
  # Parse the wire-format timestamp into a datetime for the model.
  payload["timestamp"] = datetime.datetime.strptime(
      payload["timestamp"], "%m/%d/%y %H:%M")

  if name not in g_models.keys():
    raise web.notfound("Model with name <%s> does not exist." % name)

  modelResult = g_models[name].run(payload)
  return json.dumps({"predictionNumber": modelResult.predictionNumber,
                     "anomalyScore": modelResult.inferences["anomalyScore"]})
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
  '''Mirror Image Visualization: Shows the encoding space juxtaposed against the
  coincidence space. The encoding space is the bottom-up sensory encoding and
  the coincidence space depicts the corresponding activation of coincidences in
  the SP. Hence, the mirror image visualization is a visual depiction of the
  mapping of SP cells to the input representations.

  Note:
  * The files spBUOut and sensorBUOut are assumed to be in the output format
  used for LPF experiment outputs.
  * BU outputs for some sample datasets are provided. Specify the name of the
  dataset as an option while running this script.

  :param activeCoincsFile: open file of SP bottom-up outputs, one record per
      line; the first field of each line is the SP size.
  :param encodingsFile: open file of sensor bottom-up outputs, same format;
      the first field of each line is the encoding-space size.
  :param dataset: name of the dataset (used only for plot labeling).
  '''
  lines = activeCoincsFile.readlines()
  inputs = encodingsFile.readlines()

  # w = number of active bits per encoding (fields per line minus the size
  # field). NOTE(review): assumes space-separated records - confirm format.
  w = len(inputs[0].split(' '))-1
  patterns = set([])    # all SP cells that have ever been active
  encodings = set([])   # all encoding bits that have ever been active
  coincs = []           # The set of all coincidences that have won at least once
  reUsedCoincs = []     # coincidences that re-use previously active cells
  firstLine = inputs[0].split(' ')
  size = int(firstLine.pop(0))   # total size of the encoding space
  spOutput = np.zeros((len(lines),40))  # 40 active SP cells assumed per record
  inputBits = np.zeros((len(lines),w))
  print 'Total n:', size
  print 'Total number of records in the file:', len(lines), '\n'
  print 'w:', w

  count = 0
  for x in xrange(len(lines)):
    inputSpace = [] #Encoded representation for each input

    spBUout = [int(z) for z in lines[x].split(' ')]
    spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP
    temp = set(spBUout)
    spOutput[x]=spBUout

    input = [int(z) for z in inputs[x].split(' ')]
    input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space
    tempInput = set(input)
    inputBits[x]=input

    # Creating the encoding space: active bit positions keep their index,
    # inactive positions are drawn as '|'
    for m in xrange(size):
      if m in tempInput:
        inputSpace.append(m)
      else:
        inputSpace.append('|')  #A non-active bit

    repeatedBits = tempInput.intersection(encodings)  #Storing the bits that have been previously active
    reUsed = temp.intersection(patterns)  #Checking if any of the active cells have been previously active

    # Dividing the coincidences into two different categories: brand-new
    # cell sets vs. sets that re-use previously seen cells.
    if len(reUsed)==0:
      coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
    else:
      reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))
    patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once

    encodings = encodings.union(tempInput)
    count +=1

  overlap = {}
  overlapVal = 0

  # Print pairwise overlaps; `seen` glues together unique and re-used coincs.
  seen = []
  seen = (printOverlaps(coincs, coincs, seen))
  print len(seen), 'sets of 40 cells'
  seen = printOverlaps(reUsedCoincs, coincs, seen)

  # For each unique coincidence, total cell overlap with all re-used ones.
  Summ=[]
  for z in coincs:
    c=0
    for y in reUsedCoincs:
      c += len(z[1].intersection(y[1]))
    Summ.append(c)
  print 'Sum: ', Summ

  # Draw up to 3 mirror-image figures over growing prefixes of the records.
  for m in xrange(3):
    displayLimit = min(51, len(spOutput[m*200:]))
    if displayLimit>0:
      drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1)
    else:
      print 'No more records to display'
  pyl.show()
def drawFile(dataset, matrix, patterns, cells, w, fnum):
  '''The similarity of two patterns in the bit-encoding space is displayed
  alongside their similarity in the sp-coinc space.

  The lower triangle of `matrix` holds pairwise encoding-space overlap
  (percent of w) and the upper triangle holds SP-coincidence overlap
  (scaled by 5/2); the average absolute difference between the two
  triangles is printed as a score.

  :param dataset: dataset name, used for the plot label only
  :param matrix: square zero-initialized matrix, filled in place
  :param patterns: per-record encoding bit indices
  :param cells: per-record active SP cell indices
  :param w: number of active bits per encoding
  :param fnum: matplotlib figure number
  '''
  score=0
  count = 0
  assert len(patterns)==len(cells)
  for p in xrange(len(patterns)-1):
    # Lower triangle: encoding overlap as a percentage of w.
    matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
    # Upper triangle: SP cell overlap, scaled (assumes 40 active cells,
    # so 40 * 5/2 = 100 at perfect overlap - TODO confirm).
    matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]

    # Accumulate the mismatch between the two triangles.
    score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
    count += len(matrix[p+1:,p])

  print 'Score', score/count

  fig = pyl.figure(figsize = (10,10), num = fnum)
  pyl.matshow(matrix, fignum = fnum)
  pyl.colorbar()
  pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
  pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
  pyl.ylabel('Encoding space', fontsize=12)
def printOverlaps(comparedTo, coincs, seen):
  """
  Print, for every coincidence in `comparedTo`, its cell overlap with each
  entry of `coincs` and its closest previously-seen encodings/cell-sets,
  and return the updated `seen` list.

  Parameters:
  --------------------------------------------------------------------
  comparedTo:  The set of coincidences being reported on; each entry is a
               tuple (pattern no, active cells, repeated bits,
               encoding (full), encoding (summary))
  coincs:      The coincidences we compare each entry against
  seen:        List of (active cells, encoding summary, pattern no) tuples
               already encountered. This helps glue together the unique
               and reused coincs.

  :returns: `seen`, extended with one entry per element of `comparedTo`.
  """
  inputOverlap = 0
  cellOverlap = 0
  for y in comparedTo:
    closestInputs = []
    closestCells = []
    if len(seen)>0:
      # Best overlap achieved against anything seen so far, in both spaces.
      inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
      cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
      # Collect every previously-seen pattern that achieves that best overlap.
      for m in xrange( len(seen) ):
        if len(seen[m][1].intersection(y[4]))==inputOverlap:
          closestInputs.append(seen[m][2])
        if len(seen[m][0].intersection(y[1]))==cellOverlap:
          closestCells.append(seen[m][2])
    seen.append((y[1], y[4], y[0]))

    print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \
        'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells)

  return seen
def createInput(self): """create a random input vector""" print "-" * 70 + "Creating a random input vector" + "-" * 70 #clear the inputArray to zero before creating a new input vector self.inputArray[0:] = 0 for i in range(self.inputSize): #randrange returns 0 or 1 self.inputArray[i] = random.randrange(2)
def run(self): """Run the spatial pooler with the input vector""" print "-" * 80 + "Computing the SDR" + "-" * 80 #activeArray[column]=1 if column is active after spatial pooling self.sp.compute(self.inputArray, True, self.activeArray) print self.activeArray.nonzero()
def addNoise(self, noiseLevel):
  """Flip a fraction of the input bits chosen uniformly at random.

  :param noiseLevel: The fraction (0-1) of total input bits that should be
      flipped; e.g. 0.1 flips 10% of the bits. Positions are drawn with
      replacement, so fewer distinct bits may end up flipped.
  """
  numFlips = int(noiseLevel * self.inputSize)
  for _ in range(numFlips):
    # Pick a random position and invert whatever bit is there.
    flipPos = int(random.random() * self.inputSize)
    self.inputArray[flipPos] = 0 if self.inputArray[flipPos] == 1 else 1
def _labeledInput(activeInputs, cellsPerCol=32): """Print the list of [column, cellIdx] indices for each of the active cells in activeInputs. """ if cellsPerCol == 0: cellsPerCol = 1 cols = activeInputs.size / cellsPerCol activeInputs = activeInputs.reshape(cols, cellsPerCol) (cols, cellIdxs) = activeInputs.nonzero() if len(cols) == 0: return "NONE" items = ["(%d): " % (len(cols))] prevCol = -1 for (col,cellIdx) in zip(cols, cellIdxs): if col != prevCol: if prevCol != -1: items.append("] ") items.append("Col %d: [" % col) prevCol = col items.append("%d," % cellIdx) items.append("]") return " ".join(items)
def clear(self):
  """Clears the state of the KNNClassifier, returning it to the state of a
  freshly constructed instance."""
  # Core pattern storage and bookkeeping.
  self._Memory = None
  self._numPatterns = 0
  self._M = None
  self._categoryList = []
  self._partitionIdList = []
  self._partitionIdMap = {}
  self._finishedLearning = False
  self._iterationIdx = -1

  # Fixed-capacity mode: once full, least-recently-used patterns are
  # evicted; only supported with sparse memory.
  if self.maxStoredPatterns > 0:
    assert self.useSparseMemory, ("Fixed capacity KNN is implemented only "
                                  "in the sparse memory mode")
    self.fixedCapacity = True
    self._categoryRecencyList = []
  else:
    self.fixedCapacity = False

  # Cached per-prototype sizes (recomputed lazily).
  self._protoSizes = None

  # PCA / SVD projection state.
  self._s = None
  self._vt = None
  self._nc = None
  self._mean = None

  # Network Builder state.
  self._specificIndexTraining = False
  self._nextTrainingIndices = None
def prototypeSetCategory(self, idToCategorize, newCategory):
  """
  Re-label the stored prototype whose id is ``idToCategorize``. Allows ids
  to be assigned a category and subsequently enables users to use:

  - :meth:`~.KNNClassifier.KNNClassifier.removeCategory`
  - :meth:`~.KNNClassifier.KNNClassifier.closestTrainingPattern`
  - :meth:`~.KNNClassifier.KNNClassifier.closestOtherTrainingPattern`

  Unknown ids are silently ignored.
  """
  try:
    rowIdx = self._categoryRecencyList.index(idToCategorize)
  except ValueError:
    # Id was never stored - nothing to do.
    return
  self._categoryList[rowIdx] = newCategory
def removeIds(self, idsToRemove):
  """
  Remove all stored patterns whose row id is in ``idsToRemove``.

  There are two caveats. First, this is a potentially slow operation. Second,
  pattern indices will shift if patterns before them are removed.

  :param idsToRemove: A list of row ids to remove.
  """
  # Translate row ids into row positions, then delegate the actual removal.
  rows = [pos for pos, rowID in enumerate(self._categoryRecencyList)
          if rowID in idsToRemove]
  self._removeRows(rows)
def removeCategory(self, categoryToRemove):
  """
  Remove all stored patterns belonging to a category.

  There are two caveats. First, this is a potentially slow operation. Second,
  pattern indices will shift if patterns before them are removed.

  :param categoryToRemove: Category label to remove
  :returns: (int) number of rows removed (0 when memory is empty).
      NOTE: the original code initialized a ``removedRows`` counter but
      never updated it, returning None on the success path; we now return
      the count reported by ``_removeRows``.
  """
  if self._Memory is None:
    return 0

  # The internal category indices are stored in float
  # format, so we should compare with a float
  catToRemove = float(categoryToRemove)

  # Form a list of all rows carrying the doomed category
  rowsToRemove = [k for k, catID in enumerate(self._categoryList)
                  if catID == catToRemove]

  # Remove rows from the classifier and report how many went away
  numRemoved = self._removeRows(rowsToRemove)
  assert catToRemove not in self._categoryList
  return numRemoved
def _removeRows(self, rowsToRemove): """ A list of row indices to remove. There are two caveats. First, this is a potentially slow operation. Second, pattern indices will shift if patterns before them are removed. """ # Form a numpy array of row indices to be removed removalArray = numpy.array(rowsToRemove) # Remove categories self._categoryList = numpy.delete(numpy.array(self._categoryList), removalArray).tolist() if self.fixedCapacity: self._categoryRecencyList = numpy.delete( numpy.array(self._categoryRecencyList), removalArray).tolist() # Remove the partition ID, if any for these rows and rebuild the id map. for row in reversed(rowsToRemove): # Go backwards # Remove these patterns from partitionList self._partitionIdList.pop(row) self._rebuildPartitionIdMap(self._partitionIdList) # Remove actual patterns if self.useSparseMemory: # Delete backwards for rowIndex in rowsToRemove[::-1]: self._Memory.deleteRow(rowIndex) else: self._M = numpy.delete(self._M, removalArray, 0) numRemoved = len(rowsToRemove) # Sanity checks numRowsExpected = self._numPatterns - numRemoved if self.useSparseMemory: if self._Memory is not None: assert self._Memory.nRows() == numRowsExpected else: assert self._M.shape[0] == numRowsExpected assert len(self._categoryList) == numRowsExpected self._numPatterns -= numRemoved return numRemoved
def learn(self, inputPattern, inputCategory, partitionId=None, isSparse=0,
          rowID=None):
  """
  Train the classifier to associate specified input pattern with a
  particular category.

  :param inputPattern: (list) The pattern to be assigned a category. If
      isSparse is 0, this should be a dense array (both ON and OFF bits
      present). Otherwise, if isSparse > 0, this should be a list of the
      indices of the non-zero bits in sorted order

  :param inputCategory: (int) The category to be associated to the training
      pattern

  :param partitionId: (int) partitionID allows you to associate an id with each
      input vector. It can be used to associate input patterns stored in the
      classifier with an external id. This can be useful for debugging or
      visualizing. Another use case is to ignore vectors with a specific id
      during inference (see description of infer() for details). There can be
      at most one partitionId per stored pattern (i.e. if two patterns are
      within distThreshold, only the first partitionId will be stored). This
      is an optional parameter.

  :param isSparse: (int) 0 if the input pattern is a dense representation.
      When the input pattern is a list of non-zero indices, then isSparse
      is the number of total bits (n). E.g. for the dense array
      [0, 1, 1, 0, 0, 1], isSparse should be `0`. For the equivalent sparse
      representation [1, 2, 5] (which specifies the indices of active bits),
      isSparse should be `6`, which is the total number of bits in the input
      space.

  :param rowID: (int) identifier stored in _categoryRecencyList for
      fixed-capacity eviction; defaults to the current iteration index.

  :returns: The number of patterns currently stored in the classifier
  """
  if self.verbosity >= 1:
    print "%s learn:" % g_debugPrefix
    print "  category:", int(inputCategory)
    print "  active inputs:", _labeledInput(inputPattern,
                                            cellsPerCol=self.cellsPerCol)

  # Sparse inputs must be sorted index lists within the declared width.
  if isSparse > 0:
    assert all(inputPattern[i] <= inputPattern[i+1]
               for i in xrange(len(inputPattern)-1)), \
                     "Sparse inputPattern must be sorted."
    assert all(bit < isSparse for bit in inputPattern), \
      ("Sparse inputPattern must not index outside the dense "
       "representation's bounds.")

  if rowID is None:
    rowID = self._iterationIdx

  # ---------------------------------------------------------------------
  # Dense vectors
  if not self.useSparseMemory:

    # Not supported
    assert self.cellsPerCol == 0, "not implemented for dense vectors"

    # If the input was given in sparse form, convert it to dense
    if isSparse > 0:
      denseInput = numpy.zeros(isSparse)
      denseInput[inputPattern] = 1.0
      inputPattern = denseInput

    if self._specificIndexTraining and not self._nextTrainingIndices:
      # Specific index mode without any index provided - skip training
      return self._numPatterns

    if self._Memory is None:
      # Initialize memory with 100 rows and numPatterns = 0
      inputWidth = len(inputPattern)
      self._Memory = numpy.zeros((100,inputWidth))
      self._numPatterns = 0
      self._M = self._Memory[:self._numPatterns]

    addRow = True

    if self._vt is not None:
      # Compute projection onto the SVD eigenvectors
      inputPattern = numpy.dot(self._vt, inputPattern - self._mean)

    if self.distThreshold > 0:
      # Check if input is too close to an existing input to be accepted
      dist = self._calcDistance(inputPattern)
      minDist = dist.min()
      addRow = (minDist >= self.distThreshold)

    if addRow:
      self._protoSizes = None     # need to re-compute
      if self._numPatterns == self._Memory.shape[0]:
        # Double the size of the memory
        self._doubleMemoryNumRows()

      if not self._specificIndexTraining:
        # Normal learning - append the new input vector
        self._Memory[self._numPatterns] = inputPattern
        self._numPatterns += 1
        self._categoryList.append(int(inputCategory))
      else:
        # Specific index training mode - insert vector in specified slot
        vectorIndex = self._nextTrainingIndices.pop(0)
        while vectorIndex >= self._Memory.shape[0]:
          self._doubleMemoryNumRows()
        self._Memory[vectorIndex] = inputPattern
        self._numPatterns = max(self._numPatterns, vectorIndex + 1)
        if vectorIndex >= len(self._categoryList):
          self._categoryList += [-1] * (vectorIndex -
                                        len(self._categoryList) + 1)
        self._categoryList[vectorIndex] = int(inputCategory)

      # Set _M to the "active" part of _Memory
      self._M = self._Memory[0:self._numPatterns]

      self._addPartitionId(self._numPatterns-1, partitionId)

  # ---------------------------------------------------------------------
  # Sparse vectors
  else:

    # If the input was given in sparse form, convert it to dense if necessary
    # (several features below only operate on dense vectors).
    if isSparse > 0 and (self._vt is not None or self.distThreshold > 0 \
            or self.numSVDDims is not None or self.numSVDSamples > 0 \
            or self.numWinners > 0):
      denseInput = numpy.zeros(isSparse)
      denseInput[inputPattern] = 1.0
      inputPattern = denseInput
      isSparse = 0

    # Get the input width
    if isSparse > 0:
      inputWidth = isSparse
    else:
      inputWidth = len(inputPattern)

    # Allocate storage if this is the first training vector
    if self._Memory is None:
      self._Memory = NearestNeighbor(0, inputWidth)

    # Support SVD if it is on
    if self._vt is not None:
      inputPattern = numpy.dot(self._vt, inputPattern - self._mean)

    # Threshold the input, zeroing out entries that are too close to 0.
    #  This is only done if we are given a dense input.
    if isSparse == 0:
      thresholdedInput = self._sparsifyVector(inputPattern, True)

    addRow = True

    # If given the layout of the cells, then turn on the logic that stores
    # only the start cell for bursting columns.
    if self.cellsPerCol >= 1:
      burstingCols = thresholdedInput.reshape(-1,
                                  self.cellsPerCol).min(axis=1).nonzero()[0]
      for col in burstingCols:
        thresholdedInput[(col * self.cellsPerCol) + 1 :
                         (col * self.cellsPerCol) + self.cellsPerCol] = 0

    # Don't learn entries that are too close to existing entries.
    if self._Memory.nRows() > 0:
      dist = None
      # if this vector is a perfect match for one we already learned, then
      #  replace the category - it may have changed with online learning on.
      if self.replaceDuplicates:
        dist = self._calcDistance(thresholdedInput, distanceNorm=1)
        if dist.min() == 0:
          rowIdx = dist.argmin()
          self._categoryList[rowIdx] = int(inputCategory)
          if self.fixedCapacity:
            self._categoryRecencyList[rowIdx] = rowID
          addRow = False

      # Don't add this vector if it matches closely with another we already
      # added
      if self.distThreshold > 0:
        if dist is None or self.distanceNorm != 1:
          dist = self._calcDistance(thresholdedInput)
        minDist = dist.min()
        addRow = (minDist >= self.distThreshold)
        if not addRow:
          if self.fixedCapacity:
            rowIdx = dist.argmin()
            self._categoryRecencyList[rowIdx] = rowID

    # If sparsity is too low, we do not want to add this vector
    if addRow and self.minSparsity > 0.0:
      if isSparse==0:
        sparsity = ( float(len(thresholdedInput.nonzero()[0])) /
                     len(thresholdedInput) )
      else:
        sparsity = float(len(inputPattern)) / isSparse
      if sparsity < self.minSparsity:
        addRow = False

    # Add the new sparse vector to our storage
    if addRow:
      self._protoSizes = None     # need to re-compute
      if isSparse == 0:
        self._Memory.addRow(thresholdedInput)
      else:
        self._Memory.addRowNZ(inputPattern, [1]*len(inputPattern))
      self._numPatterns += 1
      self._categoryList.append(int(inputCategory))
      self._addPartitionId(self._numPatterns-1, partitionId)
      if self.fixedCapacity:
        self._categoryRecencyList.append(rowID)
        # Over capacity: evict the least recently used pattern.
        if self._numPatterns > self.maxStoredPatterns and \
          self.maxStoredPatterns > 0:
          leastRecentlyUsedPattern = numpy.argmin(self._categoryRecencyList)
          self._Memory.deleteRow(leastRecentlyUsedPattern)
          self._categoryList.pop(leastRecentlyUsedPattern)
          self._categoryRecencyList.pop(leastRecentlyUsedPattern)
          self._numPatterns -= 1

  # Once enough samples have accumulated, switch to the SVD-projected space.
  if self.numSVDDims is not None and self.numSVDSamples > 0 \
        and self._numPatterns == self.numSVDSamples:
    self.computeSVD()

  return self._numPatterns
def getOverlaps(self, inputPattern):
  """
  Return the degree of overlap between an input pattern and each category
  stored in the classifier. The overlap is computed by computing:

  .. code-block:: python

    logical_and(inputPattern != 0, trainingPattern != 0).sum()

  :param inputPattern: pattern to check overlap of

  :returns: (overlaps, categories) Two numpy arrays of the same length, where:

    * overlaps: an integer overlap amount for each category
    * categories: category index for each element of overlaps
  """
  assert self.useSparseMemory, "Not implemented yet for dense storage"
  return (self._Memory.rightVecSumAtNZ(inputPattern), self._categoryList)
def getDistances(self, inputPattern):
  """Return the distances between the input pattern and all other
  stored patterns.

  :param inputPattern: pattern to check distance with

  :returns: (distances, categories) numpy arrays of the same length.
      - distances: distance from inputPattern to each stored pattern
      - categories: category index for each element of distances
  """
  return (self._getDistances(inputPattern), self._categoryList)
def infer(self, inputPattern, computeScores=True, overCategories=True,
          partitionId=None):
  """Finds the category that best matches the input pattern. Returns the
  winning category index as well as a distribution over all categories.

  :param inputPattern: (list or array) The pattern to be classified. This
      must be a dense representation of the array (e.g. [0, 0, 1, 1, 0, 1]).

  :param computeScores: NO EFFECT

  :param overCategories: NO EFFECT

  :param partitionId: (int) If provided, all training vectors with partitionId
      equal to that of the input pattern are ignored.
      For example, this may be used to perform k-fold cross validation
      without repopulating the classifier. First partition all the data into
      k equal partitions numbered 0, 1, 2, ... and then call learn() for each
      vector passing in its partitionId. Then, during inference, by passing
      in the partition ID in the call to infer(), all other vectors with the
      same partitionId are ignored simulating the effect of repopulating the
      classifier while ommitting the training vectors in the same partition.

  :returns: 4-tuple with these keys:

    - ``winner``: The category with the greatest number of nearest neighbors
      within the kth nearest neighbors. If the inferenceResult contains no
      neighbors, the value of winner is None. This can happen, for example,
      in cases of exact matching, if there are no stored vectors, or if
      minSparsity is not met.
    - ``inferenceResult``: A list of length numCategories, each entry contains
      the number of neighbors within the top k neighbors that are in that
      category.
    - ``dist``: A list of length numPrototypes. Each entry is the distance
      from the unknown to that prototype. All distances are between 0.0 and
      1.0.
    - ``categoryDist``: A list of length numCategories. Each entry is the
      distance from the unknown to the nearest prototype of
      that category. All distances are between 0 and 1.0.
  """

  # Calculate sparsity. If sparsity is too low, we do not want to run
  # inference with this vector
  sparsity = 0.0
  if self.minSparsity > 0.0:
    sparsity = ( float(len(inputPattern.nonzero()[0])) /
                 len(inputPattern) )

  if len(self._categoryList) == 0 or sparsity < self.minSparsity:
    # No categories learned yet; i.e. first inference w/ online learning or
    # insufficient sparsity
    winner = None
    inferenceResult = numpy.zeros(1)
    dist = numpy.ones(1)
    categoryDist = numpy.ones(1)

  else:
    maxCategoryIdx = max(self._categoryList)
    inferenceResult = numpy.zeros(maxCategoryIdx+1)
    dist = self._getDistances(inputPattern, partitionId=partitionId)
    validVectorCount = len(self._categoryList) - self._categoryList.count(-1)

    # Loop through the indices of the nearest neighbors.
    if self.exact:
      # Is there an exact match in the distances?
      exactMatches = numpy.where(dist<0.00001)[0]
      if len(exactMatches) > 0:
        for i in exactMatches[:min(self.k, validVectorCount)]:
          inferenceResult[self._categoryList[i]] += 1.0
    else:
      # Vote: each of the k nearest valid neighbors adds 1 to its category.
      sorted = dist.argsort()
      for j in sorted[:min(self.k, validVectorCount)]:
        inferenceResult[self._categoryList[j]] += 1.0

    # Prepare inference results: normalize the votes into a distribution.
    if inferenceResult.any():
      winner = inferenceResult.argmax()
      inferenceResult /= inferenceResult.sum()
    else:
      winner = None

    # Per-category distance = distance to the nearest prototype of that
    # category, clipped into [0, 1].
    categoryDist = min_score_per_category(maxCategoryIdx,
                                          self._categoryList, dist)
    categoryDist.clip(0, 1.0, categoryDist)

  if self.verbosity >= 1:
    print "%s infer:" % (g_debugPrefix)
    print "  active inputs:",  _labeledInput(inputPattern,
                                             cellsPerCol=self.cellsPerCol)
    print "  winner category:", winner
    print "  pct neighbors of each category:", inferenceResult
    print "  dist of each prototype:", dist
    print "  dist of each category:", categoryDist

  result = (winner, inferenceResult, dist, categoryDist)
  return result
def getClosest(self, inputPattern, topKCategories=3):
  """Returns the index of the pattern that is closest to inputPattern,
  the distances of all patterns to inputPattern, and the indices of the k
  closest categories.

  :param inputPattern: pattern to compare against the stored prototypes
  :param topKCategories: how many (category, distance) pairs to return
  :returns: (winner, dist, topNCats) where winner is the category with most
      votes among the k nearest valid neighbors, dist is the full distance
      array, and topNCats lists the categories of the closest prototypes.
  """
  dist = self._getDistances(inputPattern)
  nearestOrder = dist.argsort()

  # Vote among the k nearest neighbors, skipping invalid (-1) rows in the
  # count of usable vectors.
  votes = numpy.zeros(max(self._categoryList) + 1)
  validVectorCount = len(self._categoryList) - self._categoryList.count(-1)
  for idx in nearestOrder[:min(self.k, validVectorCount)]:
    votes[self._categoryList[idx]] += 1.0
  winner = votes.argmax()

  # Categories (with distances) of the closest prototypes, nearest first.
  topNCats = [(self._categoryList[nearestOrder[i]], dist[nearestOrder[i]])
              for i in range(topKCategories)]

  return winner, dist, topNCats
def closestTrainingPattern(self, inputPattern, cat):
  """Returns the closest training pattern to inputPattern that belongs to
  category "cat".

  :param inputPattern: The pattern whose closest neighbor is sought

  :param cat: The required category of closest neighbor

  :returns: A dense version of the closest training pattern, or None if no
      such patterns exist
  """
  dist = self._getDistances(inputPattern)
  # Walk the prototypes from nearest to farthest until the category matches.
  for patIdx in dist.argsort():
    if self._categoryList[patIdx] != cat:
      continue
    if self.useSparseMemory:
      return self._Memory.getRow(int(patIdx))
    return self._M[patIdx]

  # No stored pattern carries the requested category.
  return None
def getPattern(self, idx, sparseBinaryForm=False, cat=None):
  """Gets a training pattern either by index or category number.

  :param idx: Index of the training pattern

  :param sparseBinaryForm: If true, returns a list of the indices of the
      non-zero bits in the training pattern

  :param cat: If not None, get the first pattern belonging to category cat. If
      this is specified, idx must be None.

  :returns: The training pattern with specified index
  """
  if cat is not None:
    assert idx is None
    idx = self._categoryList.index(cat)

  if not self.useSparseMemory:
    # Dense storage: grab the row, optionally compress to active indices.
    pattern = self._Memory[idx]
    if sparseBinaryForm:
      pattern = pattern.nonzero()[0]
    return pattern

  # Sparse storage: either return the indices directly, or expand to a
  # dense 0/1 vector.
  (nz, values) = self._Memory.rowNonZeros(idx)
  if sparseBinaryForm:
    return nz
  pattern = numpy.zeros(self._Memory.nCols())
  numpy.put(pattern, nz, 1)
  return pattern
def getPartitionId(self, i):
  """
  Gets the partition id given an index.

  :param i: index of pattern
  :returns: the partition id associated with pattern i. Returns None if no id
      is associated with it.
  :raises RuntimeError: if i is outside [0, numPatterns).
  """
  if not (0 <= i < self._numPatterns):
    raise RuntimeError("index out of bounds")
  # numpy.inf is the internal sentinel for "no partition id".
  pid = self._partitionIdList[i]
  return None if pid == numpy.inf else pid
def _addPartitionId(self, index, partitionId=None): """ Adds partition id for pattern index """ if partitionId is None: self._partitionIdList.append(numpy.inf) else: self._partitionIdList.append(partitionId) indices = self._partitionIdMap.get(partitionId, []) indices.append(index) self._partitionIdMap[partitionId] = indices
def _rebuildPartitionIdMap(self, partitionIdList): """ Rebuilds the partition Id map using the given partitionIdList """ self._partitionIdMap = {} for row, partitionId in enumerate(partitionIdList): indices = self._partitionIdMap.get(partitionId, []) indices.append(row) self._partitionIdMap[partitionId] = indices
def _calcDistance(self, inputPattern, distanceNorm=None): """Calculate the distances from inputPattern to all stored patterns. All distances are between 0.0 and 1.0 :param inputPattern The pattern from which distances to all other patterns are calculated :param distanceNorm Degree of the distance norm """ if distanceNorm is None: distanceNorm = self.distanceNorm # Sparse memory if self.useSparseMemory: if self._protoSizes is None: self._protoSizes = self._Memory.rowSums() overlapsWithProtos = self._Memory.rightVecSumAtNZ(inputPattern) inputPatternSum = inputPattern.sum() if self.distanceMethod == "rawOverlap": dist = inputPattern.sum() - overlapsWithProtos elif self.distanceMethod == "pctOverlapOfInput": dist = inputPatternSum - overlapsWithProtos if inputPatternSum > 0: dist /= inputPatternSum elif self.distanceMethod == "pctOverlapOfProto": overlapsWithProtos /= self._protoSizes dist = 1.0 - overlapsWithProtos elif self.distanceMethod == "pctOverlapOfLarger": maxVal = numpy.maximum(self._protoSizes, inputPatternSum) if maxVal.all() > 0: overlapsWithProtos /= maxVal dist = 1.0 - overlapsWithProtos elif self.distanceMethod == "norm": dist = self._Memory.vecLpDist(self.distanceNorm, inputPattern) distMax = dist.max() if distMax > 0: dist /= distMax else: raise RuntimeError("Unimplemented distance method %s" % self.distanceMethod) # Dense memory else: if self.distanceMethod == "norm": dist = numpy.power(numpy.abs(self._M - inputPattern), self.distanceNorm) dist = dist.sum(1) dist = numpy.power(dist, 1.0/self.distanceNorm) dist /= dist.max() else: raise RuntimeError ("Not implemented yet for dense storage....") return dist
def _getDistances(self, inputPattern, partitionId=None): """Return the distances from inputPattern to all stored patterns. :param inputPattern The pattern from which distances to all other patterns are returned :param partitionId If provided, ignore all training vectors with this partitionId. """ if not self._finishedLearning: self.finishLearning() self._finishedLearning = True if self._vt is not None and len(self._vt) > 0: inputPattern = numpy.dot(self._vt, inputPattern - self._mean) sparseInput = self._sparsifyVector(inputPattern) # Compute distances dist = self._calcDistance(sparseInput) # Invalidate results where category is -1 if self._specificIndexTraining: dist[numpy.array(self._categoryList) == -1] = numpy.inf # Ignore vectors with this partition id by setting their distances to inf if partitionId is not None: dist[self._partitionIdMap.get(partitionId, [])] = numpy.inf return dist
def computeSVD(self, numSVDSamples=0, finalize=True):
  """
  Compute the singular value decomposition (SVD). The SVD is a
  factorization of a real or complex matrix. It factors the matrix `a` as
  `u * np.diag(s) * v`, where `u` and `v` are unitary and `s` is a 1-d array
  of `a`'s singular values.

  **Reason for computing the SVD:** There are cases where you want to feed a
  lot of vectors to the KNNClassifier. However, this can be slow. You can
  speed up training by (1) computing the SVD of the input patterns which will
  give you the eigenvectors, (2) only keeping a fraction of the eigenvectors,
  and (3) projecting the input patterns onto the remaining eigenvectors.

  Note that all input patterns are projected onto the eigenvectors in the
  same fashion. Keeping only the highest eigenvectors increases training
  performance since it reduces the dimensionality of the input.

  :param numSVDSamples: (int) the number of samples to use for the SVD
      computation; 0 means use all stored patterns.

  :param finalize: (bool) whether to apply SVD to the input patterns.

  :returns: (array) The singular values for every matrix, sorted in
      descending order.
  """
  if numSVDSamples == 0:
    numSVDSamples = self._numPatterns

  # Materialize the stored patterns densely, then center them. Note this
  # mutates the dense view in place, matching the pre-SVD pipeline.
  if not self.useSparseMemory:
    self._a = self._Memory[:self._numPatterns]
  else:
    self._a = self._Memory.toDense()[:self._numPatterns]

  self._mean = numpy.mean(self._a, axis=0)
  self._a -= self._mean

  u, self._s, self._vt = numpy.linalg.svd(self._a[:numSVDSamples])

  if finalize:
    self._finalizeSVD()

  return self._s
def getAdaptiveSVDDims(self, singularValues, fractionOfMax=0.001): """ Compute the number of eigenvectors (singularValues) to keep. :param singularValues: :param fractionOfMax: :return: """ v = singularValues/singularValues[0] idx = numpy.where(v<fractionOfMax)[0] if len(idx): print "Number of PCA dimensions chosen: ", idx[0], "out of ", len(v) return idx[0] else: print "Number of PCA dimensions chosen: ", len(v)-1, "out of ", len(v) return len(v)-1
def _finalizeSVD(self, numSVDDims=None):
  """
  Called by finalizeLearning(). This will project all the patterns onto the
  SVD eigenvectors.
  :param numSVDDims: (int) number of eigenvectors used for projection;
      defaults to self.numSVDDims (which may also be the string "adaptive").
  :return: None
  """
  if numSVDDims is not None:
    self.numSVDDims = numSVDDims

  # "adaptive" mode: derive the dimension count from the singular-value
  # spectrum produced by computeSVD().
  if self.numSVDDims=="adaptive":
    if self.fractionOfMax is not None:
        self.numSVDDims = self.getAdaptiveSVDDims(self._s, self.fractionOfMax)
    else:
        self.numSVDDims = self.getAdaptiveSVDDims(self._s)

  # We cannot keep more dimensions than the decomposition produced.
  if self._vt.shape[0] < self.numSVDDims:
    print "******************************************************************"
    print ("Warning: The requested number of PCA dimensions is more than "
           "the number of pattern dimensions.")
    print "Setting numSVDDims = ", self._vt.shape[0]
    print "******************************************************************"
    self.numSVDDims = self._vt.shape[0]

  self._vt = self._vt[:self.numSVDDims]

  # Added when svd is not able to decompose vectors - uses raw spare vectors
  if len(self._vt) == 0:
    return

  # Project every stored pattern into the reduced space. From here on the
  # classifier operates on dense projected vectors, so sparse mode is off.
  self._Memory = numpy.zeros((self._numPatterns,self.numSVDDims))
  self._M = self._Memory
  self.useSparseMemory = False

  for i in range(self._numPatterns):
    self._Memory[i] = numpy.dot(self._vt, self._a[i])

  # The centered dense copy is no longer needed.
  self._a = None
def remapCategories(self, mapping):
  """Change the category indices.

  Used by the Network Builder to keep the category indices in sync with the
  ImageSensor categoryInfo when the user renames or removes categories.

  :param mapping: List of new category indices. For example, mapping=[2,0,1]
      would change all vectors of category 0 to be category 2, category 1 to
      0, and category 2 to 1. Categories not covered by the mapping become -1.
  """
  categoryArray = numpy.array(self._categoryList)
  # Start from -1 so unmapped categories end up invalidated.
  newCategoryArray = numpy.full(categoryArray.shape[0], -1.0)
  # PORTABILITY FIX: `range` instead of Python-2-only `xrange`
  # (identical behavior in Python 2).
  for oldCat in range(len(mapping)):
    newCategoryArray[categoryArray == oldCat] = mapping[oldCat]
  self._categoryList = list(newCategoryArray)
def setCategoryOfVectors(self, vectorIndices, categoryIndices):
  """Change the category associated with this vector(s).

  Used by the Network Builder to move vectors between categories, to enable
  categories, and to invalidate vectors by setting the category to -1.

  :param vectorIndices: Single index or list of indices
  :param categoryIndices: Single index or list of indices. Can also be a
      single index when vectorIndices is a list, in which case the same
      category will be used for all vectors
  """
  # Normalize both arguments into equal-length sequences
  if not hasattr(vectorIndices, "__iter__"):
    vectorIndices = [vectorIndices]
    categoryIndices = [categoryIndices]
  elif not hasattr(categoryIndices, "__iter__"):
    categoryIndices = [categoryIndices] * len(vectorIndices)

  numStored = len(self._categoryList)
  for vectorIndex, categoryIndex in zip(vectorIndices, categoryIndices):
    # Out-of-bounds is not an error, because the KNN may not have seen the
    # vector yet
    if vectorIndex < numStored:
      self._categoryList[vectorIndex] = categoryIndex
def getNextRecord(self):
  """
  Get the next record to encode. Includes getting a record from the
  `dataSource` and applying filters. If the filters request more data from the
  `dataSource` continue to get data from the `dataSource` until all filters
  are satisfied. This method is separate from :meth:`.RecordSensor.compute` so
  that we can use a standalone :class:`.RecordSensor` to get filtered data.
  """
  while True:
    # Pull one raw record from the data source
    data = self.dataSource.getNextRecordDict()
    if not data:
      raise StopIteration("Datasource has no more data")

    # Fill in any missing bookkeeping fields with their defaults
    # (temporary check, as in the original implementation)
    data.setdefault("_reset", 0)
    data.setdefault("_sequenceId", 0)
    data.setdefault("_category", [None])

    data, allFiltersHaveEnoughData = self.applyFilters(data)
    if allFiltersHaveEnoughData:
      break

  self.lastRecord = data
  return data
def applyFilters(self, data):
  """
  Apply pre-encoding filters. These filters may modify or add data. If a
  filter needs another record (e.g. a delta filter) it will request another
  record by returning False and the current record will be skipped (but will
  still be given to all filters).

  We have to be very careful about resets. A filter may add a reset, but other
  filters should not see the added reset, each filter sees the original reset
  value, and we keep track of whether any filter adds a reset.

  :param data: (dict) The data that will be processed by the filter.
  :returns: (tuple) with the data processed by the filter and a boolean to
            know whether or not the filter needs more data.
  """
  if self.verbosity > 0:
    print("RecordSensor got data: %s" % data)

  allFiltersHaveEnoughData = True
  if self.preEncodingFilters:
    originalReset = data['_reset']
    actualReset = originalReset
    for preFilter in self.preEncodingFilters:
      # A filter that needs more data returns False from process()
      filterHasEnoughData = preFilter.process(data)
      allFiltersHaveEnoughData = (allFiltersHaveEnoughData and
                                  filterHasEnoughData)
      # Remember any reset a filter added, then restore the original value
      # so the next filter sees the unmodified reset
      actualReset = actualReset or data['_reset']
      data['_reset'] = originalReset
    data['_reset'] = actualReset

  return data, allFiltersHaveEnoughData
def populateCategoriesOut(self, categories, output):
  """
  Populate the output array with the category indices.

  .. note:: Non-categories are represented with ``-1``.

  :param categories: (list) category indices for the current record; a
         leading ``None`` means the record carried no category
  :param output: (numpy array) category output, will be overwritten
  """
  if categories[0] is None:
    # The record has no entry in category field.
    output.fill(-1)
  else:
    # Copy categories into the output, truncating when the record has more
    # categories than output slots, and padding the remainder with -1.
    nCopy = min(len(categories), len(output))
    for slot in range(nCopy):
      output[slot] = categories[slot]
    output[nCopy:] = -1
def compute(self, inputs, outputs):
  """
  Get a record from the dataSource and encode it.

  Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.compute`.

  Bottom-up mode (``topDownMode`` False): fetches and encodes the next
  record, populating dataOut/sourceOut/resetOut/sequenceIdOut/categoryOut
  and, when a predicted field is configured, bucketIdxOut/actValueOut.
  Top-down mode: decodes spatialTopDownIn/temporalTopDownIn back into
  field values via the encoder.
  """
  if not self.topDownMode:
    data = self.getNextRecord()

    # The private keys in data are standard of RecordStreamIface objects. Any
    # add'l keys are column headers from the data source.
    reset = data["_reset"]
    sequenceId = data["_sequenceId"]
    categories = data["_category"]

    # Encode the processed records; populate outputs["dataOut"] in place
    self.encoder.encodeIntoArray(data, outputs["dataOut"])

    # If there is a field to predict, set bucketIdxOut and actValueOut.
    # There is a special case where a predicted field might be a vector, as in
    # the CoordinateEncoder. Since this encoder does not provide bucket
    # indices for prediction, we will ignore it.
    if self.predictedField is not None and self.predictedField != "vector":
      allEncoders = list(self.encoder.encoders)
      if self.disabledEncoder is not None:
        allEncoders.extend(self.disabledEncoder.encoders)

      # Encoder tuples are (fieldName, encoder, ...); match by field name
      encoders = [e for e in allEncoders
                  if e[0] == self.predictedField]
      if len(encoders) == 0:
        raise ValueError("There is no encoder for set for the predicted "
                         "field: %s" % self.predictedField)
      # TODO: Figure out why there are sometimes multiple encoders with the
      # same name.
      #elif len(encoders) > 1:
      #  raise ValueError("There cant' be more than 1 encoder for the "
      #                   "predicted field: %s" % self.predictedField)
      else:
        encoder = encoders[0][1]

        actualValue = data[self.predictedField]
        outputs["bucketIdxOut"][:] = encoder.getBucketIndices(actualValue)
        # String values can't be written into the numeric actValueOut array,
        # so their bucket index is used instead
        if isinstance(actualValue, str):
          outputs["actValueOut"][:] = encoder.getBucketIndices(actualValue)
        else:
          outputs["actValueOut"][:] = actualValue

    # Write out the scalar values obtained from they data source.
    outputs["sourceOut"][:] = self.encoder.getScalars(data)
    self._outputValues["sourceOut"] = self.encoder.getEncodedValues(data)

    # -----------------------------------------------------------------------
    # Get the encoded bit arrays for each field: slice dataOut into one
    # sub-array per encoder, using each encoder's width as the stride
    encoders = self.encoder.getEncoderList()
    prevOffset = 0
    sourceEncodings = []
    bitData = outputs["dataOut"]
    for encoder in encoders:
      nextOffset = prevOffset + encoder.getWidth()
      sourceEncodings.append(bitData[prevOffset:nextOffset])
      prevOffset = nextOffset
    self._outputValues['sourceEncodings'] = sourceEncodings

    # Execute post-encoding filters, if any
    for filter in self.postEncodingFilters:
      filter.process(encoder=self.encoder, data=outputs['dataOut'])

    # Populate the output numpy arrays; must assign by index.
    outputs['resetOut'][0] = reset
    outputs['sequenceIdOut'][0] = sequenceId
    self.populateCategoriesOut(categories, outputs['categoryOut'])

    # ------------------------------------------------------------------------
    # Verbose print?
    if self.verbosity >= 1:
      if self._iterNum == 0:
        self.encoder.pprintHeader(prefix="sensor:")
      if reset:
        print "RESET - sequenceID:%d" % sequenceId

    if self.verbosity >= 2:
      print

    # If verbosity >=2, print the record fields
    if self.verbosity >= 1:
      self.encoder.pprint(outputs["dataOut"], prefix="%7d:" % (self._iterNum))
      scalarValues = self.encoder.getScalars(data)
      nz = outputs["dataOut"].nonzero()[0]
      print " nz: (%d)" % (len(nz)), nz
      print " encIn:", self.encoder.scalarsToStr(scalarValues)

    if self.verbosity >= 2:
      # if hasattr(data, 'header'):
      #   header = data.header()
      # else:
      #   header = ' '.join(self.dataSource.names)
      # print "  ", header
      print " data:", str(data)

    if self.verbosity >= 3:
      decoded = self.encoder.decode(outputs["dataOut"])
      print "decoded:", self.encoder.decodedToStr(decoded)

    self._iterNum += 1

  else:
    # ========================================================================
    # Spatial
    # ========================================================================
    # This is the top down compute in sensor

    # We get the spatial pooler's topDownOut as spatialTopDownIn
    spatialTopDownIn = inputs['spatialTopDownIn']
    spatialTopDownOut = self.encoder.topDownCompute(spatialTopDownIn)

    # -----------------------------------------------------------------------
    # Split topDownOutput into seperate outputs
    values = [elem.value for elem in spatialTopDownOut]
    scalars = [elem.scalar for elem in spatialTopDownOut]
    encodings = [elem.encoding for elem in spatialTopDownOut]

    self._outputValues['spatialTopDownOut'] = values
    outputs['spatialTopDownOut'][:] = numpy.array(scalars)
    self._outputValues['spatialTopDownEncodings'] = encodings

    # ========================================================================
    # Temporal
    # ========================================================================
    ## TODO: Add temporal top-down loop

    # We get the temporal memory's topDownOut passed through the spatial
    # pooler as temporalTopDownIn
    temporalTopDownIn = inputs['temporalTopDownIn']
    temporalTopDownOut = self.encoder.topDownCompute(temporalTopDownIn)

    # -----------------------------------------------------------------------
    # Split topDownOutput into separate outputs
    values = [elem.value for elem in temporalTopDownOut]
    scalars = [elem.scalar for elem in temporalTopDownOut]
    encodings = [elem.encoding for elem in temporalTopDownOut]

    self._outputValues['temporalTopDownOut'] = values
    outputs['temporalTopDownOut'][:] = numpy.array(scalars)
    self._outputValues['temporalTopDownEncodings'] = encodings

    assert len(spatialTopDownOut) == len(temporalTopDownOut), (
      "Error: spatialTopDownOut and temporalTopDownOut should be the same "
      "size")
def _convertNonNumericData(self, spatialOutput, temporalOutput, output):
  """
  Converts all of the non-numeric fields from spatialOutput and temporalOutput
  into their scalar equivalents and records them in the output dictionary.

  :param spatialOutput: The results of topDownCompute() for the spatial input.
  :param temporalOutput: The results of topDownCompute() for the temporal
         input.
  :param output: The main dictionary of outputs passed to compute(). It is
         expected to have keys 'spatialTopDownOut' and 'temporalTopDownOut'
         that are mapped to numpy arrays.
  """
  fieldEncoders = self.encoder.getEncoderList()
  fieldTypes = self.encoder.getDecoderOutputFieldTypes()
  numericTypes = (FieldMetaType.integer, FieldMetaType.float)

  for idx, (fieldEncoder, fieldType) in enumerate(zip(fieldEncoders,
                                                      fieldTypes)):
    spatialValue = spatialOutput[idx]
    temporalValue = temporalOutput[idx]
    if fieldType not in numericTypes:
      # Non-numeric field: map the decoded value back onto a scalar
      # TODO: Make sure that this doesn't modify any state
      spatialValue = fieldEncoder.getScalars(spatialValue)[0]
      temporalValue = fieldEncoder.getScalars(temporalValue)[0]
    assert isinstance(spatialValue, (float, int))
    assert isinstance(temporalValue, (float, int))
    output['spatialTopDownOut'][idx] = spatialValue
    output['temporalTopDownOut'][idx] = temporalValue
def getOutputElementCount(self, name):
  """
  Computes the width of dataOut.

  Overrides
  :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.

  :param name: (string) name of the output to size
  :returns: (int) number of elements in the named output
  :raises Exception: if the encoder is needed but has not been set, or if
      the output name is unknown
  """
  if name == "resetOut":
    print("WARNING: getOutputElementCount should not have been called with "
          "resetOut")
    return 1

  elif name == "sequenceIdOut":
    print("WARNING: getOutputElementCount should not have been called with "
          "sequenceIdOut")
    return 1

  elif name == "dataOut":
    if self.encoder is None:
      raise Exception("NuPIC requested output element count for 'dataOut' "
                      "on a RecordSensor node, but the encoder has not "
                      "been set")
    return self.encoder.getWidth()

  elif name == "sourceOut":
    if self.encoder is None:
      raise Exception("NuPIC requested output element count for 'sourceOut' "
                      "on a RecordSensor node, "
                      "but the encoder has not been set")
    return len(self.encoder.getDescription())

  elif name == "bucketIdxOut":
    return 1

  elif name == "actValueOut":
    return 1

  elif name == "categoryOut":
    return self.numCategories

  elif name == 'spatialTopDownOut' or name == 'temporalTopDownOut':
    if self.encoder is None:
      # Fixed: this message previously referred to 'sourceOut' (copy-paste)
      raise Exception("NuPIC requested output element count for '%s' "
                      "on a RecordSensor node, "
                      "but the encoder has not been set" % name)
    return len(self.encoder.getDescription())
  else:
    raise Exception("Unknown output %s" % name)
def setParameter(self, parameterName, index, parameterValue):
  """
  Set the value of a Spec parameter. Most parameters are handled
  automatically by PyRegion's parameter set mechanism. The ones that need
  special treatment are explicitly handled here.
  """
  if parameterName == 'topDownMode':
    self.topDownMode = parameterValue
    return
  if parameterName == 'predictedField':
    self.predictedField = parameterValue
    return
  raise Exception('Unknown parameter: ' + parameterName)
def writeToProto(self, proto):
  """
  Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.

  Writes the region's scalar settings and encoders into the ``proto``
  builder.
  """
  # Scalar settings; topDownMode is stored as an int flag
  proto.topDownMode = int(self.topDownMode)
  proto.verbosity = self.verbosity
  proto.numCategories = self.numCategories
  # Encoders serialize themselves into their own sub-builders
  self.encoder.write(proto.encoder)
  if self.disabledEncoder is not None:
    self.disabledEncoder.write(proto.disabledEncoder)
def readFromProto(cls, proto):
  """
  Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.

  Reconstructs a region instance from the ``proto`` reader.
  """
  region = cls()
  region.topDownMode = bool(proto.topDownMode)
  region.verbosity = proto.verbosity
  region.numCategories = proto.numCategories
  region.encoder = MultiEncoder.read(proto.encoder)
  # NOTE(review): a capnp sub-struct reader is typically never None, so this
  # guard presumably always fires -- confirm against the schema.
  if proto.disabledEncoder is not None:
    region.disabledEncoder = MultiEncoder.read(proto.disabledEncoder)
  return region
def computeAccuracy(model, size, top):
  """
  Compute prediction accuracy by checking if the next page in the sequence is
  within the top N predictions calculated by the model

  Args:
    model: HTM model
    size: Sample size (number of user sessions to evaluate)
    top: top N predictions to use

  Returns: Probability the next page in the sequence is within the top N
           predicted pages
  """
  accuracy = []

  # Load MSNBC web data file (sequence file shipped next to this script)
  filename = os.path.join(os.path.dirname(__file__), "msnbc990928.zip")
  with zipfile.ZipFile(filename) as archive:
    with archive.open("msnbc990928.seq") as datafile:

      # Skip header lines (first 7 lines)
      for _ in xrange(7):
        next(datafile)

      # Skip learning data and compute accuracy using only new sessions
      # (LEARNING_RECORDS is a module-level constant defined elsewhere)
      for _ in xrange(LEARNING_RECORDS):
        next(datafile)

      # Compute prediction accuracy by checking if the next page in the
      # sequence is within the top N predictions calculated by the model
      for _ in xrange(size):
        pages = readUserSession(datafile)
        # Each session is an independent sequence for the model
        model.resetSequenceStates()
        for i in xrange(len(pages) - 1):
          result = model.run({"page": pages[i]})
          inferences = result.inferences["multiStepPredictions"][1]

          # Get top N predictions for the next page, ranked by likelihood
          predicted = sorted(inferences.items(), key=itemgetter(1),
                             reverse=True)[:top]

          # Check if the next page is within the predicted pages
          accuracy.append(1 if pages[i + 1] in zip(*predicted)[0] else 0)

  return np.mean(accuracy)
def readUserSession(datafile):
  """
  Reads the user session record from the file's cursor position

  Args:
    datafile: Data file whose cursor points at the beginning of the record

  Returns: list of pages in the order clicked by the user (empty list at
           end of file)
  """
  for line in datafile:
    pages = line.split()
    # Keep only sessions with 2 to 500 page views: single-page sessions
    # carry no sequence information, and extremely long ones are outliers.
    if 2 <= len(pages) <= 500:
      return [PAGE_CATEGORIES[int(i) - 1] for i in pages]
  return []
def rewind(self):
  """
  Put us back at the beginning of the file again.
  """
  # Superclass rewind
  super(FileRecordStream, self).rewind()

  # Reopen the file and rebuild the csv reader from scratch
  self.close()
  self._file = open(self._filename, self._mode)
  self._reader = csv.reader(self._file, dialect="excel")

  # Skip the three header rows (names, types, specials)
  for _ in range(3):
    self._reader.next()

  # Reset record count, etc.
  self._recordCount = 0
def getNextRecord(self, useCache=True):
  """
  Returns next available data record from the file.

  :returns: a data row (a list or tuple) if available; None, if no more
            records in the table (End of Stream - EOS); empty sequence (list
            or tuple) when timing out while waiting for the next record.
  """
  assert self._file is not None
  assert self._mode == self._FILE_READ_MODE

  # Fetch the next raw CSV line, rewinding at EOF when so configured
  try:
    line = self._reader.next()
  except StopIteration:
    if not self.rewindAtEOF:
      return None
    if self._recordCount == 0:
      raise Exception("The source configured to reset at EOF but "
                      "'%s' appears to be empty" % self._filename)
    self.rewind()
    line = self._reader.next()

  # Keep score of how many records were read
  self._recordCount += 1

  # Convert each text field to a Python object. A missing value (empty
  # string) is replaced with a sentinel so numeric consumers see a
  # well-defined value; for string fields the adapter keeps the value as-is.
  record = []
  for fieldIndex, rawValue in enumerate(line):
    if rawValue in self._missingValues:
      record.append(SENTINEL_VALUE_FOR_MISSING_DATA)
    else:
      # either there is valid data, or the field is string type,
      # in which case the adapter does the right thing by default
      record.append(self._adapters[fieldIndex](rawValue))

  return record
def appendRecord(self, record):
  """
  Saves the record in the underlying csv file.

  :param record: a list of Python objects that will be string-ified
  """
  assert self._file is not None
  assert self._mode == self._FILE_WRITE_MODE
  assert isinstance(record, (list, tuple)), \
        "unexpected record type: " + repr(type(record))

  assert len(record) == self._fieldCount, \
        "len(record): %s, fieldCount: %s" % (len(record), self._fieldCount)

  # First record: emit the three header rows (names, types, specials)
  if self._recordCount == 0:
    names, types, specials = zip(*self.getFields())
    for headerRow in (names, types, specials):
      self._writer.writerow(headerRow)

  # Keep track of sequences, make sure time flows forward
  self._updateSequenceInfo(record)

  # String-ify each field via its adapter and write the row
  self._writer.writerow([self._adapters[i](f) for i, f in enumerate(record)])
  self._recordCount += 1
def appendRecords(self, records, progressCB=None):
  """
  Saves multiple records in the underlying storage.

  :param records: array of records as in
                  :meth:`~.FileRecordStream.appendRecord`
  :param progressCB: (function) callback to report progress
  """
  for rec in records:
    self.appendRecord(rec)
    # Report progress after each record, if the caller asked for it
    if progressCB is not None:
      progressCB()
def getBookmark(self):
  """
  Gets a bookmark or anchor to the current position.

  :returns: an anchor to the current position in the data. Passing this
            anchor to a constructor makes the current position to be the first
            returned record.
  """
  # A writer that has not emitted anything yet has no meaningful position
  if self._write and self._recordCount == 0:
    return None

  return json.dumps({
      'filepath': os.path.realpath(self._filename),
      'currentRow': self._recordCount,
  })
def seekFromEnd(self, numRecords):
  """
  Seeks to ``numRecords`` from the end and returns a bookmark to the new
  position.

  :param numRecords: how far to seek from end of file.
  :return: bookmark to desired location.
  """
  # NOTE(review): _getTotalLineCount() returns a LINE count, but file.seek()
  # expects a BYTE offset, so this lands at an essentially arbitrary byte
  # position unless every line is one byte long. It also leaves
  # self._recordCount untouched, which getBookmark() reads. Suspected
  # long-standing bug; confirm the intended semantics before relying on it.
  self._file.seek(self._getTotalLineCount() - numRecords)
  return self.getBookmark()
def getStats(self):
  """
  Parse the file using dedicated reader and collect fields stats. Never
  called if user of :class:`~.FileRecordStream` does not invoke
  :meth:`~.FileRecordStream.getStats` method.

  :returns: a dictionary of stats. In the current implementation, min and
    max fields are supported. Example of the return dictionary is:

    .. code-block:: python

      {
        'min' : [f1_min, f2_min, None, None, fn_min],
        'max' : [f1_max, f2_max, None, None, fn_max]
      }

    (where fx_min/fx_max are set for scalar fields, or None if not)
  """
  # Collect stats only once per File object; use a fresh csv reader so the
  # main iterator keeps returning sequential records no matter when the
  # caller asks for stats
  if self._stats is None:
    # Stats are only available when reading a csv file
    assert self._mode == self._FILE_READ_MODE

    # Use a context manager so the extra handle is always closed (the
    # previous implementation leaked it)
    with open(self._filename, self._FILE_READ_MODE) as inFile:
      reader = csv.reader(inFile, dialect="excel")

      # Read the header rows: names, types; the specials row is skipped
      names = [n.strip() for n in next(reader)]
      types = [t.strip() for t in next(reader)]
      next(reader)

      # Initialize stats to all None
      self._stats = {
          'min': [None] * len(names),
          'max': [None] * len(names),
      }

      # Scan the data rows, tracking min/max for numeric fields only
      numericTypes = (FieldMetaType.integer, FieldMetaType.float)
      for line in reader:
        for i, f in enumerate(line):
          if (len(types) > i and types[i] in numericTypes
              and f not in self._missingValues):
            value = self._adapters[i](f)
            if self._stats['max'][i] is None or self._stats['max'][i] < value:
              self._stats['max'][i] = value
            if self._stats['min'][i] is None or self._stats['min'][i] > value:
              self._stats['min'][i] = value

  return self._stats
def _updateSequenceInfo(self, r): """Keep track of sequence and make sure time goes forward Check if the current record is the beginning of a new sequence A new sequence starts in 2 cases: 1. The sequence id changed (if there is a sequence id field) 2. The reset field is 1 (if there is a reset field) Note that if there is no sequenceId field or resetId field then the entire dataset is technically one big sequence. The function will not return True for the first record in this case. This is Ok because it is important to detect new sequences only when there are multiple sequences in the file. """ # Get current sequence id (if any) newSequence = False sequenceId = (r[self._sequenceIdIdx] if self._sequenceIdIdx is not None else None) if sequenceId != self._currSequence: # verify that the new sequence didn't show up before if sequenceId in self._sequences: raise Exception('Broken sequence: %s, record: %s' % \ (sequenceId, r)) # add the finished sequence to the set of sequence self._sequences.add(self._currSequence) self._currSequence = sequenceId # Verify that the reset is consistent (if there is one) if self._resetIdx: assert r[self._resetIdx] == 1 newSequence = True else: # Check the reset reset = False if self._resetIdx: reset = r[self._resetIdx] if reset == 1: newSequence = True # If it's still the same old sequence make sure the time flows forward if not newSequence: if self._timeStampIdx and self._currTime is not None: t = r[self._timeStampIdx] if t < self._currTime: raise Exception('No time travel. Early timestamp for record: %s' % r) if self._timeStampIdx: self._currTime = r[self._timeStampIdx]
def _getStartRow(self, bookmark): """ Extracts start row from the bookmark information """ bookMarkDict = json.loads(bookmark) realpath = os.path.realpath(self._filename) bookMarkFile = bookMarkDict.get('filepath', None) if bookMarkFile != realpath: print ("Ignoring bookmark due to mismatch between File's " "filename realpath vs. bookmark; realpath: %r; bookmark: %r") % ( realpath, bookMarkDict) return 0 else: return bookMarkDict['currentRow']
def _getTotalLineCount(self): """ Returns: count of ALL lines in dataset, including header lines """ # Flush the file before we open it again to count lines if self._mode == self._FILE_WRITE_MODE: self._file.flush() return sum(1 for line in open(self._filename, self._FILE_READ_MODE))
def getDataRowCount(self):
  """
  :returns: (int) count of data rows in dataset (excluding header lines)
  """
  totalLines = self._getTotalLineCount()

  if totalLines == 0:
    # An empty file may legitimately exist when a writer has been opened
    # before the header rows are written out
    assert self._mode == self._FILE_WRITE_MODE and self._recordCount == 0
    return 0

  dataRows = totalLines - self._NUM_HEADER_ROWS
  assert dataRows >= 0
  return dataRows
def runIoThroughNupic(inputData, model, gymName, plot):
  """
  Handles looping over the input data and passing each row into the given model
  object, as well as extracting the result object and passing it into an output
  handler.

  :param inputData: file path to input data CSV
  :param model: OPF Model object
  :param gymName: Gym name, used for output handler naming
  :param plot: Whether to use matplotlib or not. If false, uses file output.
  """
  inputFile = open(inputData, "rb")
  csvReader = csv.reader(inputFile)
  # skip header rows (names, types, specials)
  csvReader.next()
  csvReader.next()
  csvReader.next()

  # InferenceShifter aligns predictions with the rows they predict, which is
  # only needed for the plot output
  shifter = InferenceShifter()
  if plot:
    output = nupic_anomaly_output.NuPICPlotOutput(gymName)
  else:
    output = nupic_anomaly_output.NuPICFileOutput(gymName)

  counter = 0
  for row in csvReader:
    counter += 1
    # Lightweight progress indicator
    if (counter % 100 == 0):
      print "Read %i lines..." % counter
    timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
    consumption = float(row[1])
    # Feed one record into the model and collect its inference results
    result = model.run({
      "timestamp": timestamp,
      "kw_energy_consumption": consumption
    })

    if plot:
      result = shifter.shift(result)

    # 1-step-ahead best prediction plus the anomaly score for this record
    prediction = result.inferences["multiStepBestPredictions"][1]
    anomalyScore = result.inferences["anomalyScore"]
    output.write(timestamp, consumption, prediction, anomalyScore)

  inputFile.close()
  output.close()
def topDownCompute(self, encoded):
  """[ScalarEncoder class method override]

  Decode ``encoded`` to a delta scalar and add it back onto the previous
  absolute value to produce an absolute result.

  :param encoded: encoded output to decode
  :returns: list with a single EncoderResult holding the reconstructed
            absolute value, or a zero result before any input has been seen
  """
  # Until both a previous absolute value and a previous delta exist there
  # is nothing to reconstruct from
  if self._prevAbsolute is None or self._prevDelta is None:
    return [EncoderResult(value=0, scalar=0,
                          encoding=numpy.zeros(self.n))]
  ret = self._adaptiveScalarEnc.topDownCompute(encoded)
  # Shift the decoded delta back into absolute terms. (The guard above
  # already guarantees _prevAbsolute is not None, so the former redundant
  # second check has been removed.)
  ret = [EncoderResult(value=ret[0].value + self._prevAbsolute,
                       scalar=ret[0].scalar + self._prevAbsolute,
                       encoding=ret[0].encoding)]
  return ret
def isTemporal(inferenceElement):
  """ Returns True if the inference from this timestep is predicted the input
  for the NEXT timestep.

  NOTE: This should only be checked IF THE MODEL'S INFERENCE TYPE IS ALSO
  TEMPORAL. That is, a temporal model CAN have non-temporal inference
  elements, but a non-temporal model CANNOT have temporal inference elements
  """
  # Lazily build the class-level set of temporal elements on first use
  if InferenceElement.__temporalInferenceElements is None:
    InferenceElement.__temporalInferenceElements = {
        InferenceElement.prediction}
  return inferenceElement in InferenceElement.__temporalInferenceElements
def getTemporalDelay(inferenceElement, key=None):
  """ Returns the number of records that elapse between when an inference is
  made and when the corresponding input record will appear. For example, a
  multistep prediction for 3 timesteps out will have a delay of 3.

  Parameters:
  -----------------------------------------------------------------------
  inferenceElement: The InferenceElement value being delayed
  key:              If the inference is a dictionary type, this specifies
                    key for the sub-inference that is being delayed
  """
  # Next-step predictions (and their encodings) lag their input by one record
  if inferenceElement in (InferenceElement.prediction,
                          InferenceElement.encodings):
    return 1

  # Multistep inferences are delayed by the step count encoded in the key
  if inferenceElement in (InferenceElement.multiStepPredictions,
                          InferenceElement.multiStepBestPredictions):
    return int(key)

  # Classification, anomaly scores, and anything unrecognized line up with
  # their input record
  return 0
def getMaxDelay(inferences):
  """
  Returns the maximum delay for the InferenceElements in the inference
  dictionary

  Parameters:
  -----------------------------------------------------------------------
  inferences:   A dictionary where the keys are InferenceElements
  """
  maxDelay = 0
  for inferenceElement, inference in inferences.items():
    if isinstance(inference, dict):
      # Dict-valued inferences (e.g. multistep) carry a delay per sub-key
      for key in inference:
        maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement,
                                                         key),
                       maxDelay)
    else:
      maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement),
                     maxDelay)
  return maxDelay
def isTemporal(inferenceType):
  """ Returns True if the inference type is 'temporal', i.e. requires a
  temporal memory in the network.
  """
  # Lazily build the class-level set of temporal inference types
  if InferenceType.__temporalInferenceTypes is None:
    InferenceType.__temporalInferenceTypes = {
        InferenceType.TemporalNextStep,
        InferenceType.TemporalClassification,
        InferenceType.TemporalAnomaly,
        InferenceType.TemporalMultiStep,
        InferenceType.NontemporalMultiStep}
  return inferenceType in InferenceType.__temporalInferenceTypes
def Enum(*args, **kwargs):
  """
  Utility function for creating enumerations in python

  Note: each positional arg maps to itself (name == value), so the enum
  values are the label strings themselves.

  Example Usage:
    >> Color = Enum("Red", "Green", "Blue", "Magenta")
    >> print Color.Red
    >> 'Red'
    >> print Color.Green
    >> 'Green'
    >> print Color.Blue
    >> 'Blue'
    >> print Color.Magenta
    >> 'Magenta'
    >> Color.Violet
    >> 'violet'
    >> Color.getLabel(Color.Red)
    >> 'Red'
    >> Color.getLabel('Blue')
    >> 'Blue'

  """
  def getLabel(cls, val):
    """ Get a string label for the current value of the enum """
    return cls.__labels[val]

  def validate(cls, val):
    """ Returns True if val is a valid value for the enumeration """
    return val in cls.__values

  def getValues(cls):
    """ Returns a list of all the possible values for this enum """
    return list(cls.__values)

  def getLabels(cls):
    """ Returns a list of all possible labels for this enum """
    return list(cls.__labels.values())

  def getValue(cls, label):
    """ Returns value given a label """
    return cls.__labels[label]

  # Every enum entry (positional or keyword) must be a valid identifier
  for arg in list(args)+kwargs.keys():
    if type(arg) is not str:
      raise TypeError("Enum arg {0} must be a string".format(arg))

    if not __isidentifier(arg):
      raise ValueError("Invalid enum value '{0}'. "\
                       "'{0}' is not a valid identifier".format(arg))

  # Positional args map name -> name (the commented line shows the older
  # name -> ordinal behavior)
  #kwargs.update(zip(args, range(len(args))))
  kwargs.update(zip(args, args))
  newType = type("Enum", (object,), kwargs)

  # __labels maps value -> name; since names map to themselves the two
  # directions coincide for positional args
  newType.__labels = dict( (v,k) for k,v in kwargs.iteritems())
  newType.__values = set(newType.__labels.keys())
  # Bind the helpers with the new type pre-applied as 'cls'
  newType.getLabel = functools.partial(getLabel, newType)
  newType.validate = functools.partial(validate, newType)
  newType.getValues = functools.partial(getValues, newType)
  newType.getLabels = functools.partial(getLabels, newType)
  newType.getValue = functools.partial(getValue, newType)

  return newType
def makeDirectoryFromAbsolutePath(absDirPath):
  """ Makes directory for the given directory path with default permissions.
  If the directory already exists, it is treated as success.

  absDirPath:   absolute path of the directory to create.

  Returns:      absDirPath arg

  Exceptions:   OSError if directory creation fails
  """
  import errno

  assert os.path.isabs(absDirPath)

  try:
    os.makedirs(absDirPath)
  except OSError as e:
    # Treat "already exists" as success; re-raise anything else.
    # Uses 'except ... as' (the py2-only 'except OSError, e' form breaks on
    # Python 3) and the errno module directly: os.errno was an undocumented
    # alias removed in modern Python.
    if e.errno != errno.EEXIST:
      raise

  return absDirPath
def _readConfigFile(cls, filename, path=None):
  """ Parse the given XML file and return a dict describing the file.

  NOTE(review): takes ``cls`` as its first argument -- presumably registered
  as a @classmethod on the enclosing Configuration class; confirm at the
  class definition (outside this view).

  Parameters:
  ----------------------------------------------------------------
  filename:  name of XML file to parse (no path)
  path:      path of the XML file. If None, then use the standard
             configuration search path.
  retval:    returns a dict with each property as a key and a dict of all
             the property's attributes as value

  Raises RuntimeError for an unreadable file, a top-level element other
  than <configuration>, a property missing a value, or an unresolvable
  ${env.VAR} substitution; any failure is logged before being re-raised.
  """
  outputProperties = dict()

  # Get the path to the config files.
  if path is None:
    filePath = cls.findConfigFile(filename)
  else:
    filePath = os.path.join(path, filename)

  # ------------------------------------------------------------------
  # Read in the config file
  try:
    if filePath is not None:
      try:
        # Use warn since console log level is set to warning
        _getLoggerBase().debug("Loading config file: %s", filePath)
        with open(filePath, 'r') as inp:
          contents = inp.read()
      except Exception:
        # Any read failure (permissions, deleted after the search, ...) is
        # normalized into a RuntimeError naming the expected location.
        raise RuntimeError("Expected configuration file at %s" % filePath)
    else:
      # If the file was not found in the normal search paths, which includes
      # checking the NTA_CONF_PATH, we'll try loading it from pkg_resources.
      try:
        contents = resource_string("nupic.support", filename)
      except Exception as resourceException:
        # We expect these to be read, and if they don't exist we'll just use
        # an empty configuration string.
        if filename in [USER_CONFIG, CUSTOM_CONFIG]:
          contents = '<configuration/>'
        else:
          raise resourceException

    elements = ElementTree.XML(contents)

    if elements.tag != 'configuration':
      raise RuntimeError("Expected top-level element to be 'configuration' "
                         "but got '%s'" % (elements.tag))

    # ------------------------------------------------------------------
    # Add in each property found
    propertyElements = elements.findall('./property')

    for propertyItem in propertyElements:

      propInfo = dict()

      # Parse this property element: each child element's tag becomes a dict
      # key (e.g. 'name', 'value') and its text content the dict value.
      propertyAttributes = list(propertyItem)
      for propertyAttribute in propertyAttributes:
        propInfo[propertyAttribute.tag] = propertyAttribute.text

      # Get the name
      name = propInfo.get('name', None)

      # value is allowed to be empty string (an empty <value/> element
      # parses to text None, which we map back to '')
      if 'value' in propInfo and propInfo['value'] is None:
        value = ''
      else:
        value = propInfo.get('value', None)

      if value is None:
        if 'novalue' in propInfo:
          # Placeholder "novalue" properties are intended to be overridden
          # via dynamic configuration or another configuration layer.
          continue
        else:
          raise RuntimeError("Missing 'value' element within the property "
                             "element: => %s " % (str(propInfo)))

      # The value is allowed to contain substitution tags of the form
      # ${env.VARNAME}, which should be substituted with the corresponding
      # environment variable values
      restOfValue = value
      value = ''
      while True:
        # Find the beginning of substitution tag
        pos = restOfValue.find('${env.')
        if pos == -1:
          # No more environment variable substitutions
          value += restOfValue
          break

        # Append prefix to value accumulator
        value += restOfValue[0:pos]

        # Find the end of current substitution tag
        varTailPos = restOfValue.find('}', pos)
        if varTailPos == -1:
          raise RuntimeError(
            "Trailing environment variable tag delimiter '}'"
            " not found in %r" % (restOfValue))

        # Extract environment variable name from tag ('${env.' is 6 chars)
        varname = restOfValue[pos + 6:varTailPos]
        if varname not in os.environ:
          raise RuntimeError("Attempting to use the value of the environment"
                             " variable %r, which is not defined" % (
                               varname))
        envVarValue = os.environ[varname]

        value += envVarValue
        restOfValue = restOfValue[varTailPos + 1:]

      # Check for errors
      if name is None:
        raise RuntimeError(
          "Missing 'name' element within following property "
          "element:\n => %s " % (str(propInfo)))

      propInfo['value'] = value
      outputProperties[name] = propInfo

    return outputProperties
  except Exception:
    _getLoggerBase().exception("Error while parsing configuration file: %s.",
                               filePath)
    raise