def next(self, record, curInputBookmark):
  """ Return the next aggregated record, if any

  Parameters:
  ------------------------------------------------------------------------
  record:           The input record (values only) from the input source, or
                    None if the input has reached EOF (this will cause this
                    method to force completion of and return any partially
                    aggregated time period)
  curInputBookmark: The bookmark to the next input record
  retval:
    (outputRecord, inputBookmark)

    outputRecord: the aggregated record
    inputBookmark: a bookmark to the last position from the input that
                   contributed to this aggregated record.

    If we don't have any aggregated records yet, returns (None, None)


  The caller should generally do a loop like this:
    while True:
      inRecord = reader.getNextRecord()
      bookmark = reader.getBookmark()

      (aggRecord, aggBookmark) = aggregator.next(inRecord, bookmark)

      # reached EOF?
      if inRecord is None and aggRecord is None:
        break

      if aggRecord is not None:
        processRecord(aggRecord, aggBookmark)


  This method makes use of the self._slice member variable to build up
  the values we need to aggregate. This is a dict of lists. The keys are
  the field indices and the elements of each list are the values for that
  field. For example:

    self._slice = { 0: [42, 53], 1: [4.0, 5.1] }

  """
  # This will hold the aggregated record we return
  outRecord = None

  # This will hold the bookmark of the last input used within the
  # aggregated record we return.
  retInputBookmark = None

  if record is not None:

    # Increment input count
    self._inIdx += 1

    #print self._inIdx, record

    # Apply the filter, ignore the record if any field is unacceptable
    if self._filter is not None and not self._filter[0](self._filter[1], record):
      return (None, None)

    # If no aggregation info just return as-is
    if self._nullAggregation:
      return (record, curInputBookmark)

    # ----------------------------------------------------------------------
    # Do aggregation
    #
    # Remember the very first record time stamp - it will be used as
    # the timestamp for all first records in all sequences to align
    # times for the aggregation/join of sequences.
    #
    # For a set of aggregated records, it will use the beginning of the time
    # window as a timestamp for the set
    #
    t = record[self._timeFieldIdx]

    if self._firstSequenceStartTime is None:
      self._firstSequenceStartTime = t

    # Create initial startTime and endTime if needed
    if self._startTime is None:
      self._startTime = t
    if self._endTime is None:
      self._endTime = self._getEndTime(t)
      assert self._endTime > t

    # ----------------------------------------------------------------------
    # Does this record have a reset signal or sequence Id associated with it?
    # If so, see if we've reached a sequence boundary
    if self._resetFieldIdx is not None:
      resetSignal = record[self._resetFieldIdx]
    else:
      resetSignal = None

    if self._sequenceIdFieldIdx is not None:
      currSequenceId = record[self._sequenceIdFieldIdx]
    else:
      currSequenceId = None

    newSequence = (resetSignal == 1 and self._inIdx > 0) \
                    or self._sequenceId != currSequenceId \
                    or self._inIdx == 0

    if newSequence:
      self._sequenceId = currSequenceId

    # --------------------------------------------------------------------
    # We end the aggregation chunk if we go past the end time
    # -OR- we get an out of order record (t < startTime)
    sliceEnded = (t >= self._endTime or t < self._startTime)

    # -------------------------------------------------------------------
    # Time to generate a new output record?
    if (newSequence or sliceEnded) and len(self._slice) > 0:
      # Create aggregated record

      # Make first record timestamp as the beginning of the time period,
      # in case the first record wasn't falling on the beginning of the period
      for j, f in enumerate(self._fields):
        index = f[0]
        if index == self._timeFieldIdx:
          self._slice[j][0] = self._startTime
          break

      # Generate the aggregated record
      outRecord = self._createAggregateRecord()
      retInputBookmark = self._aggrInputBookmark

      # Reset the slice
      self._slice = defaultdict(list)

    # --------------------------------------------------------------------
    # Add current record to slice (note: this keeps slices in memory). Each
    # field in the slice is a list of field values from all the sliced
    # records
    for j, f in enumerate(self._fields):
      index = f[0]
      # append the parsed field value to the proper aggregated slice field.
      self._slice[j].append(record[index])
    self._aggrInputBookmark = curInputBookmark

    # --------------------------------------------------------------------
    # If we've encountered a new sequence, start aggregation over again
    if newSequence:
      # TODO: May use self._firstSequenceStartTime as a start for the new
      # sequence (to align all sequences)
      self._startTime = t
      self._endTime = self._getEndTime(t)

    # --------------------------------------------------------------------
    # If a slice just ended, re-compute the start and end time for the
    # next aggregated record
    if sliceEnded:
      # Did we receive an out of order record? If so, go back and iterate
      # till we get to the next end time boundary.
      if t < self._startTime:
        self._endTime = self._firstSequenceStartTime
      while t >= self._endTime:
        self._startTime = self._endTime
        self._endTime = self._getEndTime(self._endTime)

    # If we have a record to return, do it now
    if outRecord is not None:
      return (outRecord, retInputBookmark)

  # ---------------------------------------------------------------------
  # Input reached EOF
  # Aggregate one last time in the end if necessary
  elif self._slice:

    # Make first record timestamp as the beginning of the time period,
    # in case the first record wasn't falling on the beginning of the period
    for j, f in enumerate(self._fields):
      index = f[0]
      if index == self._timeFieldIdx:
        self._slice[j][0] = self._startTime
        break

    outRecord = self._createAggregateRecord()
    retInputBookmark = self._aggrInputBookmark

    self._slice = defaultdict(list)

  # Return aggregated record
  return (outRecord, retInputBookmark)
def processClubAttendance(f, clubs):
  """Process the attendance data of one club

  If the club already exists in the dict, update its data. If the club is
  new, create a new Club object and add it to the dict.

  The next step is to iterate over all the lines and add a record for each
  line. When reaching an empty line it means there are no more records for
  this club.

  Along the way some redundant lines are skipped. When the file ends, the
  f.next() call raises a StopIteration exception and that's the sign to
  return False, which indicates to the caller that there are no more clubs
  to process.
  """
  try:
    # Skip as many empty lines as necessary (file format inconsistent)
    line = f.next()
    while line == ',,,,,,,,,,,,,,,,,,,\n':
      line = f.next()

    # The first non-empty line should have the name as the first field
    name = line.split(',')[0]

    # Create a new club object if needed
    if name not in clubs:
      clubs[name] = Club(name)

    # Get the named club
    c = clubs[name]
    c.processAttendance(f)
    return True
  except StopIteration:
    return False
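# A hedged driver sketch for the function above. The file name and the Club
# class are assumptions (Club is defined elsewhere in this codebase); the loop
# relies on the documented behavior that processClubAttendance() returns False
# once the file is exhausted.
clubs = {}
f = open('attendance.csv')
while processClubAttendance(f, clubs):
  pass
f.close()
print("Processed %d clubs" % len(clubs))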
def processClubConsumption(f, clubs):
  """Process the consumption data of one club

  - Skip the header line
  - Iterate over lines
  - Read 4 records at a time
  - Parse each line: club, date, time, consumption
  - Get club object from dictionary if needed
  - Aggregate consumption
  - Call club.updateRecord() with the aggregated data
  """
  try:
    # Skip header line
    line = f.next()
    assert line.endswith('" ","SITE_LOCATION_NAME","TIMESTAMP","TOTAL_KWH"\n')

    valid_times = range(24)
    t = 0  # used to track time
    club = None
    clubName = None
    lastDate = None
    while True:
      assert t in valid_times
      consumption = 0
      for x in range(4):
        # Read the line and get rid of the newline character
        line = f.next()[:-1]
        fields = line.split(',')
        assert len(fields) == 4
        for i, field in enumerate(fields):
          # Strip the redundant double quotes
          assert field[0] == '"' and field[-1] == '"'
          fields[i] = field[1:-1]

        # Ignoring field 0, which is just a running count

        # Get the club name
        name = fields[1]

        # Hack to fix inconsistent club names like:
        # "Melbourne CBD - Melbourne Central" vs. "Melbourne Central"
        partialNames = ('Melbourne Central', 'North Sydney', 'Park St',
                        'Pitt St')
        for pn in partialNames:
          if pn in name:
            name = pn

        # Locate the club if needed
        if name != clubName:
          clubName = name
          club = clubs[name]

        # Split the date (time is counted using the t variable)
        tokens = fields[2].split()

        # Verify that t == 0 and consumption == 0 when there is no time in
        # the file
        if len(tokens) == 1:
          assert consumption == 0 and t == 0

        # The first (and sometimes only) token is the date
        date = tokens[0]

        # Aggregate the consumption
        consumption += float(fields[3])

      # Update the Club object after aggregating the consumption of 4 lines
      club.updateRecord(date, t, consumption)

      # Increment time
      t += 1
      t %= 24
  except StopIteration:
    return
def run(self, inputRecord):
  """ Run one iteration of this model.

  :param inputRecord: (object)
         A record object formatted according to
         :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
         :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
         result format.
  :returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
         A ModelResult namedtuple. The contents of ModelResult.inferences
         depends on the specific inference type of this model, which
         can be queried by :meth:`.getInferenceType`.
  """
  # 0-based prediction index for ModelResult
  predictionNumber = self._numPredictions
  self._numPredictions += 1

  result = opf_utils.ModelResult(predictionNumber=predictionNumber,
                                 rawInput=inputRecord)
  return result
def _getModelCheckpointFilePath(checkpointDir): """ Return the absolute path of the model's checkpoint file. :param checkpointDir: (string) Directory of where the experiment is to be or was saved :returns: (string) An absolute path. """ path = os.path.join(checkpointDir, "model.data") path = os.path.abspath(path) return path
def writeToCheckpoint(self, checkpointDir): """Serializes model using capnproto and writes data to ``checkpointDir``""" proto = self.getSchema().new_message() self.write(proto) checkpointPath = self._getModelCheckpointFilePath(checkpointDir) # Clean up old saved state, if any if os.path.exists(checkpointDir): if not os.path.isdir(checkpointDir): raise Exception(("Existing filesystem entry <%s> is not a model" " checkpoint -- refusing to delete (not a directory)") \ % checkpointDir) if not os.path.isfile(checkpointPath): raise Exception(("Existing filesystem entry <%s> is not a model" " checkpoint -- refusing to delete"\ " (%s missing or not a file)") % \ (checkpointDir, checkpointPath)) shutil.rmtree(checkpointDir) # Create a new directory for saving state self.__makeDirectoryFromAbsolutePath(checkpointDir) with open(checkpointPath, 'wb') as f: proto.write(f)
def readFromCheckpoint(cls, checkpointDir):
  """Deserializes model from checkpointDir using capnproto"""
  checkpointPath = cls._getModelCheckpointFilePath(checkpointDir)

  # Open in binary mode to match the binary write in writeToCheckpoint
  with open(checkpointPath, 'rb') as f:
    proto = cls.getSchema().read(
      f, traversal_limit_in_words=_TRAVERSAL_LIMIT_IN_WORDS)

  model = cls.read(proto)
  return model
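# A hedged round-trip sketch for the two methods above. 'ConcreteModel' stands
# for a Model subclass that implements getSchema(), write(), and read(); the
# checkpoint path is illustrative.
model = ConcreteModel()
model.writeToCheckpoint('/tmp/example.checkpoint')
restored = ConcreteModel.readFromCheckpoint('/tmp/example.checkpoint')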
def writeBaseToProto(self, proto): """Save the state maintained by the Model base class :param proto: capnp ModelProto message builder """ inferenceType = self.getInferenceType() # lower-case first letter to be compatible with capnproto enum naming inferenceType = inferenceType[:1].lower() + inferenceType[1:] proto.inferenceType = inferenceType proto.numPredictions = self._numPredictions proto.learningEnabled = self.__learningEnabled proto.inferenceEnabled = self.__inferenceEnabled proto.inferenceArgs = json.dumps(self.__inferenceArgs)
def save(self, saveModelDir): """ Save the model in the given directory. :param saveModelDir: (string) Absolute directory path for saving the model. This directory should only be used to store a saved model. If the directory does not exist, it will be created automatically and populated with model data. A pre-existing directory will only be accepted if it contains previously saved model data. If such a directory is given, the full contents of the directory will be deleted and replaced with current model data. """ logger = self._getLogger() logger.debug("(%s) Creating local checkpoint in %r...", self, saveModelDir) modelPickleFilePath = self._getModelPickleFilePath(saveModelDir) # Clean up old saved state, if any if os.path.exists(saveModelDir): if not os.path.isdir(saveModelDir): raise Exception(("Existing filesystem entry <%s> is not a model" " checkpoint -- refusing to delete (not a directory)") \ % saveModelDir) if not os.path.isfile(modelPickleFilePath): raise Exception(("Existing filesystem entry <%s> is not a model" " checkpoint -- refusing to delete"\ " (%s missing or not a file)") % \ (saveModelDir, modelPickleFilePath)) shutil.rmtree(saveModelDir) # Create a new directory for saving state self.__makeDirectoryFromAbsolutePath(saveModelDir) with open(modelPickleFilePath, 'wb') as modelPickleFile: logger.debug("(%s) Pickling Model instance...", self) pickle.dump(self, modelPickleFile, protocol=pickle.HIGHEST_PROTOCOL) logger.debug("(%s) Finished pickling Model instance", self) # Tell the model to save extra data, if any, that's too big for pickling self._serializeExtraData(extraDataDir=self._getModelExtraDataDir(saveModelDir)) logger.debug("(%s) Finished creating local checkpoint", self) return
def load(cls, savedModelDir): """ Load saved model. :param savedModelDir: (string) Directory of where the experiment is to be or was saved :returns: (:class:`Model`) The loaded model instance """ logger = opf_utils.initLogger(cls) logger.debug("Loading model from local checkpoint at %r...", savedModelDir) # Load the model modelPickleFilePath = Model._getModelPickleFilePath(savedModelDir) with open(modelPickleFilePath, 'rb') as modelPickleFile: logger.debug("Unpickling Model instance...") model = pickle.load(modelPickleFile) logger.debug("Finished unpickling Model instance") # Tell the model to load extra data, if any, that was too big for pickling model._deSerializeExtraData( extraDataDir=Model._getModelExtraDataDir(savedModelDir)) logger.debug("Finished Loading model from local checkpoint") return model
def _getModelPickleFilePath(saveModelDir): """ Return the absolute path of the model's pickle file. :param saveModelDir: (string) Directory of where the experiment is to be or was saved :returns: (string) An absolute path. """ path = os.path.join(saveModelDir, "model.pkl") path = os.path.abspath(path) return path
def _getModelExtraDataDir(saveModelDir): """ Return the absolute path to the directory where the model's own "extra data" are stored (i.e., data that's too big for pickling). :param saveModelDir: (string) Directory of where the experiment is to be or was saved :returns: (string) An absolute path. """ path = os.path.join(saveModelDir, "modelextradata") path = os.path.abspath(path) return path
def runExperiment(args, model=None):
  """
  Run a single OPF experiment.

  .. note:: The caller is responsible for initializing python logging before
     calling this function (e.g., import :mod:`nupic.support`;
     :meth:`nupic.support.initLogging`)

  See also: :meth:`.initExperimentPrng`.

  :param args: (string) Experiment command-line args list. To see all options,
        run with ``--help``:

        .. code-block:: text

          Options:
            -h, --help           show this help message and exit
            -c <CHECKPOINT>      Create a model and save it under the given
                                 <CHECKPOINT> name, but don't run it
            --listCheckpoints    List all available checkpoints
            --listTasks          List all task labels in description.py
            --load=<CHECKPOINT>  Load a model from the given <CHECKPOINT> and
                                 run it. Run with --listCheckpoints flag for
                                 more details.
            --newSerialization   Use new capnproto serialization
            --tasks              Run the tasks with the given TASK LABELS in
                                 the order they are given. Either end of
                                 arg-list, or a standalone dot ('.') arg or the
                                 next short or long option name (-a or --blah)
                                 terminates the list. NOTE: FAILS TO RECOGNIZE
                                 task label names with one or more leading
                                 dashes. [default: run all of the tasks in
                                 description.py]
            --testMode           Reduce iteration count for testing
            --noCheckpoint       Don't checkpoint the model after running each
                                 task.

  :param model: (:class:`~nupic.frameworks.opf.model.Model`) For testing, may
         pass in an existing OPF Model to use instead of creating a new one.

  :returns: (:class:`~nupic.frameworks.opf.model.Model`) reference to OPF Model
            instance that was constructed (this is provided to aid with
            debugging) or None, if none was created.
  """
  # Parse command-line options
  opt = _parseCommandLineOptions(args)

  #print "runExperiment: Parsed Command Options: ", opt

  model = _runExperimentImpl(opt, model)

  return model
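# A hedged invocation sketch (the experiment directory path is illustrative).
# Per the note in the docstring, the caller initializes logging first; the
# initLogging() used here is the one defined later in this section.
from nupic.support import initLogging
initLogging()
model = runExperiment(['path/to/experimentDir'])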
def _parseCommandLineOptions(args):
  """Parse command line options

  Args:
    args: command line arguments (not including sys.argv[0])
  Returns:
    namedtuple ParseCommandLineOptionsResult
  """
  usageStr = (
    "%prog [options] descriptionPyDirectory\n"
    "This script runs a single OPF Model described by description.py "
    "located in the given directory."
    )

  parser = optparse.OptionParser(usage=usageStr)

  parser.add_option("-c",
                    help="Create a model and save it under the given "
                         "<CHECKPOINT> name, but don't run it",
                    dest="createCheckpointName",
                    action="store", type="string", default="",
                    metavar="<CHECKPOINT>")

  parser.add_option("--listCheckpoints",
                    help="List all available checkpoints",
                    dest="listAvailableCheckpoints",
                    action="store_true", default=False)

  parser.add_option("--listTasks",
                    help="List all task labels in description.py",
                    dest="listTasks",
                    action="store_true", default=False)

  parser.add_option("--load",
                    help="Load a model from the given <CHECKPOINT> and run it. "
                         "Run with --listCheckpoints flag for more details. ",
                    dest="runCheckpointName",
                    action="store", type="string", default="",
                    metavar="<CHECKPOINT>")

  parser.add_option("--newSerialization",
                    help="Use new capnproto serialization",
                    dest="newSerialization",
                    action="store_true", default=False)

  #parser.add_option("--reuseDatasets",
  #                  help="Keep existing generated/aggregated datasets",
  #                  dest="reuseDatasets", action="store_true",
  #                  default=False)

  parser.add_option("--tasks",
                    help="Run the tasks with the given TASK LABELS "
                         "in the order they are given. Either end of "
                         "arg-list, or a standalone dot ('.') arg or "
                         "the next short or long option name (-a or "
                         "--blah) terminates the list. NOTE: FAILS "
                         "TO RECOGNIZE task label names with one or more "
                         "leading dashes. [default: run all of the tasks in "
                         "description.py]",
                    dest="taskLabels", default=[],
                    action="callback", callback=reapVarArgsCallback,
                    metavar="TASK_LABELS")

  parser.add_option("--testMode",
                    help="Reduce iteration count for testing",
                    dest="testMode", action="store_true",
                    default=False)

  parser.add_option("--noCheckpoint",
                    help="Don't checkpoint the model after running each task.",
                    dest="checkpointModel", action="store_false",
                    default=True)

  options, experiments = parser.parse_args(args)

  # Validate args
  mutuallyExclusiveOptionCount = sum([bool(options.createCheckpointName),
                                      options.listAvailableCheckpoints,
                                      options.listTasks,
                                      bool(options.runCheckpointName)])
  if mutuallyExclusiveOptionCount > 1:
    _reportCommandLineUsageErrorAndExit(
      parser,
      "Options: -c, --listCheckpoints, --listTasks, and --load are "
      "mutually exclusive. Please select only one")

  mutuallyExclusiveOptionCount = sum([bool(not options.checkpointModel),
                                      bool(options.createCheckpointName)])
  if mutuallyExclusiveOptionCount > 1:
    _reportCommandLineUsageErrorAndExit(
      parser,
      "Options: -c and --noCheckpoint are "
      "mutually exclusive. Please select only one")

  if len(experiments) != 1:
    _reportCommandLineUsageErrorAndExit(
      parser,
      "Exactly ONE experiment must be specified, but got %s (%s)" % (
        len(experiments), experiments))

  # Done with parser
  parser.destroy()

  # Prepare results

  # Directory path of the experiment (that contains description.py)
  experimentDir = os.path.abspath(experiments[0])

  # RunExperiment.py's private options (g_parsedPrivateCommandLineOptionsSchema)
  privateOptions = dict()
  privateOptions['createCheckpointName'] = options.createCheckpointName
  privateOptions['listAvailableCheckpoints'] = options.listAvailableCheckpoints
  privateOptions['listTasks'] = options.listTasks
  privateOptions['runCheckpointName'] = options.runCheckpointName
  privateOptions['newSerialization'] = options.newSerialization
  privateOptions['testMode'] = options.testMode
  #privateOptions['reuseDatasets'] = options.reuseDatasets
  privateOptions['taskLabels'] = options.taskLabels
  privateOptions['checkpointModel'] = options.checkpointModel

  result = ParseCommandLineOptionsResult(experimentDir=experimentDir,
                                         privateOptions=privateOptions)
  return result
def reapVarArgsCallback(option, optStr, value, parser): """Used as optparse callback for reaping a variable number of option args. The option may be specified multiple times, and all the args associated with that option name will be accumulated in the order that they are encountered """ newValues = [] # Reap the args, taking care to stop before the next option or '.' gotDot = False for arg in parser.rargs: # Stop on --longname options if arg.startswith("--") and len(arg) > 2: break # Stop on -b options if arg.startswith("-") and len(arg) > 1: break if arg == ".": gotDot = True break newValues.append(arg) if not newValues: raise optparse.OptionValueError( ("Empty arg list for option %r expecting one or more args " "(remaining tokens: %r)") % (optStr, parser.rargs)) del parser.rargs[:len(newValues) + int(gotDot)] # Retrieve the existing arg accumulator, if any value = getattr(parser.values, option.dest, []) #print "Previous value: %r" % value if value is None: value = [] # Append the new args to the existing ones and save to the parser value.extend(newValues) setattr(parser.values, option.dest, value)
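# A hedged wiring sketch: this mirrors how _parseCommandLineOptions above
# registers the callback for --tasks, using only stdlib optparse. The arg
# values are illustrative; note how the standalone dot terminates the list.
import optparse
parser = optparse.OptionParser()
parser.add_option("--tasks", dest="taskLabels", default=[],
                  action="callback", callback=reapVarArgsCallback,
                  metavar="TASK_LABELS")
options, remaining = parser.parse_args(
  ["--tasks", "taskA", "taskB", ".", "expDir"])
print(options.taskLabels)  # -> ['taskA', 'taskB']
print(remaining)           # -> ['expDir']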
def _reportCommandLineUsageErrorAndExit(parser, message): """Report usage error and exit program with error indication.""" print parser.get_usage() print message sys.exit(1)
def _runExperimentImpl(options, model=None):
  """Creates and runs the experiment

  Args:
    options: namedtuple ParseCommandLineOptionsResult
    model: For testing: may pass in an existing OPF Model instance
        to use instead of creating a new one.

  Returns:
    reference to OPFExperiment instance that was constructed (this
    is provided to aid with debugging) or None, if none was created.
  """
  json_helpers.validate(options.privateOptions,
                        schemaDict=g_parsedPrivateCommandLineOptionsSchema)

  # Load the experiment's description.py module
  experimentDir = options.experimentDir
  descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
    experimentDir)
  expIface = helpers.getExperimentDescriptionInterfaceFromModule(
    descriptionPyModule)

  # Handle "list checkpoints" request
  if options.privateOptions['listAvailableCheckpoints']:
    _printAvailableCheckpoints(experimentDir)
    return None

  # Load experiment tasks
  experimentTasks = expIface.getModelControl().get('tasks', [])

  # If the tasks list is empty, and this is a nupic environment description
  # file being run from the OPF, convert it to a simple OPF description file.
  if (len(experimentTasks) == 0 and
      expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):
    expIface.convertNupicEnvToOPF()
    experimentTasks = expIface.getModelControl().get('tasks', [])

  # Ensures all the source locations are either absolute paths or relative to
  # the nupic.datafiles package_data location.
  expIface.normalizeStreamSources()

  # Extract option
  newSerialization = options.privateOptions['newSerialization']

  # Handle listTasks
  if options.privateOptions['listTasks']:
    print "Available tasks:"

    for label in [t['taskLabel'] for t in experimentTasks]:
      print "\t", label

    return None

  # Construct the experiment instance
  if options.privateOptions['runCheckpointName']:

    assert model is None

    checkpointName = options.privateOptions['runCheckpointName']
    model = ModelFactory.loadFromCheckpoint(
      savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),
      newSerialization=newSerialization)

  elif model is not None:
    print "Skipping creation of OPFExperiment instance: caller provided its own"

  else:
    modelDescription = expIface.getModelDescription()
    model = ModelFactory.create(modelDescription)

  # Handle "create model" request
  if options.privateOptions['createCheckpointName']:
    checkpointName = options.privateOptions['createCheckpointName']
    _saveModel(model=model,
               experimentDir=experimentDir,
               checkpointLabel=checkpointName,
               newSerialization=newSerialization)

    return model

  # Build the task list

  # Default task execution index list is in the natural list order of the tasks
  taskIndexList = range(len(experimentTasks))

  customTaskExecutionLabelsList = options.privateOptions['taskLabels']
  if customTaskExecutionLabelsList:
    taskLabelsList = [t['taskLabel'] for t in experimentTasks]
    taskLabelsSet = set(taskLabelsList)

    customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)

    assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
           ("Some custom-provided task execution labels don't correspond "
            "to actual task labels: mismatched labels: %r; actual task "
            "labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
                              customTaskExecutionLabelsList)

    taskIndexList = [taskLabelsList.index(label) for label in
                     customTaskExecutionLabelsList]

    print "#### Executing custom task list: %r" % [taskLabelsList[i] for
                                                   i in taskIndexList]

  # Run all experiment tasks
  for taskIndex in taskIndexList:
    task = experimentTasks[taskIndex]

    # Create a task runner and run it!
    taskRunner = _TaskRunner(model=model,
                             task=task,
                             cmdOptions=options)
    taskRunner.run()
    del taskRunner

    if options.privateOptions['checkpointModel']:
      _saveModel(model=model,
                 experimentDir=experimentDir,
                 checkpointLabel=task['taskLabel'],
                 newSerialization=newSerialization)

  return model
def _saveModel(model, experimentDir, checkpointLabel, newSerialization=False): """Save model""" checkpointDir = _getModelCheckpointDir(experimentDir, checkpointLabel) if newSerialization: model.writeToCheckpoint(checkpointDir) else: model.save(saveModelDir=checkpointDir)
def _getModelCheckpointDir(experimentDir, checkpointLabel): """Creates directory for serialization of the model checkpointLabel: Checkpoint label (string) Returns: absolute path to the serialization directory """ checkpointDir = os.path.join(getCheckpointParentDir(experimentDir), checkpointLabel + g_defaultCheckpointExtension) checkpointDir = os.path.abspath(checkpointDir) return checkpointDir
def getCheckpointParentDir(experimentDir): """Get checkpoint parent dir. Returns: absolute path to the base serialization directory within which model checkpoints for this experiment are created """ baseDir = os.path.join(experimentDir, "savedmodels") baseDir = os.path.abspath(baseDir) return baseDir
def _checkpointLabelFromCheckpointDir(checkpointDir): """Returns a checkpoint label string for the given model checkpoint directory checkpointDir: relative or absolute model checkpoint directory path """ assert checkpointDir.endswith(g_defaultCheckpointExtension) lastSegment = os.path.split(checkpointDir)[1] checkpointLabel = lastSegment[0:-len(g_defaultCheckpointExtension)] return checkpointLabel
def _isCheckpointDir(checkpointDir): """Return true iff checkpointDir appears to be a checkpoint directory.""" lastSegment = os.path.split(checkpointDir)[1] if lastSegment[0] == '.': return False if not checkpointDir.endswith(g_defaultCheckpointExtension): return False if not os.path.isdir(checkpointDir): return False return True
def _printAvailableCheckpoints(experimentDir): """List available checkpoints for the specified experiment.""" checkpointParentDir = getCheckpointParentDir(experimentDir) if not os.path.exists(checkpointParentDir): print "No available checkpoints." return checkpointDirs = [x for x in os.listdir(checkpointParentDir) if _isCheckpointDir(os.path.join(checkpointParentDir, x))] if not checkpointDirs: print "No available checkpoints." return print "Available checkpoints:" checkpointList = [_checkpointLabelFromCheckpointDir(x) for x in checkpointDirs] for checkpoint in sorted(checkpointList): print "\t", checkpoint print print "To start from a checkpoint:" print " python run_opf_experiment.py experiment --load <CHECKPOINT>" print "For example, to start from the checkpoint \"MyCheckpoint\":" print " python run_opf_experiment.py experiment --load MyCheckpoint"
def run(self): """Runs a single experiment task""" self.__logger.debug("run(): Starting task <%s>", self.__task['taskLabel']) # Set up the task # Create our main loop-control iterator if self.__cmdOptions.privateOptions['testMode']: numIters = 10 else: numIters = self.__task['iterationCount'] if numIters >= 0: iterTracker = iter(xrange(numIters)) else: iterTracker = iter(itertools.count()) # Initialize periodic activities periodic = PeriodicActivityMgr( requestedActivities=self._createPeriodicActivities()) # Reset sequence states in the model, so it starts looking for a new # sequence # TODO: should this be done in OPFTaskDriver.setup(), instead? Is it always # desired in Nupic? self.__model.resetSequenceStates() # Have Task Driver perform its initial setup activities, including setup # callbacks self.__taskDriver.setup() # Run it! while True: # Check controlling iterator first try: next(iterTracker) except StopIteration: break # Read next input record try: inputRecord = self.__datasetReader.next() except StopIteration: break # Process input record result = self.__taskDriver.handleInputRecord(inputRecord=inputRecord) if InferenceElement.encodings in result.inferences: result.inferences.pop(InferenceElement.encodings) self.__predictionLogger.writeRecord(result) # Run periodic activities periodic.tick() # Dump the experiment metrics at the end of the task self._getAndEmitExperimentMetrics(final=True) # Have Task Driver perform its final activities self.__taskDriver.finalize() # Reset sequence states in the model, so it starts looking for a new # sequence # TODO: should this be done in OPFTaskDriver.setup(), instead? Is it always # desired in Nupic? self.__model.resetSequenceStates()
def _createPeriodicActivities(self): """Creates and returns a list of activites for this TaskRunner instance Returns: a list of PeriodicActivityRequest elements """ # Initialize periodic activities periodicActivities = [] # Metrics reporting class MetricsReportCb(object): def __init__(self, taskRunner): self.__taskRunner = taskRunner return def __call__(self): self.__taskRunner._getAndEmitExperimentMetrics() reportMetrics = PeriodicActivityRequest( repeating=True, period=1000, cb=MetricsReportCb(self)) periodicActivities.append(reportMetrics) # Iteration progress class IterationProgressCb(object): PROGRESS_UPDATE_PERIOD_TICKS = 1000 def __init__(self, taskLabel, requestedIterationCount, logger): self.__taskLabel = taskLabel self.__requestedIterationCount = requestedIterationCount self.__logger = logger self.__numIterationsSoFar = 0 def __call__(self): self.__numIterationsSoFar += self.PROGRESS_UPDATE_PERIOD_TICKS self.__logger.debug("%s: ITERATION PROGRESS: %s of %s" % ( self.__taskLabel, self.__numIterationsSoFar, self.__requestedIterationCount)) iterationProgressCb = IterationProgressCb( taskLabel=self.__task['taskLabel'], requestedIterationCount=self.__task['iterationCount'], logger=self.__logger) iterationProgressReporter = PeriodicActivityRequest( repeating=True, period=IterationProgressCb.PROGRESS_UPDATE_PERIOD_TICKS, cb=iterationProgressCb) periodicActivities.append(iterationProgressReporter) return periodicActivities
def _generateFile(filename, data):
  """
  Parameters:
  ----------------------------------------------------------------
  filename:       name of .csv file to generate
  data:           2-D numpy array of shape (numRecords, numFields) whose
                  values are written out as float fields
  """
  # Create the file
  print "Creating %s..." % (filename)
  numRecords, numFields = data.shape

  fields = [('field%d' % (i+1), 'float', '') for i in range(numFields)]
  outFile = File(filename, fields)

  for i in xrange(numRecords):
    outFile.write(data[i].tolist())

  outFile.close()
def corruptVector(v1, noiseLevel, numActiveCols):
  """
  Corrupts a copy of a binary vector by inverting int(noiseLevel *
  numActiveCols) randomly chosen bits.

  @param v1            (array) binary vector whose copy will be corrupted
  @param noiseLevel    (float) amount of noise to be applied on the new vector
  @param numActiveCols (int)   number of sparse columns that represent an input

  @return v2 (array) corrupted binary vector
  """
  size = len(v1)
  v2 = np.zeros(size, dtype="uint32")
  bitsToSwap = int(noiseLevel * numActiveCols)

  # Copy the contents of v1 into v2
  for i in range(size):
    v2[i] = v1[i]

  # Flip bitsToSwap randomly chosen positions
  for _ in range(bitsToSwap):
    i = random.randrange(size)
    if v2[i] == 1:
      v2[i] = 0
    else:
      v2[i] = 1
  return v2
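# A small demonstration sketch (the vector size and density are illustrative).
# It corrupts a random binary vector and measures how much overlap survives
# using percentOverlap(), which is defined later in this section.
import numpy as np
import random

size = 50
numActiveCols = 10
v = np.zeros(size, dtype="uint32")
v[random.sample(range(size), numActiveCols)] = 1

noisy = corruptVector(v, noiseLevel=0.2, numActiveCols=numActiveCols)
print(percentOverlap(v, noisy, size))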
def showPredictions(): """ Shows predictions of the TM when presented with the characters A, B, C, D, X, and Y without any contextual information, that is, not embedded within a sequence. """ for k in range(6): tm.reset() print "--- " + "ABCDXY"[k] + " ---" tm.compute(set(seqT[k][:].nonzero()[0].tolist()), learn=False) activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()] predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()] currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())] predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())] print("Active cols: " + str(np.nonzero(currentColumns)[0])) print("Predicted cols: " + str(np.nonzero(predictedColumns)[0])) print ""
def trainTM(sequence, timeSteps, noiseLevel): """ Trains the TM with given sequence for a given number of time steps and level of input corruption @param sequence (array) array whose rows are the input characters @param timeSteps (int) number of time steps in which the TM will be presented with sequence @param noiseLevel (float) amount of noise to be applied on the characters in the sequence """ currentColumns = np.zeros(tm.numberOfColumns(), dtype="uint32") predictedColumns = np.zeros(tm.numberOfColumns(), dtype="uint32") ts = 0 for t in range(timeSteps): tm.reset() for k in range(4): v = corruptVector(sequence[k][:], noiseLevel, sparseCols) tm.compute(set(v[:].nonzero()[0].tolist()), learn=True) activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()] predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()] currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())] acc = accuracy(currentColumns, predictedColumns) x.append(ts) y.append(acc) ts += 1 predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())]
def encodeIntoArray(self, inputVal, outputVal): """See method description in base.py""" if len(inputVal) != len(outputVal): raise ValueError("Different input (%i) and output (%i) sizes." % ( len(inputVal), len(outputVal))) if self.w is not None and sum(inputVal) != self.w: raise ValueError("Input has %i bits but w was set to %i." % ( sum(inputVal), self.w)) outputVal[:] = inputVal[:] if self.verbosity >= 2: print "input:", inputVal, "output:", outputVal print "decoded:", self.decodedToStr(self.decode(outputVal))
def decode(self, encoded, parentFieldName=""): """See the function description in base.py""" if parentFieldName != "": fieldName = "%s.%s" % (parentFieldName, self.name) else: fieldName = self.name return ({fieldName: ([[0, 0]], "input")}, [fieldName])
def getBucketInfo(self, buckets): """See the function description in base.py""" return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))]
def topDownCompute(self, encoded): """See the function description in base.py""" return EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))
def closenessScores(self, expValues, actValues, **kwargs):
  """
  Does a bitwise compare of the two bitmaps and returns a fractional
  value between 0 and 1 of how similar they are.

  - ``1`` => identical
  - ``0`` => no overlapping bits

  ``kwargs`` will have the keyword "fractional", which is assumed by this
  encoder.
  """
  ratio = 1.0
  esum = int(expValues.sum())
  asum = int(actValues.sum())
  if asum > esum:
    diff = asum - esum
    if diff < esum:
      ratio = 1 - diff/float(esum)
    else:
      ratio = 1/float(diff)

  olap = expValues & actValues
  osum = int(olap.sum())
  if esum == 0:
    r = 0.0
  else:
    r = osum/float(esum)
  r = r * ratio

  return numpy.array([r])
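# A quick hedged check of the scoring behavior above ('enc' stands for an
# instance of this pass-through encoder; the bit vectors are illustrative).
# Identical bitmaps score 1.0; here the second pair shares 2 of 3 expected bits.
import numpy
a = numpy.array([0, 1, 1, 0, 1], dtype="uint32")
b = numpy.array([0, 1, 0, 0, 1], dtype="uint32")
print(enc.closenessScores(a, a))  # -> [ 1.]
print(enc.closenessScores(a, b))  # -> [ 0.6666...]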
def getCallerInfo(depth=2):
  """Utility function to get information about function callers

  The information is the tuple (function/method name, filename, class)
  The class will be None if the caller is just a function and not an object
  method.

  :param depth: (int) how far back in the callstack to go to extract the
         caller info
  """
  f = sys._getframe(depth)
  method_name = f.f_code.co_name
  filename = f.f_code.co_filename

  arg_class = None
  args = inspect.getargvalues(f)
  if len(args[0]) > 0:
    arg_name = args[0][0]  # potentially the 'self' arg if it's a method
    arg_class = args[3][arg_name].__class__.__name__
  return (method_name, filename, arg_class)
def title(s=None, additional='', stream=sys.stdout):
  """Utility function to display nice titles

  It automatically extracts the name of the function/method it is called from
  and you can add additional text. title() will then print the name of the
  function/method and the additional text surrounded by two lines of dashes.
  If you don't want the name of the function, you can provide alternative text
  (regardless of the additional text)

  :param s: (string) text to display, uses the function name and arguments by
         default
  :param additional: (string) extra text to display (not needed if s is not
         None)
  :param stream: (stream) the stream to print to. By default goes to standard
         output

  Examples:

  .. code-block:: python

    def foo():
      title()

  will display:

  .. code-block:: text

    ---
    foo
    ---

  .. code-block:: python

    def foo():
      title(additional='(), this is cool!!!')

  will display:

  .. code-block:: text

    ----------------------
    foo(), this is cool!!!
    ----------------------

  .. code-block:: python

    def foo():
      title('No function name here!')

  will display:

  .. code-block:: text

    ----------------------
    No function name here!
    ----------------------

  """
  if s is None:
    callable_name, file_name, class_name = getCallerInfo(2)
    s = callable_name
    if class_name is not None:
      s = class_name + '.' + callable_name
  lines = (s + additional).split('\n')
  length = max(len(line) for line in lines)
  print >> stream, '-' * length
  print >> stream, s + additional
  print >> stream, '-' * length
def getArgumentDescriptions(f):
  """
  Get the arguments, default values, and argument descriptions for a function.

  Parses the argument descriptions out of the function docstring, using a
  format something like this:

  ::

    [junk]
    argument_name:     description...
      description...
      description...
    [junk]
    [more arguments]

  It will find an argument as long as the exact argument name starts the line.
  It will then strip a trailing colon, if present, then strip the rest of the
  line and use it to start the description. It will then strip and append any
  subsequent lines with a greater indent level than the original argument name.

  :param f: (function) to inspect
  :returns: (list of tuples) (``argName``, ``argDescription``, ``defaultValue``)
    If an argument has no default value, the tuple is only two elements long (as
    ``None`` cannot be used, since it could be a default value itself).
  """
  # Get the argument names and default values
  argspec = inspect.getargspec(f)

  # Scan through the docstring to extract documentation for each argument as
  # follows:
  #   Check the first word of the line, stripping a colon if one is present.
  #     If it matches an argument name:
  #       Take the rest of the line, stripping leading whitespace
  #       Take each subsequent line if its indentation level is greater than
  #         the initial indentation level
  #       Once the indentation level is back to the original level, look for
  #         another argument
  docstring = f.__doc__
  descriptions = {}
  if docstring:
    lines = docstring.split('\n')
    i = 0
    while i < len(lines):
      stripped = lines[i].lstrip()
      if not stripped:
        i += 1
        continue
      # Indentation level is index of the first character
      indentLevel = lines[i].index(stripped[0])
      # Get the first word and remove the colon, if present
      firstWord = stripped.split()[0]
      if firstWord.endswith(':'):
        firstWord = firstWord[:-1]
      if firstWord in argspec.args:
        # Found an argument
        argName = firstWord
        restOfLine = stripped[len(firstWord)+1:].strip()
        argLines = [restOfLine]
        # Take the next lines as long as they are indented more
        i += 1
        while i < len(lines):
          stripped = lines[i].lstrip()
          if not stripped:
            # Empty line - stop
            break
          if lines[i].index(stripped[0]) <= indentLevel:
            # No longer indented far enough - stop
            break
          # This line counts too
          argLines.append(lines[i].strip())
          i += 1
        # Store this description
        descriptions[argName] = ' '.join(argLines)
      else:
        # Not an argument
        i += 1

  # Build the list of (argName, description, defaultValue)
  args = []
  if argspec.defaults:
    defaultCount = len(argspec.defaults)
  else:
    defaultCount = 0
  nonDefaultArgCount = len(argspec.args) - defaultCount
  for i, argName in enumerate(argspec.args):
    if i >= nonDefaultArgCount:
      defaultValue = argspec.defaults[i - nonDefaultArgCount]
      args.append((argName, descriptions.get(argName, ""), defaultValue))
    else:
      args.append((argName, descriptions.get(argName, "")))

  return args
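# A hedged usage sketch: a function documented in the style this parser
# expects, and what getArgumentDescriptions() recovers from it. The sample
# function is illustrative.
def sample(alpha, beta=3):
  """Do something.

  alpha: the first operand,
    described over two lines
  beta: the scaling factor
  """
  return alpha * beta

print(getArgumentDescriptions(sample))
# -> [('alpha', 'the first operand, described over two lines'),
#     ('beta', 'the scaling factor', 3)]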
def initLogging(verbose=False, console='stdout', consoleLevel='DEBUG'):
  """
  Initialize NuPIC logging by reading in from the logging configuration file.

  The logging configuration file is named ``nupic-logging.conf`` and is
  expected to be in the format defined by the python logging module.

  If the environment variable ``NTA_CONF_PATH`` is defined, then the logging
  configuration file is expected to be in the ``NTA_CONF_PATH`` directory. If
  ``NTA_CONF_PATH`` is not defined, then it is found in the 'conf/default'
  subdirectory of the NuPIC installation directory (typically
  ~/nupic/current/conf/default)

  The logging configuration file can use the environment variable
  ``NTA_LOG_DIR`` to set the locations of log files. If this variable is not
  defined, logging to files will be disabled.

  :param verbose: (bool) if True, print diagnostic messages to stderr while
      setting up logging
  :param console: Defines console output for the default "root" logging
      configuration; this may be one of 'stdout', 'stderr', or None; Use None
      to suppress console logging output
  :param consoleLevel: Logging-level filter string for console output
      corresponding to logging levels in the logging module; may be one of:
      'DEBUG', 'INFO', 'WARNING', 'ERROR', or 'CRITICAL'. E.g., a value of
      'WARNING' suppresses DEBUG and INFO level output to console, but allows
      WARNING, ERROR, and CRITICAL
  """
  # NOTE: If you call this twice from the same process there seems to be a
  # bug - logged messages don't show up for loggers that you do another
  # logging.getLogger() on.
  global gLoggingInitialized
  if gLoggingInitialized:
    if verbose:
      print >> sys.stderr, "Logging already initialized, doing nothing."
    return

  consoleStreamMappings = {
    'stdout'  : 'stdoutConsoleHandler',
    'stderr'  : 'stderrConsoleHandler',
  }

  consoleLogLevels = ['DEBUG', 'INFO', 'WARNING', 'WARN', 'ERROR', 'CRITICAL',
                      'FATAL']

  assert console is None or console in consoleStreamMappings.keys(), (
    'Unexpected console arg value: %r') % (console,)

  assert consoleLevel in consoleLogLevels, (
    'Unexpected consoleLevel arg value: %r') % (consoleLevel)

  # -----------------------------------------------------------------------
  # Setup logging. Look for the nupic-logging.conf file, first in the
  # NTA_CONFIG_DIR path (if defined), then in a subdirectory of the nupic
  # module
  configFilename = 'nupic-logging.conf'
  configFilePath = resource_filename("nupic.support", configFilename)

  configLogDir = os.environ.get('NTA_LOG_DIR', None)

  # Load in the logging configuration file
  if verbose:
    print >> sys.stderr, (
      "Using logging configuration file: %s") % (configFilePath)

  # This dict will hold our replacement strings for logging configuration
  replacements = dict()

  def makeKey(name):
    """ Makes replacement key """
    return "$$%s$$" % (name)

  platform = sys.platform.lower()
  if platform.startswith('java'):
    # Jython
    import java.lang
    platform = java.lang.System.getProperty("os.name").lower()
    if platform.startswith('mac os x'):
      platform = 'darwin'

  if platform.startswith('darwin'):
    replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/var/run/syslog"'
  elif platform.startswith('linux'):
    replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/dev/log"'
  elif platform.startswith('win'):
    replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"log"'
  else:
    raise RuntimeError("This platform is neither darwin, win32, nor linux: %s" % (
      sys.platform,))

  # Nupic logs go to file
  replacements[makeKey('PERSISTENT_LOG_HANDLER')] = 'fileHandler'
  if platform.startswith('win'):
    replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = '"NUL"'
  else:
    replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = '"/dev/null"'

  # Set up log file path for the default file handler and configure handlers
  handlers = list()

  if configLogDir is not None:
    logFilePath = _genLoggingFilePath()
    makeDirectoryFromAbsolutePath(os.path.dirname(logFilePath))
    replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = repr(logFilePath)

    handlers.append(replacements[makeKey('PERSISTENT_LOG_HANDLER')])

  if console is not None:
    handlers.append(consoleStreamMappings[console])

  replacements[makeKey('ROOT_LOGGER_HANDLERS')] = ", ".join(handlers)

  # Set up log level for console handlers
  replacements[makeKey('CONSOLE_LOG_LEVEL')] = consoleLevel

  customConfig = StringIO()

  # Using pkg_resources to get the logging file, which should be packaged and
  # associated with this source file name.
  loggingFileContents = resource_string(__name__, configFilename)

  for lineNum, line in enumerate(loggingFileContents.splitlines()):
    if "$$" in line:
      for (key, value) in replacements.items():
        line = line.replace(key, value)

    # If there is still a replacement string in the line, we're missing it
    # from our replacements dict
    if "$$" in line and "$$<key>$$" not in line:
      raise RuntimeError(("The text %r, found at line #%d of file %r, "
                          "contains a string not found in our replacement "
                          "dict.") % (line, lineNum, configFilePath))

    customConfig.write("%s\n" % line)

  customConfig.seek(0)
  if python_version()[:3] >= '2.6':
    logging.config.fileConfig(customConfig, disable_existing_loggers=False)
  else:
    logging.config.fileConfig(customConfig)

  gLoggingInitialized = True
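# A minimal usage sketch for initLogging(): route console output to stderr and
# filter out DEBUG/INFO messages. Only the parameters documented above are used.
initLogging(console='stderr', consoleLevel='WARNING')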
def _genLoggingFilePath(): """ Generate a filepath for the calling app """ appName = os.path.splitext(os.path.basename(sys.argv[0]))[0] or 'UnknownApp' appLogDir = os.path.abspath(os.path.join( os.environ['NTA_LOG_DIR'], 'numenta-logs-%s' % (os.environ['USER'],), appName)) appLogFileName = '%s-%s-%s.log' % ( appName, long(time.mktime(time.gmtime())), os.getpid()) return os.path.join(appLogDir, appLogFileName)
def aggregationToMonthsSeconds(interval):
  """
  Return the number of months and seconds from an aggregation dict that
  represents a date and time.

  Interval is a dict that contains one or more of the following keys: 'years',
  'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'milliseconds',
  'microseconds'.

  For example:

  ::

    aggregationToMonthsSeconds({'years': 1, 'hours': 4, 'microseconds': 42}) ==
        {'months': 12, 'seconds': 14400.000042}

  :param interval: (dict) The aggregation interval representing a date and time
  :returns: (dict) number of months and seconds in the interval:
            ``{'months': XX, 'seconds': XX}``. The seconds is a floating point
            that can represent resolutions down to a microsecond.
  """
  seconds = interval.get('microseconds', 0) * 0.000001
  seconds += interval.get('milliseconds', 0) * 0.001
  seconds += interval.get('seconds', 0)
  seconds += interval.get('minutes', 0) * 60
  seconds += interval.get('hours', 0) * 60 * 60
  seconds += interval.get('days', 0) * 24 * 60 * 60
  seconds += interval.get('weeks', 0) * 7 * 24 * 60 * 60

  months = interval.get('months', 0)
  months += 12 * interval.get('years', 0)

  return {'months': months, 'seconds': seconds}
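# The docstring example above, runnable: 1 year normalizes to 12 months, and
# 4 hours plus 42 microseconds normalize to 14400.000042 seconds.
print(aggregationToMonthsSeconds({'years': 1, 'hours': 4, 'microseconds': 42}))
# -> {'months': 12, 'seconds': 14400.000042}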
def aggregationDivide(dividend, divisor):
  """
  Return the result from dividing two dicts that represent date and time.

  Both dividend and divisor are dicts that contain one or more of the
  following keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes',
  'seconds', 'milliseconds', 'microseconds'.

  For example:

  ::

    aggregationDivide({'hours': 4}, {'minutes': 15}) == 16

  :param dividend: (dict) The numerator, as a dict representing a date and time
  :param divisor: (dict) the denominator, as a dict representing a date and time
  :returns: (float) number of times divisor goes into dividend
  """
  # Convert each into months and seconds
  dividendMonthSec = aggregationToMonthsSeconds(dividend)
  divisorMonthSec = aggregationToMonthsSeconds(divisor)

  # It is a usage error to mix both months and seconds in the same operation
  if (dividendMonthSec['months'] != 0 and divisorMonthSec['seconds'] != 0) \
      or (dividendMonthSec['seconds'] != 0 and divisorMonthSec['months'] != 0):
    raise RuntimeError("Aggregation dicts with months/years can only be "
                       "inter-operated with other aggregation dicts that "
                       "contain months/years")

  if dividendMonthSec['months'] > 0:
    # Use the normalized month count so a divisor given in years works too
    return float(dividendMonthSec['months']) / divisorMonthSec['months']
  else:
    return float(dividendMonthSec['seconds']) / divisorMonthSec['seconds']
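# The docstring example above, runnable: 15 minutes goes into 4 hours 16 times.
print(aggregationDivide({'hours': 4}, {'minutes': 15}))  # -> 16.0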
def validateOpfJsonValue(value, opfJsonSchemaFilename): """ Validate a python object against an OPF json schema file :param value: target python object to validate (typically a dictionary) :param opfJsonSchemaFilename: (string) OPF json schema filename containing the json schema object. (e.g., opfTaskControlSchema.json) :raises: jsonhelpers.ValidationError when value fails json validation """ # Create a path by joining the filename with our local json schema root jsonSchemaPath = os.path.join(os.path.dirname(__file__), "jsonschema", opfJsonSchemaFilename) # Validate jsonhelpers.validate(value, schemaPath=jsonSchemaPath) return
def initLogger(obj): """ Helper function to create a logger object for the current object with the standard Numenta prefix. :param obj: (object) to add a logger to """ if inspect.isclass(obj): myClass = obj else: myClass = obj.__class__ logger = logging.getLogger(".".join( ['com.numenta', myClass.__module__, myClass.__name__])) return logger
def matchPatterns(patterns, keys):
  """ Returns a subset of the keys that match any of the given patterns

  :param patterns: (list) regular expressions to match
  :param keys: (list) keys to search for matches
  :returns: (list) the matching keys, or None if no patterns were given
  """
  results = []
  if patterns:
    for pattern in patterns:
      prog = re.compile(pattern)
      for key in keys:
        if prog.match(key):
          results.append(key)
  else:
    return None
  return results
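# A hedged usage sketch: select keys by regex prefix (the key names here are
# illustrative metric labels, not from the source).
keys = ['aae:window=200', 'altMAPE', 'nupicScore']
print(matchPatterns(['aae.*', 'alt.*'], keys))
# -> ['aae:window=200', 'altMAPE']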
def _getScaledValue(self, inpt): """ Convert the input, which is in normal space, into log space """ if inpt == SENTINEL_VALUE_FOR_MISSING_DATA: return None else: val = inpt if val < self.minval: val = self.minval elif val > self.maxval: val = self.maxval scaledVal = math.log10(val) return scaledVal
def getBucketIndices(self, inpt): """ See the function description in base.py """ # Get the scaled value scaledVal = self._getScaledValue(inpt) if scaledVal is None: return [None] else: return self.encoder.getBucketIndices(scaledVal)
def encodeIntoArray(self, inpt, output): """ See the function description in base.py """ # Get the scaled value scaledVal = self._getScaledValue(inpt) if scaledVal is None: output[0:] = 0 else: self.encoder.encodeIntoArray(scaledVal, output) if self.verbosity >= 2: print "input:", inpt, "scaledVal:", scaledVal, "output:", output print "decoded:", self.decodedToStr(self.decode(output))
def decode(self, encoded, parentFieldName=''): """ See the function description in base.py """ # Get the scalar values from the underlying scalar encoder (fieldsDict, fieldNames) = self.encoder.decode(encoded) if len(fieldsDict) == 0: return (fieldsDict, fieldNames) # Expect only 1 field assert(len(fieldsDict) == 1) # Convert each range into normal space (inRanges, inDesc) = fieldsDict.values()[0] outRanges = [] for (minV, maxV) in inRanges: outRanges.append((math.pow(10, minV), math.pow(10, maxV))) # Generate a text description of the ranges desc = "" numRanges = len(outRanges) for i in xrange(numRanges): if outRanges[i][0] != outRanges[i][1]: desc += "%.2f-%.2f" % (outRanges[i][0], outRanges[i][1]) else: desc += "%.2f" % (outRanges[i][0]) if i < numRanges-1: desc += ", " # Return result if parentFieldName != '': fieldName = "%s.%s" % (parentFieldName, self.name) else: fieldName = self.name return ({fieldName: (outRanges, desc)}, [fieldName])
def getBucketValues(self): """ See the function description in base.py """ # Need to re-create? if self._bucketValues is None: scaledValues = self.encoder.getBucketValues() self._bucketValues = [] for scaledValue in scaledValues: value = math.pow(10, scaledValue) self._bucketValues.append(value) return self._bucketValues
def getBucketInfo(self, buckets): """ See the function description in base.py """ scaledResult = self.encoder.getBucketInfo(buckets)[0] scaledValue = scaledResult.value value = math.pow(10, scaledValue) return [EncoderResult(value=value, scalar=value, encoding = scaledResult.encoding)]
def topDownCompute(self, encoded): """ See the function description in base.py """ scaledResult = self.encoder.topDownCompute(encoded)[0] scaledValue = scaledResult.value value = math.pow(10, scaledValue) return EncoderResult(value=value, scalar=value, encoding = scaledResult.encoding)
def closenessScores(self, expValues, actValues, fractional=True):
  """ See the function description in base.py """
  # Compute the percent error in log space
  if expValues[0] > 0:
    expValue = math.log10(expValues[0])
  else:
    expValue = self.minScaledValue

  if actValues[0] > 0:
    actValue = math.log10(actValues[0])
  else:
    actValue = self.minScaledValue

  if fractional:
    err = abs(expValue - actValue)
    pctErr = err / (self.maxScaledValue - self.minScaledValue)
    pctErr = min(1.0, pctErr)
    closeness = 1.0 - pctErr
  else:
    err = abs(expValue - actValue)
    closeness = err

  return numpy.array([closeness])
def export(self): """ Exports a network as a networkx MultiDiGraph intermediate representation suitable for visualization. :return: networkx MultiDiGraph """ graph = nx.MultiDiGraph() # Add regions to graph as nodes, annotated by name regions = self.network.getRegions() for idx in xrange(regions.getCount()): regionPair = regions.getByIndex(idx) regionName = regionPair[0] graph.add_node(regionName, label=regionName) # Add links between regions to graph as edges, annotate by input-output # name pairs for linkName, link in self.network.getLinks(): graph.add_edge(link.getSrcRegionName(), link.getDestRegionName(), src=link.getSrcOutputName(), dest=link.getDestInputName()) return graph
def bitsToString(arr): """Returns a string representing a numpy array of 0's and 1's""" s = array('c','.'*len(arr)) for i in xrange(len(arr)): if arr[i] == 1: s[i]='*' return s
def percentOverlap(x1, x2, size): """ Computes the percentage of overlap between vectors x1 and x2. @param x1 (array) binary vector @param x2 (array) binary vector @param size (int) length of binary vectors @return percentOverlap (float) percentage overlap between x1 and x2 """ nonZeroX1 = np.count_nonzero(x1) nonZeroX2 = np.count_nonzero(x2) minX1X2 = min(nonZeroX1, nonZeroX2) percentOverlap = 0 if minX1X2 > 0: percentOverlap = float(np.dot(x1, x2))/float(minX1X2) return percentOverlap
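# A small worked example for percentOverlap() (the vectors are illustrative):
# the two vectors share two of their three active bits, so the score is 2/3.
import numpy as np
x1 = np.array([1, 0, 1, 1, 0], dtype="uint32")
x2 = np.array([1, 1, 1, 0, 0], dtype="uint32")
print(percentOverlap(x1, x2, 5))  # -> 0.666...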
def resetVector(x1, x2): """ Copies the contents of vector x1 into vector x2. @param x1 (array) binary vector to be copied @param x2 (array) binary vector where x1 is copied """ size = len(x1) for i in range(size): x2[i] = x1[i]
def runCPU():
  """Poll CPU usage, make predictions, and plot the results. Runs forever."""
  # Create the model for predicting CPU usage.
  model = ModelFactory.create(model_params.MODEL_PARAMS)
  model.enableInference({'predictedField': 'cpu'})
  # The shifter will align prediction and actual values.
  shifter = InferenceShifter()
  # Keep the last WINDOW predicted and actual values for plotting.
  actHistory = deque([0.0] * WINDOW, maxlen=WINDOW)
  predHistory = deque([0.0] * WINDOW, maxlen=WINDOW)

  # Initialize the plot lines that we will update with each new record.
  actline, = plt.plot(range(WINDOW), actHistory)
  predline, = plt.plot(range(WINDOW), predHistory)
  # Set the y-axis range.
  actline.axes.set_ylim(0, 100)
  predline.axes.set_ylim(0, 100)

  while True:
    # Get the CPU usage.
    cpu = psutil.cpu_percent()

    # Run the input through the model and shift the resulting prediction.
    modelInput = {'cpu': cpu}
    result = shifter.shift(model.run(modelInput))

    # Update the trailing predicted and actual value deques.
    inference = result.inferences['multiStepBestPredictions'][5]
    if inference is not None:
      actHistory.append(result.rawInput['cpu'])
      predHistory.append(inference)

    # Redraw the chart with the new data.
    actline.set_ydata(actHistory)  # update the data
    predline.set_ydata(predHistory)  # update the data
    plt.draw()
    plt.legend(('actual', 'predicted'))

    # Wait SECONDS_PER_STEP seconds before the next iteration.
    try:
      plt.pause(SECONDS_PER_STEP)
    except Exception:
      pass
def _extractCallingMethodArgs(): """ Returns args dictionary from the calling method """ import inspect import copy callingFrame = inspect.stack()[1][0] argNames, _, _, frameLocalVarDict = inspect.getargvalues(callingFrame) argNames.remove("self") args = copy.copy(frameLocalVarDict) for varName in frameLocalVarDict: if varName not in argNames: args.pop(varName) return args
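# A hedged usage sketch: capture a constructor's keyword arguments for later
# re-instantiation (the wrapper class and parameter names are illustrative).
class TMWrapper(object):
  def __init__(self, numberOfCols=500, cellsPerColumn=10):
    self._initArgsDict = _extractCallingMethodArgs()

wrapper = TMWrapper(cellsPerColumn=32)
print(wrapper._initArgsDict)  # -> {'numberOfCols': 500, 'cellsPerColumn': 32}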
def write(self, proto): """Populate serialization proto instance. :param proto: (BacktrackingTMCppProto) the proto instance to populate """ # Write base class to proto.baseTM (BacktrackingTMProto) super(BacktrackingTMCPP, self).write(proto.baseTM) self.cells4.write(proto.cells4) proto.makeCells4Ephemeral = self.makeCells4Ephemeral proto.seed = self.seed proto.checkSynapseConsistency = self.checkSynapseConsistency proto.initArgs = json.dumps(self._initArgsDict)
def read(cls, proto):
  """Deserialize from proto instance.

  :param proto: (BacktrackingTMCppProto) the proto instance to read from
  """
  # Use base class to create initial class from proto.baseTM
  # (BacktrackingTMProto)
  obj = BacktrackingTM.read(proto.baseTM)
  obj.__class__ = cls

  # Additional CPP-specific deserialization
  newCells4 = Cells4.read(proto.cells4)
  obj.cells4 = newCells4
  obj.makeCells4Ephemeral = proto.makeCells4Ephemeral
  obj.seed = proto.seed
  obj.checkSynapseConsistency = proto.checkSynapseConsistency
  obj._initArgsDict = json.loads(proto.initArgs)
  # Convert unicode to str
  obj._initArgsDict["outputType"] = str(obj._initArgsDict["outputType"])

  # Initialize ephemeral attributes
  obj.allocateStatesInCPP = False
  obj.retrieveLearningStates = False
  obj._setStatePointers()
  return obj
def _getEphemeralMembers(self): """ List of our member variables that we don't need to be saved """ e = BacktrackingTM._getEphemeralMembers(self) if self.makeCells4Ephemeral: e.extend(['cells4']) return e
def _initEphemerals(self):
  """
  Initialize all ephemeral members after being restored from a pickled state.
  """
  BacktrackingTM._initEphemerals(self)
  #---------------------------------------------------------------------------
  # cells4 specific initialization

  # If True, let C++ allocate memory for activeState, predictedState, and
  # learnState. In this case we can retrieve copies of these states but can't
  # set them directly from Python. If False, Python can allocate them as
  # numpy arrays and we can pass pointers to the C++ using setStatePointers
  self.allocateStatesInCPP = False

  # Set this to true for debugging or accessing learning states
  self.retrieveLearningStates = False

  if self.makeCells4Ephemeral:
    self._initCells4()
def compute(self, bottomUpInput, enableLearn, enableInference=None): """ Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.compute`. """ # The C++ TM takes 32 bit floats as input. uint32 works as well since the # code only checks whether elements are non-zero assert (bottomUpInput.dtype == numpy.dtype('float32')) or \ (bottomUpInput.dtype == numpy.dtype('uint32')) or \ (bottomUpInput.dtype == numpy.dtype('int32')) self.iterationIdx = self.iterationIdx + 1 # As a speed optimization for now (until we need online learning), skip # computing the inference output while learning if enableInference is None: if enableLearn: enableInference = False else: enableInference = True # ==================================================================== # Run compute and retrieve selected state and member variables self._setStatePointers() y = self.cells4.compute(bottomUpInput, enableInference, enableLearn) self.currentOutput = y.reshape((self.numberOfCols, self.cellsPerColumn)) self.avgLearnedSeqLength = self.cells4.getAvgLearnedSeqLength() self._copyAllocatedStates() # ======================================================================== # Update the prediction score stats # Learning always includes inference if self.collectStats: activeColumns = bottomUpInput.nonzero()[0] if enableInference: predictedState = self.infPredictedState['t-1'] else: predictedState = self.lrnPredictedState['t-1'] self._updateStatsInferEnd(self._internalStats, activeColumns, predictedState, self.colConfidence['t-1']) # Finally return the TM output output = self._computeOutput() # Print diagnostic information based on the current verbosity level self.printComputeEnd(output, learn=enableLearn) self.resetCalled = False return output
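# A minimal sketch of calling compute(): the bottom-up input must be a dense
# float32 (or int32/uint32) array whose non-zero entries mark active columns.
# `tm` is assumed to be an already-constructed BacktrackingTMCPP instance and
# the column indices are illustrative.
import numpy

bottomUpInput = numpy.zeros(tm.numberOfCols, dtype='float32')
bottomUpInput[[3, 17, 42]] = 1
# With enableInference left at None, learning-only mode skips the inference
# output as described above.
output = tm.compute(bottomUpInput, enableLearn=True)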
def _copyAllocatedStates(self): """If state is allocated in CPP, copy over the data into our numpy arrays.""" # Get learn states if we need to print them out if self.verbosity > 1 or self.retrieveLearningStates: (activeT, activeT1, predT, predT1) = self.cells4.getLearnStates() self.lrnActiveState['t-1'] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn)) self.lrnActiveState['t'] = activeT.reshape((self.numberOfCols, self.cellsPerColumn)) self.lrnPredictedState['t-1'] = predT1.reshape((self.numberOfCols, self.cellsPerColumn)) self.lrnPredictedState['t'] = predT.reshape((self.numberOfCols, self.cellsPerColumn)) if self.allocateStatesInCPP: assert False (activeT, activeT1, predT, predT1, colConfidenceT, colConfidenceT1, confidenceT, confidenceT1) = self.cells4.getStates() self.cellConfidence['t'] = confidenceT.reshape((self.numberOfCols, self.cellsPerColumn)) self.cellConfidence['t-1'] = confidenceT1.reshape((self.numberOfCols, self.cellsPerColumn)) self.colConfidence['t'] = colConfidenceT.reshape(self.numberOfCols) self.colConfidence['t-1'] = colConfidenceT1.reshape(self.numberOfCols) self.infActiveState['t-1'] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn)) self.infActiveState['t'] = activeT.reshape((self.numberOfCols, self.cellsPerColumn)) self.infPredictedState['t-1'] = predT1.reshape((self.numberOfCols, self.cellsPerColumn)) self.infPredictedState['t'] = predT.reshape((self.numberOfCols, self.cellsPerColumn))
def _setStatePointers(self): """If we are having CPP use numpy-allocated buffers, set these buffer pointers. This is a relatively fast operation and, for safety, should be done before every call to the cells4 compute methods. This protects us in situations where code can cause Python or numpy to create copies.""" if not self.allocateStatesInCPP: self.cells4.setStatePointers( self.infActiveState["t"], self.infActiveState["t-1"], self.infPredictedState["t"], self.infPredictedState["t-1"], self.colConfidence["t"], self.colConfidence["t-1"], self.cellConfidence["t"], self.cellConfidence["t-1"])
def reset(self): """ Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.reset`. """ if self.verbosity >= 3: print "TM Reset" self._setStatePointers() self.cells4.reset() BacktrackingTM.reset(self)
def trimSegments(self, minPermanence=None, minNumSyns=None): """ Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.trimSegments`. """ # Fill in defaults if minPermanence is None: minPermanence = 0.0 if minNumSyns is None: minNumSyns = 0 # Print all cells if verbosity says to if self.verbosity >= 5: print "Cells, all segments:" self.printCells(predictedOnly=False) return self.cells4.trimSegments(minPermanence=minPermanence, minNumSyns=minNumSyns)
def printSegmentUpdates(self): """ Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printSegmentUpdates`. """ # TODO: need to add C++ accessors to implement this method assert False print "=== SEGMENT UPDATES ===, Num = ", len(self.segmentUpdates) for key, updateList in self.segmentUpdates.iteritems(): c,i = key[0],key[1] print c,i,updateList
def _slowIsSegmentActive(self, seg, timeStep): """ A segment is active if it has >= activationThreshold connected synapses that are active due to infActiveState. """ numSyn = seg.size() numActiveSyns = 0 for synIdx in xrange(numSyn): if seg.getPermanence(synIdx) < self.connectedPerm: continue sc, si = self.getColCellIdx(seg.getSrcCellIdx(synIdx)) if self.infActiveState[timeStep][sc, si]: numActiveSyns += 1 if numActiveSyns >= self.activationThreshold: return True return numActiveSyns >= self.activationThreshold
def printCell(self, c, i, onlyActiveSegments=False): """ Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printCell`. """ nSegs = self.cells4.nSegmentsOnCell(c,i) if nSegs > 0: segList = self.cells4.getNonEmptySegList(c,i) gidx = c * self.cellsPerColumn + i print "Column", c, "Cell", i, "(%d)"%(gidx),":", nSegs, "segment(s)" for k,segIdx in enumerate(segList): seg = self.cells4.getSegment(c, i, segIdx) isActive = self._slowIsSegmentActive(seg, 't') if onlyActiveSegments and not isActive: continue isActiveStr = "*" if isActive else " " print " %sSeg #%-3d" % (isActiveStr, segIdx), print seg.size(), print seg.isSequenceSegment(), "%9.7f" % (seg.dutyCycle( self.cells4.getNLrnIterations(), False, True)), # numPositive/totalActivations print "(%4d/%-4d)" % (seg.getPositiveActivations(), seg.getTotalActivations()), # Age print "%4d" % (self.cells4.getNLrnIterations() - seg.getLastActiveIteration()), numSyn = seg.size() for s in xrange(numSyn): sc, si = self.getColCellIdx(seg.getSrcCellIdx(s)) print "[%d,%d]%4.2f"%(sc, si, seg.getPermanence(s)), print
def getColCellIdx(self, idx): """ Get column and cell within column from a global cell index. The global index is ``idx = colIdx * nCellsPerCol() + cellIdxInCol`` :param idx: (int) global cell index :returns: (tuple) (colIdx, cellIdxInCol) """ c = idx//self.cellsPerColumn i = idx - c*self.cellsPerColumn return c,i
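# Worked example of the index arithmetic above (values are illustrative):
# with cellsPerColumn = 32, global cell index 70 maps to column 70 // 32 = 2
# and cell 70 - 2*32 = 6 within that column.
cellsPerColumn = 32
idx = 70
c = idx // cellsPerColumn      # 2
i = idx - c * cellsPerColumn   # 6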
def getSegmentOnCell(self, c, i, segIdx): """ Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`. """ segList = self.cells4.getNonEmptySegList(c,i) seg = self.cells4.getSegment(c, i, segList[segIdx]) numSyn = seg.size() assert numSyn != 0 # Accumulate segment information result = [] result.append([int(segIdx), bool(seg.isSequenceSegment()), seg.getPositiveActivations(), seg.getTotalActivations(), seg.getLastActiveIteration(), seg.getLastPosDutyCycle(), seg.getLastPosDutyCycleIteration()]) for s in xrange(numSyn): sc, si = self.getColCellIdx(seg.getSrcCellIdx(s)) result.append([int(sc), int(si), seg.getPermanence(s)]) return result
def getSegmentInfo(self, collectActiveData = False):
  """
  Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentInfo`.
  """
  # Requires appropriate accessors in C++ cells4 (currently unimplemented)
  assert collectActiveData == False

  nSegments, nSynapses = self.getNumSegments(), self.cells4.nSynapses()
  distSegSizes, distNSegsPerCell = {}, {}
  nActiveSegs, nActiveSynapses = 0, 0
  distPermValues = {}   # Num synapses with given permanence values

  numAgeBuckets = 20
  distAges = []
  ageBucketSize = int((self.iterationIdx + numAgeBuckets) / numAgeBuckets)
  for i in range(numAgeBuckets):
    distAges.append(['%d-%d' % (i*ageBucketSize, (i+1)*ageBucketSize-1), 0])

  for c in xrange(self.numberOfCols):
    for i in xrange(self.cellsPerColumn):

      # Update histogram counting cell sizes
      nSegmentsThisCell = self.getNumSegmentsInCell(c, i)
      if nSegmentsThisCell > 0:
        if distNSegsPerCell.has_key(nSegmentsThisCell):
          distNSegsPerCell[nSegmentsThisCell] += 1
        else:
          distNSegsPerCell[nSegmentsThisCell] = 1

        # Update histogram counting segment sizes.
        segList = self.cells4.getNonEmptySegList(c, i)
        for segIdx in xrange(nSegmentsThisCell):
          seg = self.getSegmentOnCell(c, i, segIdx)
          nSynapsesThisSeg = len(seg) - 1
          if nSynapsesThisSeg > 0:
            if distSegSizes.has_key(nSynapsesThisSeg):
              distSegSizes[nSynapsesThisSeg] += 1
            else:
              distSegSizes[nSynapsesThisSeg] = 1

            # Accumulate permanence value histogram
            for syn in seg[1:]:
              p = int(syn[2]*10)
              if distPermValues.has_key(p):
                distPermValues[p] += 1
              else:
                distPermValues[p] = 1

          segObj = self.cells4.getSegment(c, i, segList[segIdx])
          age = self.iterationIdx - segObj.getLastActiveIteration()
          ageBucket = int(age/ageBucketSize)
          distAges[ageBucket][1] += 1

  return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,
          distSegSizes, distNSegsPerCell, distPermValues, distAges)
def main(argv):
  """
  The main function of the HypersearchWorker script. This parses the command
  line arguments, instantiates a HypersearchWorker instance, and then runs it.

  Parameters:
  ----------------------------------------------------------------------
  retval:     jobID of the job we ran. This is used by unit test code
                when calling this worker using the --params command
                line option (which tells this worker to insert the job
                itself).
  """
  parser = OptionParser(helpString)

  parser.add_option("--jobID", action="store", type="int", default=None,
        help="jobID of the job within the dbTable [default: %default].")

  parser.add_option("--modelID", action="store", type="str", default=None,
        help=("Tell worker to re-run this model ID. When specified, jobID "
              "must also be specified [default: %default]."))

  parser.add_option("--workerID", action="store", type="str", default=None,
        help=("workerID of the scheduler's SlotAgent (GenericWorker) that "
              "hosts this SpecializedWorker [default: %default]."))

  parser.add_option("--params", action="store", default=None,
        help="Create and execute a new hypersearch request using this JSON "
             "format params string. This is helpful for unit tests and "
             "debugging. When specified jobID must NOT be specified. "
             "[default: %default].")

  parser.add_option("--clearModels", action="store_true", default=False,
        help="clear out the models table before starting [default: %default].")

  parser.add_option("--resetJobStatus", action="store_true", default=False,
        help="Reset the job status before starting [default: %default].")

  parser.add_option("--logLevel", action="store", type="int", default=None,
        help="override default log level. Pass in an integer value that "
             "represents the desired logging level (10=logging.DEBUG, "
             "20=logging.INFO, etc.) [default: %default].")

  # Evaluate command line arguments
  (options, args) = parser.parse_args(argv[1:])
  if len(args) != 0:
    raise RuntimeError("Expected no command line arguments, but got: %s" %
                       (args))

  if (options.jobID and options.params):
    raise RuntimeError("--jobID and --params can not be used at the same time")

  if (options.jobID is None and options.params is None):
    raise RuntimeError("Either --jobID or --params must be specified.")

  initLogging(verbose=True)

  # Instantiate the HypersearchWorker and run it
  hst = HypersearchWorker(options, argv[1:])

  # Normal use. This is one among a number of workers. If we encounter
  # an exception at the outer loop here, we fail the entire job.
  if options.params is None:
    try:
      jobID = hst.run()

    except Exception, e:
      jobID = options.jobID
      msg = StringIO.StringIO()
      print >>msg, "%s: Exception occurred in Hypersearch Worker: %r" % \
         (ErrorCodes.hypersearchLogicErr, e)
      traceback.print_exc(None, msg)

      completionReason = ClientJobsDAO.CMPL_REASON_ERROR
      completionMsg = msg.getvalue()
      hst.logger.error(completionMsg)

      # If no other worker already marked the job as failed, do so now.
      jobsDAO = ClientJobsDAO.get()
      workerCmpReason = jobsDAO.jobGetFields(options.jobID,
          ['workerCompletionReason'])[0]
      if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
        jobsDAO.jobSetFields(options.jobID, fields=dict(
            cancel=True,
            workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
            workerCompletionMsg = completionMsg),
            useConnectionID=False,
            ignoreUnchanged=True)

  # Run just 1 worker for the entire job. Used for unit tests that run in
  # 1 process
  else:
    jobID = None
    completionReason = ClientJobsDAO.CMPL_REASON_SUCCESS
    completionMsg = "Success"

    try:
      jobID = hst.run()
    except Exception, e:
      jobID = hst._options.jobID
      completionReason = ClientJobsDAO.CMPL_REASON_ERROR
      completionMsg = "ERROR: %s" % (e,)
      raise
    finally:
      if jobID is not None:
        cjDAO = ClientJobsDAO.get()
        cjDAO.jobSetCompleted(jobID=jobID,
                              completionReason=completionReason,
                              completionMsg=completionMsg)

  return jobID
def _processUpdatedModels(self, cjDAO): """ For all models that modified their results since last time this method was called, send their latest results to the Hypersearch implementation. """ # Get the latest update counters. This returns a list of tuples: # (modelID, updateCounter) curModelIDCtrList = cjDAO.modelsGetUpdateCounters(self._options.jobID) if len(curModelIDCtrList) == 0: return self.logger.debug("current modelID/updateCounters: %s" \ % (str(curModelIDCtrList))) self.logger.debug("last modelID/updateCounters: %s" \ % (str(self._modelIDCtrList))) # -------------------------------------------------------------------- # Find out which ones have changed update counters. Since these are models # that the Hypersearch implementation already knows about, we don't need to # send params or paramsHash curModelIDCtrList = sorted(curModelIDCtrList) numItems = len(curModelIDCtrList) # Each item in the list we are filtering contains: # (idxIntoModelIDCtrList, (modelID, curCtr), (modelID, oldCtr)) # We only want to keep the ones where the oldCtr != curCtr changedEntries = filter(lambda x:x[1][1] != x[2][1], itertools.izip(xrange(numItems), curModelIDCtrList, self._modelIDCtrList)) if len(changedEntries) > 0: # Update values in our cache self.logger.debug("changedEntries: %s", str(changedEntries)) for entry in changedEntries: (idx, (modelID, curCtr), (_, oldCtr)) = entry self._modelIDCtrDict[modelID] = curCtr assert (self._modelIDCtrList[idx][0] == modelID) assert (curCtr != oldCtr) self._modelIDCtrList[idx][1] = curCtr # Tell Hypersearch implementation of the updated results for each model changedModelIDs = [x[1][0] for x in changedEntries] modelResults = cjDAO.modelsGetResultAndStatus(changedModelIDs) for mResult in modelResults: results = mResult.results if results is not None: results = json.loads(results) self._hs.recordModelProgress(modelID=mResult.modelId, modelParams = None, modelParamsHash = mResult.engParamsHash, results = results, completed = (mResult.status == cjDAO.STATUS_COMPLETED), completionReason = mResult.completionReason, matured = mResult.engMatured, numRecords = mResult.numRecords) # -------------------------------------------------------------------- # Figure out which ones are newly arrived and add them to our # cache curModelIDSet = set([x[0] for x in curModelIDCtrList]) newModelIDs = curModelIDSet.difference(self._modelIDSet) if len(newModelIDs) > 0: # Add new modelID and counters to our cache self._modelIDSet.update(newModelIDs) curModelIDCtrDict = dict(curModelIDCtrList) # Get the results for each of these models and send them to the # Hypersearch implementation. 
modelInfos = cjDAO.modelsGetResultAndStatus(newModelIDs) modelInfos.sort() modelParamsAndHashs = cjDAO.modelsGetParams(newModelIDs) modelParamsAndHashs.sort() for (mResult, mParamsAndHash) in itertools.izip(modelInfos, modelParamsAndHashs): modelID = mResult.modelId assert (modelID == mParamsAndHash.modelId) # Update our cache of IDs and update counters self._modelIDCtrDict[modelID] = curModelIDCtrDict[modelID] self._modelIDCtrList.append([modelID, curModelIDCtrDict[modelID]]) # Tell the Hypersearch implementation of the new model results = mResult.results if results is not None: results = json.loads(mResult.results) self._hs.recordModelProgress(modelID = modelID, modelParams = json.loads(mParamsAndHash.params), modelParamsHash = mParamsAndHash.engParamsHash, results = results, completed = (mResult.status == cjDAO.STATUS_COMPLETED), completionReason = (mResult.completionReason), matured = mResult.engMatured, numRecords = mResult.numRecords) # Keep our list sorted self._modelIDCtrList.sort()
def run(self):
  """ Run this worker.

  Parameters:
  ----------------------------------------------------------------------
  retval:     jobID of the job we ran. This is used by unit test code
                when calling this worker using the --params command
                line option (which tells this worker to insert the job
                itself).
  """
  # Easier access to options
  options = self._options

  # ---------------------------------------------------------------------
  # Connect to the jobs database
  self.logger.info("Connecting to the jobs database")
  cjDAO = ClientJobsDAO.get()

  # Get our worker ID
  self._workerID = cjDAO.getConnectionID()

  if options.clearModels:
    cjDAO.modelsClearAll()

  # -------------------------------------------------------------------------
  # if params were specified on the command line, insert a new job using
  # them.
  if options.params is not None:
    options.jobID = cjDAO.jobInsert(client='hwTest',
            cmdLine="echo 'test mode'",
            params=options.params, alreadyRunning=True,
            minimumWorkers=1, maximumWorkers=1,
            jobType = cjDAO.JOB_TYPE_HS)
  if options.workerID is not None:
    wID = options.workerID
  else:
    wID = self._workerID

  buildID = Configuration.get('nupic.software.buildNumber', 'N/A')
  logPrefix = '<BUILDID=%s, WORKER=HW, WRKID=%s, JOBID=%s> ' % \
              (buildID, wID, options.jobID)
  ExtendedLogger.setLogPrefix(logPrefix)

  # ---------------------------------------------------------------------
  # Get the search parameters
  # If asked to reset the job status, do that now
  if options.resetJobStatus:
    cjDAO.jobSetFields(options.jobID,
        fields={'workerCompletionReason': ClientJobsDAO.CMPL_REASON_SUCCESS,
                'cancel': False,
                #'engWorkerState': None
                },
        useConnectionID=False,
        ignoreUnchanged=True)
  jobInfo = cjDAO.jobInfo(options.jobID)
  self.logger.info("Job info retrieved: %s" % (str(clippedObj(jobInfo))))

  # ---------------------------------------------------------------------
  # Instantiate the Hypersearch object, which will handle the logic of
  # which models to create when we need more to evaluate.
  jobParams = json.loads(jobInfo.params)

  # Validate job params
  jsonSchemaPath = os.path.join(os.path.dirname(__file__),
                                "jsonschema",
                                "jobParamsSchema.json")
  validate(jobParams, schemaPath=jsonSchemaPath)

  hsVersion = jobParams.get('hsVersion', None)
  if hsVersion == 'v2':
    self._hs = HypersearchV2(searchParams=jobParams,
                             workerID=self._workerID,
                             cjDAO=cjDAO,
                             jobID=options.jobID,
                             logLevel=options.logLevel)
  else:
    raise RuntimeError("Invalid Hypersearch implementation (%s) specified" %
                       (hsVersion))

  # =====================================================================
  # The main loop.
  try:
    exit = False
    numModelsTotal = 0
    print >>sys.stderr, "reporter:status:Evaluating first model..."
    while not exit:

      # ------------------------------------------------------------------
      # Choose a model to evaluate
      batchSize = 10              # How many to try at a time.
      modelIDToRun = None
      while modelIDToRun is None:

        if options.modelID is None:
          # -----------------------------------------------------------------
          # Get the latest results on all running models and send them to
          # the Hypersearch implementation.
          # This calls cjDAO.modelsGetUpdateCounters(), compares the
          # updateCounters with what we have cached, fetches the results for
          # the changed and new models, and sends those to the Hypersearch
          # implementation's self._hs.recordModelProgress() method.
self._processUpdatedModels(cjDAO) # -------------------------------------------------------------------- # Create a new batch of models (exit, newModels) = self._hs.createModels(numModels = batchSize) if exit: break # No more models left to create, just loop. The _hs is waiting for # all remaining running models to complete, and may pick up on an # orphan if it detects one. if len(newModels) == 0: continue # Try and insert one that we will run for (modelParams, modelParamsHash, particleHash) in newModels: jsonModelParams = json.dumps(modelParams) (modelID, ours) = cjDAO.modelInsertAndStart(options.jobID, jsonModelParams, modelParamsHash, particleHash) # Some other worker is already running it, tell the Hypersearch object # so that it doesn't try and insert it again if not ours: mParamsAndHash = cjDAO.modelsGetParams([modelID])[0] mResult = cjDAO.modelsGetResultAndStatus([modelID])[0] results = mResult.results if results is not None: results = json.loads(results) modelParams = json.loads(mParamsAndHash.params) particleHash = cjDAO.modelsGetFields(modelID, ['engParticleHash'])[0] particleInst = "%s.%s" % ( modelParams['particleState']['id'], modelParams['particleState']['genIdx']) self.logger.info("Adding model %d to our internal DB " \ "because modelInsertAndStart() failed to insert it: " \ "paramsHash=%s, particleHash=%s, particleId='%s'", modelID, mParamsAndHash.engParamsHash.encode('hex'), particleHash.encode('hex'), particleInst) self._hs.recordModelProgress(modelID = modelID, modelParams = modelParams, modelParamsHash = mParamsAndHash.engParamsHash, results = results, completed = (mResult.status == cjDAO.STATUS_COMPLETED), completionReason = mResult.completionReason, matured = mResult.engMatured, numRecords = mResult.numRecords) else: modelIDToRun = modelID break else: # A specific modelID was passed on the command line modelIDToRun = int(options.modelID) mParamsAndHash = cjDAO.modelsGetParams([modelIDToRun])[0] modelParams = json.loads(mParamsAndHash.params) modelParamsHash = mParamsAndHash.engParamsHash # Make us the worker cjDAO.modelSetFields(modelIDToRun, dict(engWorkerConnId=self._workerID)) if False: # Change the hash and params of the old entry so that we can # create a new model with the same params for attempt in range(1000): paramsHash = hashlib.md5("OrphanParams.%d.%d" % (modelIDToRun, attempt)).digest() particleHash = hashlib.md5("OrphanParticle.%d.%d" % (modelIDToRun, attempt)).digest() try: cjDAO.modelSetFields(modelIDToRun, dict(engParamsHash=paramsHash, engParticleHash=particleHash)) success = True except: success = False if success: break if not success: raise RuntimeError("Unexpected failure to change paramsHash and " "particleHash of orphaned model") (modelIDToRun, ours) = cjDAO.modelInsertAndStart(options.jobID, mParamsAndHash.params, modelParamsHash) # ^^^ end while modelIDToRun ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # --------------------------------------------------------------- # We have a model, evaluate it now # All done? 
if exit: break # Run the model now self.logger.info("RUNNING MODEL GID=%d, paramsHash=%s, params=%s", modelIDToRun, modelParamsHash.encode('hex'), modelParams) # --------------------------------------------------------------------- # Construct model checkpoint GUID for this model: # jobParams['persistentJobGUID'] contains the client's (e.g., API Server) # persistent, globally-unique model identifier, which is what we need; persistentJobGUID = jobParams['persistentJobGUID'] assert persistentJobGUID, "persistentJobGUID: %r" % (persistentJobGUID,) modelCheckpointGUID = jobInfo.client + "_" + persistentJobGUID + ( '_' + str(modelIDToRun)) self._hs.runModel(modelID=modelIDToRun, jobID = options.jobID, modelParams=modelParams, modelParamsHash=modelParamsHash, jobsDAO=cjDAO, modelCheckpointGUID=modelCheckpointGUID) # TODO: don't increment for orphaned models numModelsTotal += 1 self.logger.info("COMPLETED MODEL GID=%d; EVALUATED %d MODELs", modelIDToRun, numModelsTotal) print >>sys.stderr, "reporter:status:Evaluated %d models..." % \ (numModelsTotal) print >>sys.stderr, "reporter:counter:HypersearchWorker,numModels,1" if options.modelID is not None: exit = True # ^^^ end while not exit finally: # Provide Hypersearch instance an opportunity to clean up temporary files self._hs.close() self.logger.info("FINISHED. Evaluated %d models." % (numModelsTotal)) print >>sys.stderr, "reporter:status:Finished, evaluated %d models" % (numModelsTotal) return options.jobID
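# Typical invocations, based on the options defined in main() above. The
# script name and the params payload are placeholders, not fixed paths:
#
#   Run as one worker among many against an existing job:
#     python hypersearch_worker.py --jobID=1234
#
#   Insert and run a new job in-process (unit-test style); --jobID must NOT
#   be given together with --params:
#     python hypersearch_worker.py --params='{"hsVersion": "v2", ...}'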
def getBucketIndices(self, x): """ See method description in base.py """ if ((isinstance(x, float) and math.isnan(x)) or x == SENTINEL_VALUE_FOR_MISSING_DATA): return [None] if self._offset is None: self._offset = x bucketIdx = ( (self._maxBuckets/2) + int(round((x - self._offset) / self.resolution)) ) if bucketIdx < 0: bucketIdx = 0 elif bucketIdx >= self._maxBuckets: bucketIdx = self._maxBuckets-1 return [bucketIdx]
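# Worked example of the bucket formula above, with assumed parameters. The
# first value encoded becomes the offset and lands in the middle bucket;
# later values land a whole number of `resolution` steps away from it.
resolution = 0.5
maxBuckets = 1000
offset = 23.0          # first value seen
x = 24.2
bucketIdx = maxBuckets // 2 + int(round((x - offset) / resolution))
# (24.2 - 23.0) / 0.5 = 2.4, which rounds to 2, so bucketIdx == 502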
def mapBucketIndexToNonZeroBits(self, index):
  """
  Given a bucket index, return the list of non-zero bits. If the bucket
  index does not exist, it is created. If the index falls outside our range
  we clip it.

  :param index: The bucket index to get non-zero bits for.
  :returns: numpy array of indices of non-zero bits for specified index.
  """
  if index < 0:
    index = 0

  if index >= self._maxBuckets:
    index = self._maxBuckets-1

  if not self.bucketMap.has_key(index):
    if self.verbosity >= 2:
      print "Adding additional buckets to handle index=", index
    self._createBucket(index)
  return self.bucketMap[index]
def encodeIntoArray(self, x, output): """ See method description in base.py """ if x is not None and not isinstance(x, numbers.Number): raise TypeError( "Expected a scalar input but got input of type %s" % type(x)) # Get the bucket index to use bucketIdx = self.getBucketIndices(x)[0] # None is returned for missing value in which case we return all 0's. output[0:self.n] = 0 if bucketIdx is not None: output[self.mapBucketIndexToNonZeroBits(bucketIdx)] = 1
def _createBucket(self, index): """ Create the given bucket index. Recursively create as many in-between bucket indices as necessary. """ if index < self.minIndex: if index == self.minIndex - 1: # Create a new representation that has exactly w-1 overlapping bits # as the min representation self.bucketMap[index] = self._newRepresentation(self.minIndex, index) self.minIndex = index else: # Recursively create all the indices above and then this index self._createBucket(index+1) self._createBucket(index) else: if index == self.maxIndex + 1: # Create a new representation that has exactly w-1 overlapping bits # as the max representation self.bucketMap[index] = self._newRepresentation(self.maxIndex, index) self.maxIndex = index else: # Recursively create all the indices below and then this index self._createBucket(index-1) self._createBucket(index)
def _newRepresentation(self, index, newIndex): """ Return a new representation for newIndex that overlaps with the representation at index by exactly w-1 bits """ newRepresentation = self.bucketMap[index].copy() # Choose the bit we will replace in this representation. We need to shift # this bit deterministically. If this is always chosen randomly then there # is a 1 in w chance of the same bit being replaced in neighboring # representations, which is fairly high ri = newIndex % self.w # Now we choose a bit such that the overlap rules are satisfied. newBit = self.random.getUInt32(self.n) newRepresentation[ri] = newBit while newBit in self.bucketMap[index] or \ not self._newRepresentationOK(newRepresentation, newIndex): self.numTries += 1 newBit = self.random.getUInt32(self.n) newRepresentation[ri] = newBit return newRepresentation
def _newRepresentationOK(self, newRep, newIndex): """ Return True if this new candidate representation satisfies all our overlap rules. Since we know that neighboring representations differ by at most one bit, we compute running overlaps. """ if newRep.size != self.w: return False if (newIndex < self.minIndex-1) or (newIndex > self.maxIndex+1): raise ValueError("newIndex must be within one of existing indices") # A binary representation of newRep. We will use this to test containment newRepBinary = numpy.array([False]*self.n) newRepBinary[newRep] = True # Midpoint midIdx = self._maxBuckets/2 # Start by checking the overlap at minIndex runningOverlap = self._countOverlap(self.bucketMap[self.minIndex], newRep) if not self._overlapOK(self.minIndex, newIndex, overlap=runningOverlap): return False # Compute running overlaps all the way to the midpoint for i in range(self.minIndex+1, midIdx+1): # This is the bit that is going to change newBit = (i-1)%self.w # Update our running overlap if newRepBinary[self.bucketMap[i-1][newBit]]: runningOverlap -= 1 if newRepBinary[self.bucketMap[i][newBit]]: runningOverlap += 1 # Verify our rules if not self._overlapOK(i, newIndex, overlap=runningOverlap): return False # At this point, runningOverlap contains the overlap for midIdx # Compute running overlaps all the way to maxIndex for i in range(midIdx+1, self.maxIndex+1): # This is the bit that is going to change newBit = i%self.w # Update our running overlap if newRepBinary[self.bucketMap[i-1][newBit]]: runningOverlap -= 1 if newRepBinary[self.bucketMap[i][newBit]]: runningOverlap += 1 # Verify our rules if not self._overlapOK(i, newIndex, overlap=runningOverlap): return False return True
def _countOverlapIndices(self, i, j):
  """
  Return the overlap between bucket indices i and j
  """
  if self.bucketMap.has_key(i) and self.bucketMap.has_key(j):
    iRep = self.bucketMap[i]
    jRep = self.bucketMap[j]
    return self._countOverlap(iRep, jRep)
  else:
    raise ValueError("Either i or j doesn't exist")
def _countOverlap(rep1, rep2): """ Return the overlap between two representations. rep1 and rep2 are lists of non-zero indices. """ overlap = 0 for e in rep1: if e in rep2: overlap += 1 return overlap
def _overlapOK(self, i, j, overlap=None):
  """
  Return True if the given overlap between bucket indices i and j is
  acceptable. If overlap is not specified, calculate it from the bucketMap
  """
  if overlap is None:
    overlap = self._countOverlapIndices(i, j)
  if abs(i-j) < self.w:
    return overlap == (self.w - abs(i-j))
  else:
    return overlap <= self._maxOverlap
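# A standalone restatement of the overlap rule above, handy for checking
# candidate representations by hand: buckets closer than w apart must overlap
# in exactly w - distance bits, while buckets further apart may share at most
# _maxOverlap bits. The parameter values are illustrative.
w, maxOverlap = 21, 2

def overlapOK(i, j, overlap):
  if abs(i - j) < w:
    return overlap == w - abs(i - j)
  return overlap <= maxOverlap

print(overlapOK(500, 501, 20))  # True: immediate neighbors share w-1 bits
print(overlapOK(500, 600, 3))   # False: distant buckets share too many bits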
def _initializeBucketMap(self, maxBuckets, offset):
  """
  Initialize the bucket map assuming the given number of maxBuckets.
  """
  # The first bucket index will be _maxBuckets / 2 and bucket indices will be
  # allowed to grow lower or higher as long as they don't become negative.
  # _maxBuckets is required because the current SDR Classifier assumes bucket
  # indices must be non-negative. This normally does not need to be changed
  # but if altered, should be set to an even number.
  self._maxBuckets = maxBuckets
  self.minIndex = self._maxBuckets / 2
  self.maxIndex = self._maxBuckets / 2

  # The scalar offset used to map scalar values to bucket indices. The middle
  # bucket will correspond to numbers in the range
  # [offset-resolution/2, offset+resolution/2).
  # The bucket index for a number x will be:
  #     maxBuckets/2 + int( round( (x-offset)/resolution ) )
  self._offset = offset

  # This dictionary maps a bucket index into its bit representation. We
  # initialize the class with a single bucket at the middle index,
  # _maxBuckets / 2.
  self.bucketMap = {}

  def _permutation(n):
    r = numpy.arange(n, dtype=numpy.uint32)
    self.random.shuffle(r)
    return r

  self.bucketMap[self.minIndex] = _permutation(self.n)[0:self.w]

  # How often we need to retry when generating valid encodings
  self.numTries = 0
def retrySQL(timeoutSec=60*5, logger=None):
  """ Return a closure suitable for use as a decorator for
  retrying a pymysql DAO function on certain failures that warrant retries
  (e.g., RDS/MySQL server down temporarily, transaction deadlock, etc.).
  We share this function across multiple scripts (e.g., ClientJobsDAO,
  StreamMgr) for consistent behavior.

  .. note:: Please ensure that the operation being retried is idempotent.

  .. note:: logging must be initialized *before* any loggers are created,
    else there will be no output; see nupic.support.initLogging()

  Usage Example:

  .. code-block:: python

    @retrySQL()
    def jobInfo(self, jobID):
        ...

  :param timeoutSec: How many seconds from time of initial call to stop
      retrying (floating point)
  :param logger: User-supplied logger instance.
  """
  if logger is None:
    logger = logging.getLogger(__name__)

  def retryFilter(e, args, kwargs):
    if isinstance(e, (pymysql.InternalError, pymysql.OperationalError)):
      if e.args and e.args[0] in _ALL_RETRIABLE_ERROR_CODES:
        return True
    elif isinstance(e, pymysql.Error):
      if (e.args and
          inspect.isclass(e.args[0]) and issubclass(e.args[0], socket_error)):
        return True

    return False

  retryExceptions = tuple([
    pymysql.InternalError,
    pymysql.OperationalError,
    pymysql.Error,
  ])

  return make_retry_decorator(
    timeoutSec=timeoutSec, initialRetryDelaySec=0.1, maxRetryDelaySec=10,
    retryExceptions=retryExceptions, retryFilter=retryFilter,
    logger=logger)
def create(*args, **kwargs): """ Create a SDR classifier factory. The implementation of the SDR Classifier can be specified with the "implementation" keyword argument. The SDRClassifierFactory uses the implementation as specified in `Default NuPIC Configuration <default-config.html>`_. """ impl = kwargs.pop('implementation', None) if impl is None: impl = Configuration.get('nupic.opf.sdrClassifier.implementation') if impl == 'py': return SDRClassifier(*args, **kwargs) elif impl == 'cpp': return FastSDRClassifier(*args, **kwargs) elif impl == 'diff': return SDRClassifierDiff(*args, **kwargs) else: raise ValueError('Invalid classifier implementation (%r). Value must be ' '"py", "cpp" or "diff".' % impl)
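# A minimal usage sketch, assuming the factory above is exposed as a static
# method of SDRClassifierFactory; the constructor arguments shown are typical
# SDR classifier parameters, not required values.
classifier = SDRClassifierFactory.create(steps=[1], alpha=0.1,
                                         implementation='py')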
def read(proto): """ :param proto: SDRClassifierRegionProto capnproto object """ impl = proto.implementation if impl == 'py': return SDRClassifier.read(proto.sdrClassifier) elif impl == 'cpp': return FastSDRClassifier.read(proto.sdrClassifier) elif impl == 'diff': return SDRClassifierDiff.read(proto.sdrClassifier) else: raise ValueError('Invalid classifier implementation (%r). Value must be ' '"py", "cpp" or "diff".' % impl)
def cross_list(*sequences): """ From: http://book.opensourceproject.org.cn/lamp/python/pythoncook2/opensource/0596007973/pythoncook2-chp-19-sect-9.html """ result = [[ ]] for seq in sequences: result = [sublist+[item] for sublist in result for item in seq] return result
def cross(*sequences): """ From: http://book.opensourceproject.org.cn/lamp/python/pythoncook2/opensource/0596007973/pythoncook2-chp-19-sect-9.html """ # visualize an odometer, with "wheels" displaying "digits"...: wheels = map(iter, sequences) digits = [it.next( ) for it in wheels] while True: yield tuple(digits) for i in range(len(digits)-1, -1, -1): try: digits[i] = wheels[i].next( ) break except StopIteration: wheels[i] = iter(sequences[i]) digits[i] = wheels[i].next( ) else: break
def dcross(**keywords): """ Similar to cross(), but generates output dictionaries instead of tuples. """ keys = keywords.keys() # Could use keywords.values(), but unsure whether the order # the values come out in is guaranteed to be the same as that of keys # (appears to be anecdotally true). sequences = [keywords[key] for key in keys] wheels = map(iter, sequences) digits = [it.next( ) for it in wheels] while True: yield dict(zip(keys, digits)) for i in range(len(digits)-1, -1, -1): try: digits[i] = wheels[i].next( ) break except StopIteration: wheels[i] = iter(sequences[i]) digits[i] = wheels[i].next( ) else: break
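# Quick usage sketch for the three Cartesian-product helpers above.
print(cross_list([1, 2], 'ab'))
# [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]

print(list(cross([1, 2], 'ab')))
# [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]

print(list(dcross(n=[1, 2], c='ab')))
# [{'n': 1, 'c': 'a'}, {'n': 1, 'c': 'b'}, {'n': 2, 'c': 'a'}, {'n': 2, 'c': 'b'}]
# (key order within each dict is not guaranteed in Python 2)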
def mmGetMetricFromTrace(self, trace): """ Convenience method to compute a metric over an indices trace, excluding resets. @param (IndicesTrace) Trace of indices @return (Metric) Metric over trace excluding resets """ return Metric.createFromTrace(trace.makeCountsTrace(), excludeResets=self.mmGetTraceResets())
def mmGetMetricSequencesPredictedActiveCellsPerColumn(self): """ Metric for number of predicted => active cells per column for each sequence @return (Metric) metric """ self._mmComputeTransitionTraces() numCellsPerColumn = [] for predictedActiveCells in ( self._mmData["predictedActiveCellsForSequence"].values()): cellsForColumn = self.mapCellsToColumns(predictedActiveCells) numCellsPerColumn += [len(x) for x in cellsForColumn.values()] return Metric(self, "# predicted => active cells per column for each sequence", numCellsPerColumn)
def mmGetMetricSequencesPredictedActiveCellsShared(self):
  """
  Metric for number of sequences each predicted => active cell appears in

  Note: This metric is flawed when it comes to high-order sequences.

  @return (Metric) metric
  """
  self._mmComputeTransitionTraces()

  numSequencesForCell = defaultdict(lambda: 0)

  for predictedActiveCells in (
      self._mmData["predictedActiveCellsForSequence"].values()):
    for cell in predictedActiveCells:
      numSequencesForCell[cell] += 1

  return Metric(self,
                "# sequences each predicted => active cell appears in",
                numSequencesForCell.values())
def mmPrettyPrintConnections(self): """ Pretty print the connections in the temporal memory. TODO: Use PrettyTable. @return (string) Pretty-printed text """ text = "" text += ("Segments: (format => " "(#) [(source cell=permanence ...), ...]\n") text += "------------------------------------\n" columns = range(self.numberOfColumns()) for column in columns: cells = self.cellsForColumn(column) for cell in cells: segmentDict = dict() for seg in self.connections.segmentsForCell(cell): synapseList = [] for synapse in self.connections.synapsesForSegment(seg): synapseData = self.connections.dataForSynapse(synapse) synapseList.append( (synapseData.presynapticCell, synapseData.permanence)) synapseList.sort() synapseStringList = ["{0:3}={1:.2f}".format(sourceCell, permanence) for sourceCell, permanence in synapseList] segmentDict[seg] = "({0})".format(" ".join(synapseStringList)) text += ("Column {0:3} / Cell {1:3}:\t({2}) {3}\n".format( column, cell, len(segmentDict.values()), "[{0}]".format(", ".join(segmentDict.values())))) if column < len(columns) - 1: # not last text += "\n" text += "------------------------------------\n" return text
def mmPrettyPrintSequenceCellRepresentations(self, sortby="Column"): """ Pretty print the cell representations for sequences in the history. @param sortby (string) Column of table to sort by @return (string) Pretty-printed text """ self._mmComputeTransitionTraces() table = PrettyTable(["Pattern", "Column", "predicted=>active cells"]) for sequenceLabel, predictedActiveCells in ( self._mmData["predictedActiveCellsForSequence"].iteritems()): cellsForColumn = self.mapCellsToColumns(predictedActiveCells) for column, cells in cellsForColumn.iteritems(): table.add_row([sequenceLabel, column, list(cells)]) return table.get_string(sortby=sortby).encode("utf-8")
def createTemporalAnomaly(recordParams, spatialParams=_SP_PARAMS, temporalParams=_TM_PARAMS, verbosity=_VERBOSITY): """Generates a Network with connected RecordSensor, SP, TM. This function takes care of generating regions and the canonical links. The network has a sensor region reading data from a specified input and passing the encoded representation to an SPRegion. The SPRegion output is passed to a TMRegion. Note: this function returns a network that needs to be initialized. This allows the user to extend the network by adding further regions and connections. :param recordParams: a dict with parameters for creating RecordSensor region. :param spatialParams: a dict with parameters for creating SPRegion. :param temporalParams: a dict with parameters for creating TMRegion. :param verbosity: an integer representing how chatty the network will be. """ inputFilePath = recordParams["inputFilePath"] scalarEncoderArgs = recordParams["scalarEncoderArgs"] dateEncoderArgs = recordParams["dateEncoderArgs"] scalarEncoder = ScalarEncoder(**scalarEncoderArgs) dateEncoder = DateEncoder(**dateEncoderArgs) encoder = MultiEncoder() encoder.addEncoder(scalarEncoderArgs["name"], scalarEncoder) encoder.addEncoder(dateEncoderArgs["name"], dateEncoder) network = Network() network.addRegion("sensor", "py.RecordSensor", json.dumps({"verbosity": verbosity})) sensor = network.regions["sensor"].getSelf() sensor.encoder = encoder sensor.dataSource = FileRecordStream(streamID=inputFilePath) # Create the spatial pooler region spatialParams["inputWidth"] = sensor.encoder.getWidth() network.addRegion("spatialPoolerRegion", "py.SPRegion", json.dumps(spatialParams)) # Link the SP region to the sensor input network.link("sensor", "spatialPoolerRegion", "UniformLink", "") network.link("sensor", "spatialPoolerRegion", "UniformLink", "", srcOutput="resetOut", destInput="resetIn") network.link("spatialPoolerRegion", "sensor", "UniformLink", "", srcOutput="spatialTopDownOut", destInput="spatialTopDownIn") network.link("spatialPoolerRegion", "sensor", "UniformLink", "", srcOutput="temporalTopDownOut", destInput="temporalTopDownIn") # Add the TPRegion on top of the SPRegion network.addRegion("temporalPoolerRegion", "py.TMRegion", json.dumps(temporalParams)) network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "") network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "", srcOutput="topDownOut", destInput="topDownIn") spatialPoolerRegion = network.regions["spatialPoolerRegion"] # Make sure learning is enabled spatialPoolerRegion.setParameter("learningMode", True) # We want temporal anomalies so disable anomalyMode in the SP. This mode is # used for computing anomalies in a non-temporal model. spatialPoolerRegion.setParameter("anomalyMode", False) temporalPoolerRegion = network.regions["temporalPoolerRegion"] # Enable topDownMode to get the predicted columns output temporalPoolerRegion.setParameter("topDownMode", True) # Make sure learning is enabled (this is the default) temporalPoolerRegion.setParameter("learningMode", True) # Enable inference mode so we get predictions temporalPoolerRegion.setParameter("inferenceMode", True) # Enable anomalyMode to compute the anomaly score. temporalPoolerRegion.setParameter("anomalyMode", True) return network
def runNetwork(network, writer): """Run the network and write output to writer. :param network: a Network instance to run :param writer: a csv.writer instance to write output to """ sensorRegion = network.regions["sensor"] temporalPoolerRegion = network.regions["temporalPoolerRegion"] for i in xrange(_NUM_RECORDS): # Run the network for a single iteration network.run(1) # Write out the anomaly score along with the record number and consumption # value. anomalyScore = temporalPoolerRegion.getOutputData("anomalyScore")[0] consumption = sensorRegion.getOutputData("sourceOut")[0] writer.writerow((i, consumption, anomalyScore))
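# A hedged end-to-end sketch tying the two functions above together. The file
# names and encoder arguments are assumptions; the input CSV is assumed to be
# in FileRecordStream format with 'consumption' and 'timestamp' fields.
import csv

recordParams = {
  "inputFilePath": "data.csv",
  "scalarEncoderArgs": {"name": "consumption", "w": 21, "n": 400,
                        "minval": 0.0, "maxval": 100.0},
  "dateEncoderArgs": {"name": "timestamp", "timeOfDay": (21, 9.5)},
}

network = createTemporalAnomaly(recordParams)
network.initialize()  # the factory returns an uninitialized network

with open("anomaly_scores.csv", "wb") as f:
  runNetwork(network, csv.writer(f))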
def __appendActivities(self, periodicActivities): """ periodicActivities: A sequence of PeriodicActivityRequest elements """ for req in periodicActivities: act = self.Activity(repeating=req.repeating, period=req.period, cb=req.cb, iteratorHolder=[iter(xrange(req.period-1))]) self.__activities.append(act) return