desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Change the values of 1 or more fields in a job. Here, \'fields\' is a dict with the name/value pairs to change. The names are the public names of the fields (camelBack, not the lower_case_only form as stored in the DB). This method is for private use by the ClientJobManager only. Parameters: jobID: jobID of the jo...
@logExceptions(_LOGGER) @g_retrySQL def jobSetFields(self, jobID, fields, useConnectionID=True, ignoreUnchanged=False):
assignmentExpressions = ','.join([('%s=%%s' % (self._jobs.pubToDBNameDict[f],)) for f in fields.iterkeys()]) assignmentValues = fields.values() query = ('UPDATE %s SET %s WHERE job_id=%%s' % (self.jobsTableName, assignmentExpressions)) sqlParams = (...
'Change the value of 1 field in a job to \'newValue\', but only if the current value matches \'curValue\'. The \'fieldName\' is the public name of the field (camelBack, not the lower_case_only form as stored in the DB). This method is used for example by HypersearcWorkers to update the engWorkerState field periodically...
@logExceptions(_LOGGER) @g_retrySQL def jobSetFieldIfEqual(self, jobID, fieldName, newValue, curValue):
dbFieldName = self._jobs.pubToDBNameDict[fieldName] conditionValue = [] if isinstance(curValue, bool): conditionExpression = ('%s IS %s' % (dbFieldName, {True: 'TRUE', False: 'FALSE'}[curValue])) elif (curValue is None): conditionExpression = ('%s is NULL' % (dbFieldName,)) ...
'Incremet the value of 1 field in a job by increment. The \'fieldName\' is the public name of the field (camelBack, not the lower_case_only form as stored in the DB). This method is used for example by HypersearcWorkers to update the engWorkerState field periodically. By qualifying on curValue, it insures that only 1 w...
@logExceptions(_LOGGER) @g_retrySQL def jobIncrementIntField(self, jobID, fieldName, increment=1, useConnectionID=False):
dbFieldName = self._jobs.pubToDBNameDict[fieldName] with ConnectionFactory.get() as conn: query = ('UPDATE %s SET %s=%s+%%s WHERE job_id=%%s' % (self.jobsTableName, dbFieldName, dbFieldName)) sqlParams = [increment, jobID] if useConn...
'Update the results string and last-update-time fields of a model. Parameters: jobID: job ID of model to modify results: new results (json dict string)'
@logExceptions(_LOGGER) @g_retrySQL def jobUpdateResults(self, jobID, results):
with ConnectionFactory.get() as conn: query = ('UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), results=%%s WHERE job_id=%%s' % (self.jobsTableName,)) conn.cursor.execute(query, [re...
@logExceptions(_LOGGER)
@g_retrySQL
def modelsClearAll(self):
    """Delete every row from the models table.

    The table name is logged first so that destructive cleanup is traceable
    in the logs.
    """
    self._logger.info('Deleting all rows from models table %r',
                      self.modelsTableName)
    with ConnectionFactory.get() as conn:
        conn.cursor.execute('DELETE FROM %s' % self.modelsTableName)
'Insert a new unique model (based on params) into the model table in the "running" state. This will return two things: whether or not the model was actually inserted (i.e. that set of params isn\'t already in the table) and the modelID chosen for that set of params. Even if the model was not inserted by this call (it w...
@logExceptions(_LOGGER) def modelInsertAndStart(self, jobID, params, paramsHash, particleHash=None):
if (particleHash is None): particleHash = paramsHash paramsHash = self._normalizeHash(paramsHash) particleHash = self._normalizeHash(particleHash) def findExactMatchNoRetries(conn): return self._getOneMatchingRowNoRetries(self._models, conn, {'job_id': jobID, '_eng_params_hash': paramsHa...
'Get ALL info for a set of models WARNING!!!: The order of the results are NOT necessarily in the same order as the order of the model IDs passed in!!! Parameters: modelIDs: list of model IDs retval: list of nametuples containing all the fields stored for each model.'
@logExceptions(_LOGGER) def modelsInfo(self, modelIDs):
assert isinstance(modelIDs, self._SEQUENCE_TYPES), ('wrong modelIDs type: %s' % (type(modelIDs),)) assert modelIDs, 'modelIDs is empty' rows = self._getMatchingRowsWithRetries(self._models, dict(model_id=modelIDs), [self._models.pubToDBNameDict[f] for f in self._models.modelInfoNamedTuple._fi...
'Fetch the values of 1 or more fields from a sequence of model records. Here, \'fields\' is a list with the names of the fields to fetch. The names are the public names of the fields (camelBack, not the lower_case_only form as stored in the DB). WARNING!!!: The order of the results are NOT necessarily in the same order...
@logExceptions(_LOGGER) def modelsGetFields(self, modelIDs, fields):
assert (len(fields) >= 1), 'fields is empty' isSequence = isinstance(modelIDs, self._SEQUENCE_TYPES) if isSequence: assert (len(modelIDs) >= 1), 'modelIDs is empty' else: modelIDs = [modelIDs] rows = self._getMatchingRowsWithRetries(self._models, dict(model_id=modelIDs), ...
'Gets the specified fields for all the models for a single job. This is similar to modelsGetFields Parameters: jobID: jobID for the models to be searched fields: A list of fields to return ignoreKilled: (True/False). If True, this will ignore models that have been killed Returns: a (poss...
@logExceptions(_LOGGER) @g_retrySQL def modelsGetFieldsForJob(self, jobID, fields, ignoreKilled=False):
assert (len(fields) >= 1), 'fields is empty' dbFields = [self._models.pubToDBNameDict[x] for x in fields] dbFieldsStr = ','.join(dbFields) query = ('SELECT model_id, %s FROM %s WHERE job_id=%%s ' % (dbFieldsStr, self.modelsTableName)) ...
'Gets fields from all models in a job that have been checkpointed. This is used to figure out whether or not a new model should be checkpointed. Parameters: jobID: The jobID for the models to be searched fields: A list of fields to return Returns: a (possibly-empty) list of tuples a...
@logExceptions(_LOGGER) @g_retrySQL def modelsGetFieldsForCheckpointed(self, jobID, fields):
assert (len(fields) >= 1), 'fields is empty' with ConnectionFactory.get() as conn: dbFields = [self._models.pubToDBNameDict[f] for f in fields] dbFieldStr = ', '.join(dbFields) query = 'SELECT model_id, {fields} from {models} WHERE job_id=%s AND m...
'Change the values of 1 or more fields in a model. Here, \'fields\' is a dict with the name/value pairs to change. The names are the public names of the fields (camelBack, not the lower_case_only form as stored in the DB). Parameters: jobID: jobID of the job record fields: dictionary of fields to change ignoreUn...
@logExceptions(_LOGGER) @g_retrySQL def modelSetFields(self, modelID, fields, ignoreUnchanged=False):
assignmentExpressions = ','.join((('%s=%%s' % (self._models.pubToDBNameDict[f],)) for f in fields.iterkeys())) assignmentValues = fields.values() query = ('UPDATE %s SET %s, update_counter = update_counter+1 WHERE model_id=%%s' % (self.modelsTa...
'Get the params and paramsHash for a set of models. WARNING!!!: The order of the results are NOT necessarily in the same order as the order of the model IDs passed in!!! Parameters: modelIDs: list of model IDs retval: list of result namedtuples defined in ClientJobsDAO._models.getParamsNamedTuple. Each tuple co...
@logExceptions(_LOGGER) def modelsGetParams(self, modelIDs):
assert isinstance(modelIDs, self._SEQUENCE_TYPES), ('Wrong modelIDs type: %r' % (type(modelIDs),)) assert (len(modelIDs) >= 1), 'modelIDs is empty' rows = self._getMatchingRowsWithRetries(self._models, {'model_id': modelIDs}, [self._models.pubToDBNameDict[f] for f in self._models.getParamsNam...
'Get the results string and other status fields for a set of models. WARNING!!!: The order of the results are NOT necessarily in the same order as the order of the model IDs passed in!!! For each model, this returns a tuple containing: (modelID, results, status, updateCounter, numRecords, completionReason, completionMs...
@logExceptions(_LOGGER) def modelsGetResultAndStatus(self, modelIDs):
assert isinstance(modelIDs, self._SEQUENCE_TYPES), ('Wrong modelIDs type: %r' % type(modelIDs)) assert (len(modelIDs) >= 1), 'modelIDs is empty' rows = self._getMatchingRowsWithRetries(self._models, {'model_id': modelIDs}, [self._models.pubToDBNameDict[f] for f in self._models.getResultAndSta...
@logExceptions(_LOGGER)
def modelsGetUpdateCounters(self, jobID):
    """Return (modelID, updateCounter) info for all models of a given job.

    The full results strings are deliberately excluded because they can be
    large; only the small integer fields are fetched.

    :param jobID: job whose models to enumerate
    :return: list of getUpdateCountersNamedTuple instances (unordered)
    """
    resultType = self._models.getUpdateCountersNamedTuple
    dbColumns = [self._models.pubToDBNameDict[pubName]
                 for pubName in resultType._fields]
    matchingRows = self._getMatchingRowsWithRetries(
        self._models, {'job_id': jobID}, dbColumns)
    return [resultType._make(row) for row in matchingRows]
'Update the results string, and/or num_records fields of a model. This will fail if the model does not currently belong to this client (connection_id doesn\'t match). Parameters: modelID: model ID of model to modify results: new results, or None to ignore metricValue: the value of the metric being optimized,...
@logExceptions(_LOGGER) @g_retrySQL def modelUpdateResults(self, modelID, results=None, metricValue=None, numRecords=None):
assignmentExpressions = ['_eng_last_update_time=UTC_TIMESTAMP()', 'update_counter=update_counter+1'] assignmentValues = [] if (results is not None): assignmentExpressions.append('results=%s') assignmentValues.append(results) if (numRecords is not None): assignmentExpressions.appe...
'Mark a model as completed, with the given completionReason and completionMsg. This will fail if the model does not currently belong to this client (connection_id doesn\'t match). Parameters: modelID: model ID of model to modify completionReason: completionReason string completionMsg: completionMsg...
@logExceptions(_LOGGER) @g_retrySQL def modelSetCompleted(self, modelID, completionReason, completionMsg, cpuTime=0, useConnectionID=True):
if (completionMsg is None): completionMsg = '' query = ('UPDATE %s SET status=%%s, completion_reason=%%s, completion_msg=%%s, end_time=UTC_TIM...
'Look through the models table for an orphaned model, which is a model that is not completed yet, whose _eng_last_update_time is more than maxUpdateInterval seconds ago. If one is found, change its _eng_worker_conn_id to the current worker\'s and return the model id. Parameters: retval: modelId of the model we adopt...
@logExceptions(_LOGGER) def modelAdoptNextOrphan(self, jobId, maxUpdateInterval):
@g_retrySQL def findCandidateModelWithRetries(): modelID = None with ConnectionFactory.get() as conn: query = ('SELECT model_id FROM %s WHERE status=%%s AND job_id=%%s ...
'Acquire a ConnectionWrapper instance that represents a connection to the SQL server per nupic.cluster.database.* configuration settings. NOTE: caller is responsible for calling the ConnectionWrapper instance\'s release() method after using the connection in order to release resources. Better yet, use the returned Conn...
@classmethod def get(cls):
if (cls._connectionPolicy is None): logger = _getLogger(cls) logger.info('Creating db connection policy via provider %r', cls._connectionPolicyInstanceProvider) cls._connectionPolicy = cls._connectionPolicyInstanceProvider() logger.debug('Created connection po...
@classmethod
def close(cls):
    """Close ConnectionFactory's connection policy, if one was created.

    NOTE: use with CAUTION; intended only for process startup/shutdown
    code. Calling it while other code holds connections may lead to
    unexpected results.
    """
    policy = cls._connectionPolicy
    if policy is None:
        return
    policy.close()
    cls._connectionPolicy = None
@classmethod
def setConnectionPolicyProvider(cls, provider):
    """Install the factory callable used to instantiate the database
    connection policy.

    NOTE: intended primarily for diagnostics; the default provider is
    adequate for normal NuPIC code.

    :param provider: zero-argument callable returning a connection policy
    """
    cls._connectionPolicyInstanceProvider = provider
    return
'[private] Create the default database connection policy instance Parameters: retval: The default database connection policy instance'
@classmethod def _createDefaultPolicy(cls):
logger = _getLogger(cls) logger.debug('Creating database connection policy: platform=%r; pymysql.VERSION=%r', platform.system(), pymysql.VERSION) if (platform.system() == 'Java'): policy = SingleSharedConnectionPolicy() else: policy = PooledConnectionPolicy() return po...
'Parameters: dbConn: the underlying database connection instance cursor: database cursor releaser: a method to call to release the connection and cursor; method signature: None dbConnReleaser(dbConn, cursor)'
def __init__(self, dbConn, cursor, releaser, logger):
global g_max_concurrency try: self._logger = logger self.dbConn = dbConn ' database connection instance ' self.cursor = cursor " Public cursor instance. Don't close it directly: Connection.release()\n will ...
def __enter__(self):
    """[Context Manager protocol method] Permit a ConnectionWrapper instance
    to be used in a context manager expression (with ... as:) to facilitate
    robust release of resources (instead of try:/finally:/release()).
    See examples in ConnectionFactory docstring.
    """
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    """[Context Manager protocol method] Release resources on exit from a
    `with` block.

    Returns False so that any in-flight exception propagates to the caller
    instead of being suppressed.
    """
    self.release()
    return False
'Release the database connection and cursor The receiver of the Connection instance MUST call this method in order to reclaim resources'
def release(self):
self._logger.debug('Releasing: %r', self) if self._addedToInstanceSet: try: self._clsOutstandingInstances.remove(self) except: self._logger.exception('Failed to remove self from _clsOutstandingInstances: %r;', self) raise self._release...
'Check for concurrency violation and add self to _clsOutstandingInstances. ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is incremented'
def _trackInstanceAndCheckForConcurrencyViolation(self):
global g_max_concurrency, g_max_concurrency_raise_exception assert (g_max_concurrency is not None) assert (self not in self._clsOutstandingInstances), repr(self) self._creationTracebackString = traceback.format_stack() if (self._clsNumOutstanding >= g_max_concurrency): errorMsg = ('With n...
def close(self):
    """Close the policy instance and its shared database connection.

    NOTE: abstract; concrete connection-policy subclasses must override.
    """
    raise NotImplementedError()
def acquireConnection(self):
    """Get a Connection instance.

    :return: A ConnectionWrapper instance. Caller is responsible for calling
        the ConnectionWrapper instance's release() method to release
        resources.

    NOTE: abstract; concrete connection-policy subclasses must override.
    """
    raise NotImplementedError()
def __init__(self):
    """Construct the policy and open its single shared SteadyDB connection.

    The instance is ready for acquireConnection() calls immediately after
    construction.
    """
    self._logger = _getLogger(self.__class__)
    # Every acquireConnection() call hands out a wrapper around this one
    # shared connection.
    self._conn = SteadyDB.connect(**_getCommonSteadyDBArgsDict())
    self._logger.debug('Created %s', self.__class__.__name__)
    return
def close(self):
    """Close the policy instance and its shared database connection.

    Idempotent: calling close() on an already-closed policy logs a warning
    instead of raising.
    """
    self._logger.info('Closing')
    if self._conn is not None:
        self._conn.close()
        self._conn = None
    else:
        # Fixed typo in warning message ("alredy" -> "already")
        self._logger.warning(
            'close() called, but connection policy was already closed')
    return
def acquireConnection(self):
    """Get a Connection instance wrapping the shared connection.

    :return: ConnectionWrapper; caller must call release() (or use it as a
        context manager) to release resources.
    """
    self._logger.debug('Acquiring connection')
    # SteadyDB's ping check transparently revives a dead connection.
    self._conn._ping_check()
    return ConnectionWrapper(dbConn=self._conn,
                             cursor=self._conn.cursor(),
                             releaser=self._releaseConnection,
                             logger=self._logger)
'Release database connection and cursor; passed as a callback to ConnectionWrapper'
def _releaseConnection(self, dbConn, cursor):
self._logger.debug('Releasing connection') cursor.close() return
def __init__(self):
    """Construct the policy and create its underlying connection pool.

    The instance is ready for acquireConnection() calls immediately after
    construction.
    """
    self._logger = _getLogger(self.__class__)
    self._logger.debug('Opening')
    # PooledDB hands out dedicated (non-shared) connections on demand.
    self._pool = PooledDB(**_getCommonSteadyDBArgsDict())
    self._logger.info('Created %s', self.__class__.__name__)
def close(self):
    """Close the policy instance and its database connection pool.

    Idempotent: calling close() on an already-closed policy logs a warning
    instead of raising.
    """
    self._logger.info('Closing')
    if self._pool is not None:
        self._pool.close()
        self._pool = None
    else:
        # Fixed typo in warning message ("alredy" -> "already")
        self._logger.warning(
            'close() called, but connection policy was already closed')
    return
def acquireConnection(self):
    """Check a connection out of the pool and wrap it.

    :return: ConnectionWrapper; caller must call release() (or use it as a
        context manager) so the connection is returned to the pool.
    """
    self._logger.debug('Acquiring connection')
    # shareable=False gives this caller exclusive use of the connection.
    dbConn = self._pool.connection(shareable=False)
    return ConnectionWrapper(dbConn=dbConn,
                             cursor=dbConn.cursor(),
                             releaser=self._releaseConnection,
                             logger=self._logger)
'Release database connection and cursor; passed as a callback to ConnectionWrapper'
def _releaseConnection(self, dbConn, cursor):
self._logger.debug('Releasing connection') cursor.close() dbConn.close() return
def __init__(self):
    """Construct the policy; a fresh connection is created per acquire call.

    The instance is ready for acquireConnection() calls immediately after
    construction.
    """
    self._logger = _getLogger(self.__class__)
    self._opened = True
    self._logger.info('Created %s', self.__class__.__name__)
def close(self):
    """Close the policy instance.

    Idempotent: calling close() on an already-closed policy logs a warning
    instead of raising.
    """
    self._logger.info('Closing')
    if self._opened:
        self._opened = False
    else:
        # Fixed typo in warning message ("alredy" -> "already")
        self._logger.warning(
            'close() called, but connection policy was already closed')
    return
def acquireConnection(self):
    """Create a brand-new SteadyDB connection and wrap it.

    :return: ConnectionWrapper; caller must call release() (or use it as a
        context manager), which closes the connection outright.
    """
    self._logger.debug('Acquiring connection')
    dbConn = SteadyDB.connect(**_getCommonSteadyDBArgsDict())
    return ConnectionWrapper(dbConn=dbConn,
                             cursor=dbConn.cursor(),
                             releaser=self._releaseConnection,
                             logger=self._logger)
'Release database connection and cursor; passed as a callback to ConnectionWrapper'
def _releaseConnection(self, dbConn, cursor):
self._logger.debug('Releasing connection') cursor.close() dbConn.close() return
def __init__(self, n, w=None, name='sparse_pass_through', forced=False, verbosity=0):
    """
    :param n: the total bits in input
    :param w: the number of bits used to encode each input bit
    """
    super(SparsePassThroughEncoder, self).__init__(n, w, name, forced, verbosity)
'See method description in base.py'
def encodeIntoArray(self, value, output):
denseInput = numpy.zeros(output.shape) try: denseInput[value] = 1 except IndexError: if isinstance(value, numpy.ndarray): raise ValueError('Numpy array must have integer dtype but got {}'.format(value.dtype)) raise super(SparsePassThroughEncode...
'[ScalarEncoder class method override]'
def __init__(self, w, minval=None, maxval=None, periodic=False, n=0, radius=0, resolution=0, name=None, verbosity=0, clipInput=True, forced=False):
self._learningEnabled = True self._stateLock = False self.width = 0 self.encoders = None self.description = [] self.name = name if periodic: raise Exception('Delta encoder does not encode periodic inputs') assert (n != 0) self._adaptiveScalarEnc = AdaptiveSc...
'[ScalarEncoder class method override]'
def topDownCompute(self, encoded):
if ((self._prevAbsolute == None) or (self._prevDelta == None)): return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))] ret = self._adaptiveScalarEnc.topDownCompute(encoded) if (self._prevAbsolute != None): ret = [EncoderResult(value=(ret[0].value + self._prevAbsolute), scala...
def getDecoderOutputFieldTypes(self):
    """Encoder class virtual method override.

    :return: singleton tuple -- this encoder decodes to a single float field.
    """
    return (FieldMetaType.float,)
def _getScaledValue(self, inpt):
    """Convert the input, which is in normal space, into log10 space.

    The value is clipped to [self.minval, self.maxval] before taking the
    logarithm. Missing data maps to None.
    """
    if inpt == SENTINEL_VALUE_FOR_MISSING_DATA:
        return None
    # Clamp into the encoder's legal range, then scale.
    clipped = min(max(inpt, self.minval), self.maxval)
    return math.log10(clipped)
def getBucketIndices(self, inpt):
    """See the function description in base.py"""
    scaled = self._getScaledValue(inpt)
    # Missing data yields a single None bucket slot.
    return [None] if scaled is None else self.encoder.getBucketIndices(scaled)
'See the function description in base.py'
def encodeIntoArray(self, inpt, output):
scaledVal = self._getScaledValue(inpt) if (scaledVal is None): output[0:] = 0 else: self.encoder.encodeIntoArray(scaledVal, output) if (self.verbosity >= 2): print 'input:', inpt, 'scaledVal:', scaledVal, 'output:', output print 'decoded:', self.decodedToStr(s...
'See the function description in base.py'
def decode(self, encoded, parentFieldName=''):
(fieldsDict, fieldNames) = self.encoder.decode(encoded) if (len(fieldsDict) == 0): return (fieldsDict, fieldNames) assert (len(fieldsDict) == 1) (inRanges, inDesc) = fieldsDict.values()[0] outRanges = [] for (minV, maxV) in inRanges: outRanges.append((math.pow(10, minV), math.pow...
def getBucketValues(self):
    """See the function description in base.py"""
    # Lazily compute and cache the unscaled (10**x) bucket values.
    if self._bucketValues is None:
        self._bucketValues = [math.pow(10, scaled)
                              for scaled in self.encoder.getBucketValues()]
    return self._bucketValues
def getBucketInfo(self, buckets):
    """See the function description in base.py"""
    scaledResult = self.encoder.getBucketInfo(buckets)[0]
    # Undo the log10 scaling to recover the value in normal space.
    value = math.pow(10, scaledResult.value)
    return [EncoderResult(value=value, scalar=value,
                          encoding=scaledResult.encoding)]
def topDownCompute(self, encoded):
    """See the function description in base.py"""
    scaledResult = self.encoder.topDownCompute(encoded)[0]
    # Undo the log10 scaling to recover the value in normal space.
    value = math.pow(10, scaledResult.value)
    return EncoderResult(value=value, scalar=value,
                         encoding=scaledResult.encoding)
'See the function description in base.py'
def closenessScores(self, expValues, actValues, fractional=True):
if (expValues[0] > 0): expValue = math.log10(expValues[0]) else: expValue = self.minScaledValue if (actValues[0] > 0): actValue = math.log10(actValues[0]) else: actValue = self.minScaledValue if fractional: err = abs((expValue - actValue)) pctErr = (er...
def getDescription(self):
    """See `nupic.encoders.base.Encoder` for more information."""
    subFields = ('speed', 'longitude', 'latitude', 'altitude')
    return [(fieldName, bitOffset)
            for bitOffset, fieldName in enumerate(subFields)]
def getScalars(self, inputData):
    """See `nupic.encoders.base.Encoder` for more information.

    This encoder has no meaningful scalar representation; it emits one zero
    per sub-field reported by getDescription().
    """
    return numpy.zeros(len(self.getDescription()), dtype=int)
'See `nupic.encoders.base.Encoder` for more information. :param: inputData (tuple) Contains speed (float), longitude (float), latitude (float), altitude (float) :param: output (numpy.array) Stores encoded SDR in this numpy array'
def encodeIntoArray(self, inputData, output):
altitude = None if (len(inputData) == 4): (speed, longitude, latitude, altitude) = inputData else: (speed, longitude, latitude) = inputData coordinate = self.coordinateForPosition(longitude, latitude, altitude) radius = self.radiusForSpeed(speed) super(GeospatialCoordinateEncoder...
def coordinateForPosition(self, longitude, latitude, altitude=None):
    """Returns coordinate for given GPS position.

    :param longitude: (float) Longitude of position
    :param latitude: (float) Latitude of position
    :param altitude: (float) Altitude of position
    :returns: (numpy.array) Coordinate that the given GPS position maps to
    """
    coords = PROJ(longitude, latitude)
    if altitude is not None:
        # With altitude, re-project into geocentric space.
        coords = transform(PROJ, geocentric, coords[0], coords[1], altitude)
    return (numpy.array(coords) / self.scale).astype(int)
def radiusForSpeed(self, speed):
    """Returns radius for given speed.

    Tries to get the encodings of consecutive readings to be adjacent with
    some overlap.

    :param speed: (float) Speed (in meters per second)
    :returns: (int) Radius for given speed
    """
    overlap = 1.5
    coordsPerTimestep = (speed * self.timestep) / self.scale
    speedRadius = int(round((float(coordsPerTimestep) / 2) * overlap))
    # Radius can never shrink below what w active bits require.
    minRadius = int(math.ceil((math.sqrt(self.w) - 1) / 2))
    return max(speedRadius, minRadius)
def getWidth(self):
    """Should return the output width, in bits.

    :return: (int) output width in bits

    NOTE: abstract; subclasses must override.
    """
    raise NotImplementedError()
def encodeIntoArray(self, inputData, output):
    """Encodes inputData and puts the encoded value into the numpy output
    array, which is a 1-D array of length returned by :meth:`.getWidth`.

    .. note:: The numpy output array is reused, so clear it before updating
       it.

    :param inputData: Data to encode. This should be validated by the
        encoder.
    :param output: numpy 1-D array of length returned by :meth:`.getWidth`.

    NOTE: abstract; subclasses must override.
    """
    raise NotImplementedError()
def setLearning(self, learningEnabled):
    """Set whether learning is enabled.

    :param learningEnabled: (bool) whether learning should be enabled

    Silently ignored by encoders that do not carry a learning flag.
    """
    if not hasattr(self, '_learningEnabled'):
        return
    self._learningEnabled = learningEnabled
def setFieldStats(self, fieldName, fieldStatistics):
    """This method is called by the model to set the statistics like min and
    max for the underlying encoders if this information is available.

    :param fieldName: name of the field this encoder is encoding, provided
        by :class:`~.nupic.encoders.multi.MultiEncoder`.
    :param fieldStatistics: dictionary of dictionaries with field statistics.

    Default implementation is a no-op; subclasses may override.
    """
    pass
def encode(self, inputData):
    """Convenience wrapper for :meth:`.encodeIntoArray`.

    This may be less efficient because it allocates a new numpy array every
    call.

    :param inputData: input data to be encoded
    :return: a numpy array with the encoded representation of inputData
    """
    encoded = numpy.zeros(self.getWidth(), dtype=defaultDtype)
    self.encodeIntoArray(inputData, encoded)
    return encoded
'Return the field names for each of the scalar values returned by getScalars. :param parentFieldName: The name of the encoder which is our parent. This name is prefixed to each of the field names within this encoder to form the keys of the dict() in the retval. :return: array of field names'
def getScalarNames(self, parentFieldName=''):
names = [] if (self.encoders is not None): for (name, encoder, offset) in self.encoders: subNames = encoder.getScalarNames(parentFieldName=name) if (parentFieldName != ''): subNames = [('%s.%s' % (parentFieldName, name)) for name in subNames] names.ext...
'Returns a sequence of field types corresponding to the elements in the decoded output field array. The types are defined by :class:`~nupic.data.field_meta.FieldMetaType`. :return: list of :class:`~nupic.data.field_meta.FieldMetaType` objects'
def getDecoderOutputFieldTypes(self):
if (hasattr(self, '_flattenedFieldTypeList') and (self._flattenedFieldTypeList is not None)): return self._flattenedFieldTypeList fieldTypes = [] for (name, encoder, offset) in self.encoders: subTypes = encoder.getDecoderOutputFieldTypes() fieldTypes.extend(subTypes) self._flatte...
def setStateLock(self, lock):
    """Setting this to true freezes the state of the encoder. This is
    separate from the learning state, which affects changing parameters.
    Implemented in subclasses; this base implementation is a no-op.

    :param lock: (bool) True to freeze encoder state
    """
    pass
'Gets the value of a given field from the input record'
def _getInputValue(self, obj, fieldName):
if isinstance(obj, dict): if (not (fieldName in obj)): knownFields = ', '.join((key for key in obj.keys() if (not key.startswith('_')))) raise ValueError(("Unknown field name '%s' in input record. Known fields are '%s'.\nThis could be because...
':return: a reference to each sub-encoder in this encoder. They are returned in the same order as they are for :meth:`.getScalarNames` and :meth:`.getScalars`.'
def getEncoderList(self):
if (hasattr(self, '_flattenedEncoderList') and (self._flattenedEncoderList is not None)): return self._flattenedEncoderList encoders = [] if (self.encoders is not None): for (name, encoder, offset) in self.encoders: subEncoders = encoder.getEncoderList() encoders.exte...
'Returns a numpy array containing the sub-field scalar value(s) for each sub-field of the ``inputData``. To get the associated field names for each of the scalar values, call :meth:`.getScalarNames()`. For a simple scalar encoder, the scalar value is simply the input unmodified. For category encoders, it is the scalar ...
def getScalars(self, inputData):
retVals = numpy.array([]) if (self.encoders is not None): for (name, encoder, offset) in self.encoders: values = encoder.getScalars(self._getInputValue(inputData, name)) retVals = numpy.hstack((retVals, values)) else: retVals = numpy.hstack((retVals, inputData)) r...
'Returns the input in the same format as is returned by :meth:`.topDownCompute`. For most encoder types, this is the same as the input data. For instance, for scalar and category types, this corresponds to the numeric and string values, respectively, from the inputs. For datetime encoders, this returns the list of scal...
def getEncodedValues(self, inputData):
retVals = [] if (self.encoders is not None): for (name, encoders, offset) in self.encoders: values = encoders.getEncodedValues(self._getInputValue(inputData, name)) if _isSequence(values): retVals.extend(values) else: retVals.append(val...
'Returns an array containing the sub-field bucket indices for each sub-field of the inputData. To get the associated field names for each of the buckets, call :meth:`.getScalarNames`. :param inputData: The data from the source. This is typically an object with members. :return: array of bucket indices'
def getBucketIndices(self, inputData):
retVals = [] if (self.encoders is not None): for (name, encoder, offset) in self.encoders: values = encoder.getBucketIndices(self._getInputValue(inputData, name)) retVals.extend(values) else: assert False, 'Should be implemented in base classes that ...
def scalarsToStr(self, scalarValues, scalarNames=None):
    """Return a pretty print string representing the return values from
    :meth:`.getScalars` and :meth:`.getScalarNames`.

    :param scalarValues: input values to encode to string
    :param scalarNames: optional scalar names to use. If None, names come
        from :meth:`.getScalarNames`
    :return: string representation of the scalars, e.g. "a:1.00, b:2.50"
    """
    if scalarNames is None:
        scalarNames = self.getScalarNames()
    return ', '.join('%s:%.2f' % pair
                     for pair in zip(scalarNames, scalarValues))
def getDescription(self):
    """**Must be overridden by subclasses.**

    This returns a list of tuples, each containing (``name``, ``offset``).
    The ``name`` is a string description of each sub-field, and ``offset``
    is the bit offset of the sub-field for that encoder. For now, only the
    'multi' and 'date' encoders have multiple (name, offset) pairs.
    """
    raise Exception('getDescription must be implemented by all subclasses')
'Return the offset and length of a given field within the encoded output. :param fieldName: Name of the field :return: tuple(``offset``, ``width``) of the field within the encoded output'
def getFieldDescription(self, fieldName):
description = (self.getDescription() + [('end', self.getWidth())]) for i in xrange(len(description)): (name, offset) = description[i] if (name == fieldName): break if (i >= (len(description) - 1)): raise RuntimeError(('Field name %s not found in this ...
'Return a description of the given bit in the encoded output. This will include the field name and the offset within the field. :param bitOffset: Offset of the bit to get the description of :param formatted: If True, the bitOffset is w.r.t. formatted output, which includes separators :return: tupl...
def encodedBitDescription(self, bitOffset, formatted=False):
(prevFieldName, prevFieldOffset) = (None, None) description = self.getDescription() for i in xrange(len(description)): (name, offset) = description[i] if formatted: offset = (offset + i) if (bitOffset == (offset - 1)): prevFieldName = 'separator' ...
'Pretty-print a header that labels the sub-fields of the encoded output. This can be used in conjuction with :meth:`.pprint`. :param prefix: printed before the header if specified'
def pprintHeader(self, prefix=''):
print prefix, description = (self.getDescription() + [('end', self.getWidth())]) for i in xrange((len(description) - 1)): name = description[i][0] width = (description[(i + 1)][1] - description[i][1]) formatStr = ('%%-%ds |' % width) if (len(name) > width): pna...
def pprint(self, output, prefix=''):
  """Pretty-print the encoded output using ascii art.

  :param output: to print
  :param prefix: printed before the header if specified
  """
  print prefix,
  # Append a sentinel ('end', totalWidth) entry so each field's bits can be
  # sliced as output[offset:nextoffset].
  description = (self.getDescription() + [('end', self.getWidth())])
  for i in xrange((len(description) - 1)):
      offset = description[i][1]
      nextoffset = description[(i + 1)][1]
      # Render this field's bits followed by a '|' separator; the trailing
      # comma keeps all fields on one line (Python 2 print soft-space).
      print ('%s |' % bitsToString(output[offset:nextoffset])),
  print
'Takes an encoded output and does its best to work backwards and generate the input that would have generated it. In cases where the encoded output contains more ON bits than an input would have generated, this routine will return one or more ranges of inputs which, if their encoded outputs were ORed together, would pr...
def decode(self, encoded, parentFieldName=''):
fieldsDict = dict() fieldsOrder = [] if (parentFieldName == ''): parentName = self.name else: parentName = ('%s.%s' % (parentFieldName, self.name)) if (self.encoders is not None): for i in xrange(len(self.encoders)): (name, encoder, offset) = self.encoders[i] ...
def decodedToStr(self, decodeResults):
  """Return a pretty print string representing the return value from
  :meth:`.decode`.
  """
  (fieldsDict, fieldsOrder) = decodeResults
  # One "fieldName:[rangesStr]" chunk per field, in decode order,
  # comma separated.
  parts = []
  for fieldName in fieldsOrder:
    (ranges, rangesStr) = fieldsDict[fieldName]
    parts.append('%s:[%s]' % (fieldName, rangesStr))
  return ', '.join(parts)
def getBucketValues(self):
  """**Must be overridden by subclasses.**

  Returns a list of items, one for each bucket defined by this encoder.
  Each item is the value assigned to that bucket, this is the same as the
  :attr:`.EncoderResult.value` that would be returned by
  :meth:`.getBucketInfo` for that bucket.

  :raises NotImplementedError: always; concrete encoders must override.
  """
  # NotImplementedError is the idiomatic marker for an abstract method.
  # It subclasses Exception, so any existing ``except Exception`` callers
  # continue to work unchanged.
  raise NotImplementedError('getBucketValues must be implemented by all subclasses')
'Returns a list of :class:`.EncoderResult` namedtuples describing the inputs for each sub-field that correspond to the bucket indices passed in ``buckets``. To get the associated field names for each of the values, call :meth:`.getScalarNames`. :param buckets: The list of bucket indices, one for each sub-field encoder....
def getBucketInfo(self, buckets):
if (self.encoders is None): raise RuntimeError('Must be implemented in sub-class') retVals = [] bucketOffset = 0 for i in xrange(len(self.encoders)): (name, encoder, offset) = self.encoders[i] if (encoder.encoders is not None): nextBucketOffset = (bucketOf...
'Returns a list of :class:`.EncoderResult` namedtuples describing the top-down best guess inputs for each sub-field given the encoded output. These are the values which are most likely to generate the given encoded output. To get the associated field names for each of the values, call :meth:`.getScalarNames`. :param en...
def topDownCompute(self, encoded):
if (self.encoders is None): raise RuntimeError('Must be implemented in sub-class') retVals = [] for i in xrange(len(self.encoders)): (name, encoder, offset) = self.encoders[i] if (i < (len(self.encoders) - 1)): nextOffset = self.encoders[(i + 1)][2] el...
'Compute closeness scores between the expected scalar value(s) and actual scalar value(s). The expected scalar values are typically those obtained from the :meth:`.getScalars` method. The actual scalar values are typically those returned from :meth:`.topDownCompute`. This method returns one closeness score for each val...
def closenessScores(self, expValues, actValues, fractional=True):
if (self.encoders is None): err = abs((expValues[0] - actValues[0])) if fractional: denom = max(expValues[0], actValues[0]) if (denom == 0): denom = 1.0 closeness = (1.0 - (float(err) / denom)) if (closeness < 0): closen...
def getDisplayWidth(self):
  """Calculate width of display for bits plus blanks between fields.

  :return: (int) width of display for bits plus blanks between fields
  """
  # One separator column is printed between each pair of adjacent fields,
  # hence (number of fields - 1) extra columns on top of the raw bit width.
  separators = len(self.getDescription()) - 1
  return self.getWidth() + separators
def getDecoderOutputFieldTypes(self):
  """[Encoder class virtual method override]"""
  # Decoded output for this encoder is a single string field.
  fieldTypes = (FieldMetaType.string,)
  return fieldTypes
def getScalars(self, input):
  """See method description in base.py"""
  # The pass-through encoder has no scalar interpretation of its input;
  # it always reports a single scalar of 0.
  return numpy.asarray([0])
def getBucketIndices(self, input):
  """See method description in base.py"""
  # Every input maps to the single bucket 0 for this encoder.
  return [0]
'See method description in base.py'
def encodeIntoArray(self, inputVal, outputVal):
if (len(inputVal) != len(outputVal)): raise ValueError(('Different input (%i) and output (%i) sizes.' % (len(inputVal), len(outputVal)))) if ((self.w is not None) and (sum(inputVal) != self.w)): raise ValueError(('Input has %i bits but w was set to %i...
def decode(self, encoded, parentFieldName=''):
  """See the function description in base.py"""
  # Qualify this encoder's name with the parent's name when one is given.
  fieldName = (self.name if parentFieldName == ''
               else '%s.%s' % (parentFieldName, self.name))
  return ({fieldName: ([[0, 0]], 'input')}, [fieldName])
def getBucketInfo(self, buckets):
  """See the function description in base.py"""
  # Single fixed bucket: value 0, scalar 0, and an all-zero encoding of
  # width self.n.
  emptyEncoding = numpy.zeros(self.n)
  return [EncoderResult(value=0, scalar=0, encoding=emptyEncoding)]
def topDownCompute(self, encoded):
  """See the function description in base.py"""
  # The best-guess input is always the single fixed zero bucket.
  zeroEncoding = numpy.zeros(self.n)
  return EncoderResult(value=0, scalar=0, encoding=zeroEncoding)
'Does a bitwise compare of the two bitmaps and returns a fractonal value between 0 and 1 of how similar they are. - ``1`` => identical - ``0`` => no overlaping bits ``kwargs`` will have the keyword "fractional", which is assumed by this encoder.'
def closenessScores(self, expValues, actValues, **kwargs):
ratio = 1.0 esum = int(expValues.sum()) asum = int(actValues.sum()) if (asum > esum): diff = (asum - esum) if (diff < esum): ratio = (1 - (diff / float(esum))) else: ratio = (1 / float(diff)) olap = (expValues & actValues) osum = int(olap.sum()) ...
def getDecoderOutputFieldTypes(self):
  """[Encoder class virtual method override]"""
  # Decoded output for this encoder is a single integer field.
  fieldTypes = (FieldMetaType.integer,)
  return fieldTypes
def getScalars(self, input):
  """See method description in base.py"""
  # Missing data has no scalar representation; otherwise map the category
  # to its index (unknown categories fall back to index 0).
  if input == SENTINEL_VALUE_FOR_MISSING_DATA:
    return numpy.array([None])
  return numpy.array([self.categoryToIndex.get(input, 0)])
def getBucketIndices(self, input):
  """See method description in base.py"""
  # Missing data has no bucket; otherwise delegate to the underlying
  # scalar encoder using the category's index (unknown categories -> 0).
  if input == SENTINEL_VALUE_FOR_MISSING_DATA:
    return [None]
  return self.encoder.getBucketIndices(self.categoryToIndex.get(input, 0))
'See the function description in base.py'
def decode(self, encoded, parentFieldName=''):
(fieldsDict, fieldNames) = self.encoder.decode(encoded) if (len(fieldsDict) == 0): return (fieldsDict, fieldNames) assert (len(fieldsDict) == 1) (inRanges, inDesc) = fieldsDict.values()[0] outRanges = [] desc = '' for (minV, maxV) in inRanges: minV = int(round(minV)) ...
def closenessScores(self, expValues, actValues, fractional=True):
  """See the function description in base.py

  kwargs will have the keyword "fractional", which is ignored by this
  encoder
  """
  # Categories either match exactly (1.0) or not at all (0.0); when a
  # distance rather than a similarity is requested, invert the score.
  match = 1.0 if expValues[0] == actValues[0] else 0.0
  if not fractional:
    match = 1.0 - match
  return numpy.array([match])
def getBucketValues(self):
  """See the function description in base.py"""
  # Lazily build the per-bucket category list on first use and cache it
  # in self._bucketValues for all subsequent calls.
  if self._bucketValues is None:
    numBuckets = len(self.encoder.getBucketValues())
    self._bucketValues = [self.getBucketInfo([index])[0].value
                          for index in range(numBuckets)]
  return self._bucketValues
def getBucketInfo(self, buckets):
  """See the function description in base.py"""
  # Ask the underlying scalar encoder for the bucket, then translate its
  # numeric value back into the corresponding category.
  scalarInfo = self.encoder.getBucketInfo(buckets)[0]
  categoryIndex = int(round(scalarInfo.value))
  return [EncoderResult(value=self.indexToCategory[categoryIndex],
                        scalar=categoryIndex,
                        encoding=scalarInfo.encoding)]
def topDownCompute(self, encoded):
  """See the function description in base.py"""
  # Delegate the top-down computation to the underlying scalar encoder,
  # then map its numeric best-guess value back to a category.
  scalarResult = self.encoder.topDownCompute(encoded)[0]
  categoryIndex = int(round(scalarResult.value))
  return EncoderResult(value=self.indexToCategory[categoryIndex],
                       scalar=categoryIndex,
                       encoding=scalarResult.encoding)