| desc | decl | body |
|---|---|---|
'Generates a random sample from the Poisson probability distribution with
location and scale parameter equal to the current value (passed in).
Returns the value of the random sample, the log of the probability of
sampling that value, and the log of the probability of sampling the current
value if the roles of the ... | def propose(self, current, r):
| curLambda = (current + self.offset)
(x, logProb) = PoissonDistribution(curLambda).sample(r)
logBackward = PoissonDistribution((x + self.offset)).logDensity(current)
return (x, logProb, logBackward)
|
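The (value, forward log-prob, backward log-prob) triple returned by propose() is exactly what a Metropolis-Hastings acceptance test consumes. A minimal, self-contained sketch of that usage; the `offset` value and `target_log_density` are hypothetical stand-ins, not the original classes:

```python
import math
import numpy as np

rng = np.random.RandomState(42)

def poisson_log_pmf(k, lam):
    # log P(X = k) for a Poisson with rate lam
    return k * math.log(lam) - lam - math.lgamma(k + 1)

def propose(current, offset=0.5):
    lam = current + offset
    x = int(rng.poisson(lam))
    log_forward = poisson_log_pmf(x, lam)                # log q(x | current)
    log_backward = poisson_log_pmf(current, x + offset)  # log q(current | x)
    return x, log_forward, log_backward

def mh_step(current, target_log_density, offset=0.5):
    # Standard MH ratio: target ratio times proposal ratio, in log space.
    x, log_fwd, log_bwd = propose(current, offset)
    log_accept = (target_log_density(x) - target_log_density(current)
                  + log_bwd - log_fwd)
    return x if np.log(rng.uniform()) < log_accept else current

print(mh_step(4, lambda k: poisson_log_pmf(k, 3.0)))
```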
'Generates a random sample from the discrete probability distribution
and returns its value and the log of the probability of sampling that value.'
| def sample(self, rgen):
| rf = rgen.uniform(0, self.sum)
index = bisect.bisect(self.cdf, rf)
return (self.keys[index], numpy.log(self.pmf[index]))
|
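The scheme above draws a uniform value over the total mass and bisects into the running cumulative totals. A standalone sketch with hypothetical keys and counts:

```python
import bisect
import numpy as np

keys = ['a', 'b', 'c']
counts = np.array([2.0, 5.0, 3.0])   # hypothetical weights
pmf = counts / counts.sum()
cdf = np.cumsum(counts)              # unnormalized running totals
total = cdf[-1]

rgen = np.random.RandomState(0)
rf = rgen.uniform(0, total)      # uniform draw over the total mass
index = bisect.bisect(cdf, rf)   # first bucket whose cumulative mass exceeds rf
print(keys[index], np.log(pmf[index]))
```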
'Form of distribution must be an array of counts in order of self.keys.'
| def logProbability(self, distn):
| x = numpy.asarray(distn)
n = x.sum()
return ((logFactorial(n) - numpy.sum([logFactorial(k) for k in x])) + numpy.sum((x * numpy.log(self.dist.pmf))))
|
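The expression above is the multinomial log-likelihood, log P(counts | p) = log(n!) - sum_i log(k_i!) + sum_i k_i * log(p_i). A worked sketch with hypothetical counts and pmf:

```python
import math
import numpy as np

def log_factorial(n):
    return math.lgamma(n + 1)

counts = np.array([3, 1, 2])      # observed counts per category
pmf = np.array([0.5, 0.2, 0.3])   # category probabilities
n = counts.sum()
log_p = (log_factorial(n)
         - sum(log_factorial(k) for k in counts)
         + float(np.sum(counts * np.log(pmf))))
print(log_p)   # log-probability of these counts in n = 6 trials
```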
'Generates a random sample from the Poisson probability distribution and
returns its value and the log of the probability of sampling that value.'
| def sample(self, rgen):
| x = rgen.poisson(self.lambdaParameter)
return (x, self.logDensity(x))
|
'Gets the number of rows in the histogram.
:returns: Integer number of rows.'
| def numRows(self):
| if self.hist_:
return self.hist_.nRows()
else:
return 0
|
':return: (int) number of columns'
| def numColumns(self):
| if self.hist_:
return self.hist_.nCols()
else:
return 0
|
'Grows the histogram to have `rows` rows and `cols` columns.
Must not have been initialized before, or already have the same
number of columns.
If rows is smaller than the current number of rows,
does not shrink.
Also updates the sizes of the row and column sums.
:param rows: Integer number of rows.
:param cols: Integer nu... | def grow(self, rows, cols):
| if (not self.hist_):
self.hist_ = SparseMatrix(rows, cols)
self.rowSums_ = numpy.zeros(rows, dtype=dtype)
self.colSums_ = numpy.zeros(cols, dtype=dtype)
self.hack_ = None
else:
oldRows = self.hist_.nRows()
oldCols = self.hist_.nCols()
nextRows = max(oldRow... |
'Add a distribution to row `row`.
Distribution should be an array of probabilities or counts.
:param row: Integer index of the row to add to.
May be larger than the current number of rows, in which case
the histogram grows.
:param distribution: Array of length equal to the number of columns.'
| def updateRow(self, row, distribution):
| self.grow((row + 1), len(distribution))
self.hist_.axby(row, 1, 1, distribution)
self.rowSums_[row] += distribution.sum()
self.colSums_ += distribution
self.hack_ = None
|
'Computes the sumProp probability of each row given the input probability
of each column. Normalizes the distribution in each column on the fly.
The semantics are as follows: If the distribution is P(col|e) where e is
the evidence and col is the column, and the CPD represents P(row|col), then
this calculates sum(P(col|e... | def inferRow(self, distribution):
| return (self.hist_ * (distribution / self.colSums_))
|
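A dense-numpy sketch of inferRow()'s semantics: dividing each column's counts by that column's sum turns the histogram into P(row|col), so the matrix-vector product below is sum over col of P(col|e) * P(row|col). Values are hypothetical:

```python
import numpy as np

hist = np.array([[4.0, 0.0],
                 [1.0, 5.0]])         # counts[row, col]
col_sums = hist.sum(axis=0)           # kept incrementally by the class
p_col_given_e = np.array([0.3, 0.7])  # incoming column distribution

print(hist.dot(p_col_given_e / col_sums))   # -> [0.24 0.76]
```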
'Computes the probability of evidence given each row from the probability
of evidence given each column. Essentially, this just means that it sums
probabilities over (normalized) rows. Normalizes the distribution over
each row on the fly.
The semantics are as follows: If the distribution is P(e|col) where e is
evide... | def inferRowEvidence(self, distribution):
| return ((self.hist_ * distribution) / self.rowSums_)
|
'Equivalent to the category inference of zeta1.TopLevel.
Computes the max_prod (maximum component of a component-wise multiply)
between the rows of the histogram and the incoming distribution.
May be slow if the result of clean_outcpd() is not valid.
:param distribution: Array of length equal to the number of columns.
... | def inferRowCompat(self, distribution):
| if (self.hack_ is None):
self.clean_outcpd()
return self.hack_.vecMaxProd(distribution)
|
'Hack to act like clean_outcpd on zeta1.TopLevelNode.
Take the max element in each column, set it to 1, and set all the
other elements to 0.
Only called by inferRowMaxProd() and only needed if an updateRow()
has been called since the last clean_outcpd().'
| def clean_outcpd(self):
| m = self.hist_.toDense()
for j in xrange(m.shape[1]):
cmax = m[:, j].max()
if cmax:
m[:, j] = numpy.array((m[:, j] == cmax), dtype=dtype)
self.hack_ = SparseMatrix(0, self.hist_.nCols())
for i in xrange(m.shape[0]):
self.hack_.addRow(m[i, :])
|
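The winner-take-all transform clean_outcpd() applies, sketched with dense numpy in place of SparseMatrix: every column keeps a 1 at its maximal entries and 0 elsewhere.

```python
import numpy as np

m = np.array([[4.0, 2.0],
              [1.0, 5.0]])
for j in range(m.shape[1]):
    cmax = m[:, j].max()
    if cmax:   # all-zero columns are left untouched
        m[:, j] = (m[:, j] == cmax).astype(float)
print(m)   # [[1. 0.] [0. 1.]]
```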
'Shift the model result and return the new instance.
Queues up the T(i+1) prediction value and emits a T(i)
input/prediction pair, if possible. E.g., if the previous T(i-1)
iteration was learn-only, then we would not have a T(i) prediction in our
FIFO and would not be able to emit a meaningful input/prediction pair.
:p... | def shift(self, modelResult):
| inferencesToWrite = {}
if (self._inferenceBuffer is None):
maxDelay = InferenceElement.getMaxDelay(modelResult.inferences)
self._inferenceBuffer = collections.deque(maxlen=(maxDelay + 1))
self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences))
for (inferenceElement, infer... |
'TODO describe filterDict schema'
| def __init__(self, filterDict):
| self.filterDict = filterDict
|
'Returns True if the record matches any of the provided filters'
| def match(self, record):
| for (field, meta) in self.filterDict.iteritems():
index = meta['index']
categories = meta['categories']
for category in categories:
if (not record):
continue
if (record[index].find(category) != (-1)):
"\n ... |
'@param n (int) Number of available bits in pattern
@param w (int/list) Number of on bits in pattern
If list, each pattern will have a `w` randomly
selected from the list.
@param num (int) Number of available patterns'
| def __init__(self, n, w, num=100, seed=42):
| self._n = n
self._w = w
self._num = num
self._random = Random(seed)
self._patterns = dict()
self._generate()
|
'Return a pattern for a number.
@param number (int) Number of pattern
@return (set) Indices of on bits'
| def get(self, number):
| if (number not in self._patterns):
raise IndexError('Invalid number')
return self._patterns[number]
|
'Add noise to pattern.
@param bits (set) Indices of on bits
@param amount (float) Probability of switching an on bit with a random bit
@return (set) Indices of on bits in noisy pattern'
| def addNoise(self, bits, amount):
| newBits = set()
for bit in bits:
if (self._random.getReal64() < amount):
newBits.add(self._random.getUInt32(self._n))
else:
newBits.add(bit)
return newBits
|
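A standalone sketch of addNoise() using the stdlib RNG instead of nupic's Random (so draws differ from the class above): each on bit is independently swapped for a random bit with probability `amount`.

```python
import random

rng = random.Random(42)

def add_noise(bits, amount, n):
    new_bits = set()
    for bit in bits:
        if rng.random() < amount:
            new_bits.add(rng.randrange(n))   # replace with a random bit index
        else:
            new_bits.add(bit)                # keep the original bit
    return new_bits

print(add_noise({3, 7, 11, 19}, 0.25, n=100))
```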
'Return the set of pattern numbers that match a bit.
@param bit (int) Index of bit
@return (set) Indices of numbers'
| def numbersForBit(self, bit):
| if (bit >= self._n):
raise IndexError('Invalid bit')
numbers = set()
for (index, pattern) in self._patterns.iteritems():
if (bit in pattern):
numbers.add(index)
return numbers
|
'Return a map from number to matching on bits,
for all numbers that match a set of bits.
@param bits (set) Indices of bits
@return (dict) Mapping from number => on bits.'
| def numberMapForBits(self, bits):
| numberMap = dict()
for bit in bits:
numbers = self.numbersForBit(bit)
for number in numbers:
if (number not in numberMap):
numberMap[number] = set()
numberMap[number].add(bit)
return numberMap
|
'Pretty print a pattern.
@param bits (set) Indices of on bits
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text'
| def prettyPrintPattern(self, bits, verbosity=1):
| numberMap = self.numberMapForBits(bits)
text = ''
numberList = []
numberItems = sorted(numberMap.iteritems(), key=(lambda (number, bits): len(bits)), reverse=True)
for (number, bits) in numberItems:
if (verbosity > 2):
strBits = [str(n) for n in bits]
numberText = '{0... |
'Generates set of random patterns.'
| def _generate(self):
| candidates = np.array(range(self._n), np.uint32)
for i in xrange(self._num):
self._random.shuffle(candidates)
pattern = candidates[0:self._getW()]
self._patterns[i] = set(pattern)
|
'Gets a value of `w` for use in generating a pattern.'
| def _getW(self):
| w = self._w
if (type(w) is list):
return w[self._random.getUInt32(len(w))]
else:
return w
|
'Generates set of consecutive patterns.'
| def _generate(self):
| n = self._n
w = self._w
assert (type(w) is int), 'List for w not supported'
for i in xrange((n / w)):
pattern = set(xrange((i * w), ((i + 1) * w)))
self._patterns[i] = pattern
|
'Returns the next value of the distribution using knowledge about the
current state of the distribution as stored in numValues.'
| def getNext(self):
| raise Exception('getNext must be implemented by all subclasses')
|
'Returns the next n values for the distribution as a list.'
| def getData(self, n):
| records = [self.getNext() for x in range(n)]
return records
|
'Returns a dict of parameters pertinent to the distribution (if any) as
well as state variables such as numValues.'
| def getDescription(self):
| raise Exception('getDescription must be implemented by all subclasses')
|
'@param patternMachine (PatternMachine) Pattern machine instance'
| def __init__(self, patternMachine, seed=42):
| self.patternMachine = patternMachine
self._random = Random(seed)
|
'Generate a sequence from a list of numbers.
Note: Any `None` in the list of numbers is considered a reset.
@param numbers (list) List of numbers
@return (list) Generated sequence'
| def generateFromNumbers(self, numbers):
| sequence = []
for number in numbers:
if (number is None):
sequence.append(number)
else:
pattern = self.patternMachine.get(number)
sequence.append(pattern)
return sequence
|
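A runnable sketch of generateFromNumbers(): None entries pass through as reset markers, everything else is looked up in the pattern store. A plain dict stands in for the PatternMachine:

```python
patterns = {0: {1, 2}, 1: {3, 4}, 2: {5, 6}}   # stand-in for a PatternMachine

def generate_from_numbers(numbers):
    return [None if n is None else patterns[n] for n in numbers]

print(generate_from_numbers([0, 1, None, 2]))
# -> [{1, 2}, {3, 4}, None, {5, 6}]
```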
'Add spatial noise to each pattern in the sequence.
@param sequence (list) Sequence
@param amount (float) Amount of spatial noise
@return (list) Sequence with spatial noise'
| def addSpatialNoise(self, sequence, amount):
| newSequence = []
for pattern in sequence:
if (pattern is not None):
pattern = self.patternMachine.addNoise(pattern, amount)
newSequence.append(pattern)
return newSequence
|
'Pretty print a sequence.
@param sequence (list) Sequence
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text'
| def prettyPrintSequence(self, sequence, verbosity=1):
| text = ''
for i in xrange(len(sequence)):
pattern = sequence[i]
if (pattern is None):
text += '<reset>'
if (i < (len(sequence) - 1)):
text += '\n'
else:
text += self.patternMachine.prettyPrintPattern(pattern, verbosity=verbosity)
re... |
'@param numSequences (int) Number of sequences to return,
separated by None
@param sequenceLength (int) Length of each sequence
@param sharedRange (tuple) (start index, end index) indicating range of
shared subsequence in each sequence
(None if no shared subsequences)
@return (list) Numbers representing sequen... | def generateNumbers(self, numSequences, sequenceLength, sharedRange=None):
| numbers = []
if sharedRange:
(sharedStart, sharedEnd) = sharedRange
sharedLength = (sharedEnd - sharedStart)
sharedNumbers = range((numSequences * sequenceLength), ((numSequences * sequenceLength) + sharedLength))
for i in xrange(numSequences):
start = (i * sequenceLength)
... |
'Initialize the dataset generator with a random seed and a name'
| def __init__(self, name='testDataset', seed=42, verbosity=0):
| self.name = name
self.verbosity = verbosity
self.setSeed(seed)
self.fields = []
|
'Returns a description of the dataset'
| def getDescription(self):
| description = {'name': self.name, 'fields': [f.name for f in self.fields], 'numRecords by field': [f.numRecords for f in self.fields]}
return description
|
'Set the random seed and the numpy seed
Parameters:
seed: random seed'
| def setSeed(self, seed):
| rand.seed(seed)
np.random.seed(seed)
|
'Add a single field to the dataset.
Parameters:
name: The user-specified name of the field
fieldSpec: A list of one or more dictionaries specifying parameters
to be used for dataClass initialization. Each dict must
contain the key \'type\' that specifies a distribution for
the values in this field
en... | def addField(self, name, fieldParams, encoderParams):
| assert ((fieldParams is not None) and ('type' in fieldParams))
dataClassName = fieldParams.pop('type')
try:
dataClass = eval(dataClassName)(fieldParams)
except TypeError as e:
print ('#### Error in constructing %s class object. Possibly missing some required... |
'Add multiple fields to the dataset.
Parameters:
fieldsInfo: A list of dictionaries, containing a field name, specs for
the data classes and encoder params for the corresponding
field.'
| def addMultipleFields(self, fieldsInfo):
| assert all(((x in field) for x in ['name', 'fieldSpec', 'encoderParams'] for field in fieldsInfo))
for spec in fieldsInfo:
self.addField(spec.pop('name'), spec.pop('fieldSpec'), spec.pop('encoderParams'))
|
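A sketch of the fieldsInfo structure addMultipleFields() expects: one dict per field carrying 'name', 'fieldSpec' (whose 'type' key names a data class), and 'encoderParams'. The class name and parameter values below are hypothetical:

```python
fieldsInfo = [
    {
        'name': 'dailySales',
        'fieldSpec': {'type': 'GaussianDistribution',   # hypothetical data class
                      'mean': 50, 'std': 5},
        'encoderParams': {'n': 100, 'w': 15, 'minval': 0, 'maxval': 100},
    },
]
# dataGenerator.addMultipleFields(fieldsInfo)  # dataGenerator: a DataGenerator
```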
'Initialize field using relevant encoder parameters.
Parameters:
name: Field name
encoderParams: Parameters for the encoder.
Returns the index of the field'
| def defineField(self, name, encoderParams=None):
| self.fields.append(_field(name, encoderParams))
return (len(self.fields) - 1)
|
'Set flag for field at index. Flags are special characters such as \'S\' for
sequence or \'T\' for timestamp.
Parameters:
index: index of field whose flag is being set
flag: special character'
| def setFlag(self, index, flag):
| assert (len(self.fields) > index)
self.fields[index].flag = flag
|
'Generate a record. Each value is stored in its respective field.
Parameters:
record: A 1-D array containing as many values as the number of fields
fields: An object of the class field that specifies the characteristics
of each value in the record
Assertion:
len(record)==len(fields): A value for each fiel... | def generateRecord(self, record):
| assert (len(record) == len(self.fields))
if (record is not None):
for x in range(len(self.fields)):
self.fields[x].addValue(record[x])
else:
for field in self.fields:
field.addValue(field.dataClass.getNext())
|
'Generate multiple records. Refer to definition for generateRecord'
| def generateRecords(self, records):
| if (self.verbosity > 0):
print 'Generating', len(records), 'records...'
for record in records:
self.generateRecord(record)
|
'Returns the nth record'
| def getRecord(self, n=None):
| if (n is None):
assert (len(self.fields) > 0)
n = (self.fields[0].numRecords - 1)
assert all(((field.numRecords > n) for field in self.fields))
record = [field.values[n] for field in self.fields]
return record
|
'Returns all the records'
| def getAllRecords(self):
| values = []
numRecords = self.fields[0].numRecords
assert all(((field.numRecords == numRecords) for field in self.fields))
for x in range(numRecords):
values.append(self.getRecord(x))
return values
|
'Encode a record as a sparse distributed representation
Parameters:
record: Record to be encoded
toBeAdded: Whether the encodings corresponding to the record are added to
the corresponding fields'
| def encodeRecord(self, record, toBeAdded=True):
| encoding = [self.fields[i].encodeValue(record[i], toBeAdded) for i in xrange(len(self.fields))]
return encoding
|
'Encodes a list of records.
Parameters:
records: One or more records. (i,j)th element of this 2D array
specifies the value at field j of record i.
If unspecified, records previously generated and stored are
used.
toBeAdded: Whether the encodings corresponding to the record are added to
the corresponding fields'... | def encodeAllRecords(self, records=None, toBeAdded=True):
| if (records is None):
records = self.getAllRecords()
if (self.verbosity > 0):
print 'Encoding', len(records), 'records.'
encodings = [self.encodeRecord(record, toBeAdded) for record in records]
return encodings
|
'Add \'value\' to the field i.
Parameters:
value: value to be added
i: value is added to field i'
| def addValueToField(self, i, value=None):
| assert (len(self.fields) > i)
if (value is None):
value = self.fields[i].dataClass.getNext()
self.fields[i].addValue(value)
return value
|
'Add values to the field i.'
| def addValuesToField(self, i, numValues):
| assert (len(self.fields) > i)
values = [self.addValueToField(i) for n in range(numValues)]
return values
|
'Returns the sdr for jth value at column i'
| def getSDRforValue(self, i, j):
| assert (len(self.fields) > i)
assert (self.fields[i].numRecords > j)
encoding = self.fields[i].encodings[j]
return encoding
|
'Returns the nth encoding with the predictedField zeroed out'
| def getZeroedOutEncoding(self, n):
| assert all(((field.numRecords > n) for field in self.fields))
encoding = np.concatenate([(field.encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA) if field.isPredictedField else field.encodings[n]) for field in self.fields])
return encoding
|
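The real method encodes SENTINEL_VALUE_FOR_MISSING_DATA for the predicted field; this dense sketch substitutes an all-zero slice (one common outcome of encoding a missing-data sentinel) just to show the shape of the concatenated result:

```python
import numpy as np

field_encodings = [np.array([1, 0, 1]),   # predicted field's SDR
                   np.array([0, 1, 1])]   # another field's SDR
is_predicted = [True, False]

zeroed = np.concatenate([np.zeros_like(e) if p else e
                         for e, p in zip(field_encodings, is_predicted)])
print(zeroed)   # [0 0 0 0 1 1]
```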
'Returns the cumulative n for all the fields in the dataset'
| def getTotaln(self):
| n = sum([field.n for field in self.fields])
return n
|
'Returns the cumulative w for all the fields in the dataset'
| def getTotalw(self):
| w = sum([field.w for field in self.fields])
return w
|
'Returns the nth encoding'
| def getEncoding(self, n):
| assert all(((field.numEncodings > n) for field in self.fields))
encoding = np.concatenate([field.encodings[n] for field in self.fields])
return encoding
|
'Returns encodings for all the records'
| def getAllEncodings(self):
| numEncodings = self.fields[0].numEncodings
assert all(((field.numEncodings == numEncodings) for field in self.fields))
encodings = [self.getEncoding(index) for index in range(numEncodings)]
return encodings
|
'Returns all field names'
| def getAllFieldNames(self):
| names = [field.name for field in self.fields]
return names
|
'Returns flags for all fields'
| def getAllFlags(self):
| flags = [field.flag for field in self.fields]
return flags
|
'Returns data types for all fields'
| def getAllDataTypes(self):
| dataTypes = [field.dataType for field in self.fields]
return dataTypes
|
'Returns descriptions for all fields'
| def getFieldDescriptions(self):
| descriptions = [field.getDescription() for field in self.fields]
return descriptions
|
'Export all the records into a csv file in numenta format.
Example header format:
fieldName1 fieldName2 fieldName3
date string float
T S
Parameters:
path: Relative path of the file to which the records are to be exported'
| def saveRecords(self, path='myOutput'):
| numRecords = self.fields[0].numRecords
assert all(((field.numRecords == numRecords) for field in self.fields))
import csv
with open((path + '.csv'), 'wb') as f:
writer = csv.writer(f)
writer.writerow(self.getAllFieldNames())
writer.writerow(self.getAllDataTypes())
writer.... |
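A sketch of the three-row header layout saveRecords() writes (field names, then data types, then flags), using hypothetical fields; flag placement follows the 'T'/'S' convention described in setFlag():

```python
import csv

with open('myOutput.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(['timestamp', 'name', 'value'])    # row 1: field names
    writer.writerow(['datetime', 'string', 'float'])   # row 2: data types
    writer.writerow(['T', '', 'S'])                    # row 3: flags
    writer.writerow(['2014-01-01 00:00:00', 'rec0', 3.5])
```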
'Deletes all the values in the dataset'
| def removeAllRecords(self):
| for field in self.fields:
(field.encodings, field.values) = ([], [])
(field.numRecords, field.numEncodings) = (0, 0)
|
'Initialize a field with various parameters such as n, w, flag, dataType,
encoderType, and tag predicted field.'
| def __init__(self, name, encoderSpec):
| self.name = name
(self.n, self.w) = (100, 15)
(self.encoderType, self.dataType, self.dataClassName) = (None, None, None)
self.flag = ''
self.isPredictedField = False
if (encoderSpec is not None):
if ('n' in encoderSpec):
self.n = encoderSpec.pop('n')
if ('w' in encode... |
'Add values to the field'
| def addValues(self, values):
| for v in values:
self.addValue(v)
|
'Add value to the field'
| def addValue(self, value):
| self.values.append(value)
self.numRecords += 1
|
'Value is encoded as a sdr using the encoding parameters of the Field'
| def encodeValue(self, value, toBeAdded=True):
| encodedValue = np.array(self.encoder.encode(value), dtype=realDType)
if toBeAdded:
self.encodings.append(encodedValue)
self.numEncodings += 1
return encodedValue
|
'Set up the dataTypes and initialize encoders'
| def _setTypes(self, encoderSpec):
| if (self.encoderType is None):
if (self.dataType in ['int', 'float']):
self.encoderType = 'adaptiveScalar'
elif (self.dataType == 'string'):
self.encoderType = 'category'
elif (self.dataType in ['date', 'datetime']):
self.encoderType = 'date'
if (self.... |
'Initialize the encoders'
| def _initializeEncoders(self, encoderSpec):
| if (self.encoderType in ['adaptiveScalar', 'scalar']):
if ('minval' in encoderSpec):
self.minval = encoderSpec.pop('minval')
else:
self.minval = None
if ('maxval' in encoderSpec):
self.maxval = encoderSpec.pop('maxval')
else:
self.maxva... |
'Creates a :class:`.field_meta.FieldMetaInfo` instance from a tuple containing
``name``, ``type``, and ``special``.
:param fieldInfoTuple: Must contain ``name``, ``type``, and ``special``
:return: (:class:`~.field_meta.FieldMetaInfo`) instance'
| @staticmethod
def createFromFileFieldElement(fieldInfoTuple):
| return FieldMetaInfo._make(fieldInfoTuple)
|
'Creates a FieldMetaInfo list from a list of tuples. Basically runs
:meth:`~.field_meta.FieldMetaInfo.createFromFileFieldElement` on each tuple.
*Example:*
.. code-block:: python
# Create a list of FieldMetaInfo instances from a list of File meta-data
# tuples
el = [("pounds", FieldMetaType.float, FieldMetaSpecial.... | @classmethod
def createListFromFileFieldList(cls, fields):
| return [cls.createFromFileFieldElement(f) for f in fields]
|
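Completing the truncated example above in spirit: each tuple is (name, type, special). The import path is an assumption (newer nupic exposes nupic.data.field_meta; older releases used nupic.data.fieldmeta):

```python
# Import path is an assumption; see lead-in.
from nupic.data.field_meta import (FieldMetaInfo, FieldMetaType,
                                   FieldMetaSpecial)

fields = [
    ('timestamp', FieldMetaType.datetime, FieldMetaSpecial.timestamp),
    ('pounds', FieldMetaType.float, FieldMetaSpecial.none),
]
metaList = FieldMetaInfo.createListFromFileFieldList(fields)
print(metaList[1].name, metaList[1].type)   # pounds float
```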
'Check a candidate value whether it\'s one of the valid field data types
:param fieldDataType: (string) candidate field data type
:returns: True if the candidate value is a legitimate field data type value;
False if not'
| @classmethod
def isValid(cls, fieldDataType):
| return (fieldDataType in cls._ALL)
|
'Check a candidate value whether it\'s one of the valid attributes
:param attr: (string) candidate value
:returns: True if the candidate value is a legitimate "special" field
attribute; False if not'
| @classmethod
def isValid(cls, attr):
| return (attr in cls._ALL)
|
'Override of getStats() in BaseStatsCollector
stats: A dictionary into which all the stats are
written'
| def getStats(self, stats):
| BaseStatsCollector.getStats(self, stats)
sortedNumberList = sorted(self.valueList)
listLength = len(sortedNumberList)
min = sortedNumberList[0]
max = sortedNumberList[(-1)]
mean = numpy.mean(self.valueList)
median = sortedNumberList[int((0.5 * listLength))]
percentile1st = sortedNumberLi... |
'Add a delta field to the data.'
| def __init__(self, origField, deltaField):
| self.origField = origField
self.deltaField = deltaField
self.previousValue = None
self.rememberReset = False
|
'Open the underlying file stream
This only supports \'file://\' prefixed paths.
:returns: record stream instance
:rtype: FileRecordStream'
| @staticmethod
def _openStream(dataUrl, isBlocking, maxTimeout, bookmark, firstRecordIdx):
| filePath = dataUrl[len(FILE_PREF):]
if (not os.path.isabs(filePath)):
filePath = os.path.join(os.getcwd(), filePath)
return FileRecordStream(streamID=filePath, write=False, bookmark=bookmark, firstRecord=firstRecordIdx)
|
'Close the stream'
| def close(self):
| return self._recordStore.close()
|
'Returns combined data from all sources (values only).
:returns: None on EOF; empty sequence on timeout.'
| def getNextRecord(self):
| while True:
if ((self._sourceLastRecordIdx is not None) and (self._recordStore.getNextRecordIdx() >= self._sourceLastRecordIdx)):
preAggValues = None
bookmark = self._recordStore.getBookmark()
else:
preAggValues = self._recordStore.getNextRecord()
book... |
'Iterates through stream to calculate total records after aggregation.
This will alter the bookmark state.'
| def getDataRowCount(self):
| inputRowCountAfterAggregation = 0
while True:
record = self.getNextRecord()
if (record is None):
return inputRowCountAfterAggregation
inputRowCountAfterAggregation += 1
if (inputRowCountAfterAggregation > 10000):
raise RuntimeError('No end of data... |
':returns: the index of the record that will be read next from
:meth:`getNextRecord`.'
| def getNextRecordIdx(self):
| return self._recordCount
|
':returns: True if there are records left after the bookmark.'
| def recordsExistAfter(self, bookmark):
| return self._recordStore.recordsExistAfter(bookmark)
|
'Returns the aggregation period of the record stream as a dict
containing \'months\' and \'seconds\'. The months is always an integer and
seconds is a floating point. Only one is allowed to be non-zero at a
time.
Will return the aggregation period from this call. This call is
used by the :meth:`nupic.data.record_stream... | def getAggregationMonthsAndSeconds(self):
| return self._aggMonthsAndSeconds
|
'Returns all fields in all inputs (list of plain names).
.. note:: currently, only one input is supported'
| def getFieldNames(self):
| return [f.name for f in self._streamFields]
|
':returns: a sequence of :class:`nupic.data.fieldmeta.FieldMetaInfo` for each
field in the stream.'
| def getFields(self):
| return self._streamFields
|
':returns: a bookmark to the current position'
| def getBookmark(self):
| return self._aggBookmark
|
'Resets stats collected so far.'
| def clearStats(self):
| self._recordStore.clearStats()
|
'TODO: This method needs to be enhanced to get the stats on the *aggregated*
records.
:returns: stats (like min and max values of the fields).'
| def getStats(self):
| recordStoreStats = self._recordStore.getStats()
streamStats = dict()
for (key, values) in recordStoreStats.items():
fieldStats = dict(zip(self._recordStoreFieldNames, values))
streamValues = []
for name in self._streamFieldNames:
streamValues.append(fieldStats[name])
... |
':returns: errors saved in the stream.'
| def getError(self):
| return self._recordStore.getError()
|
'Saves specified error in the stream.
:param error: to save'
| def setError(self, error):
| self._recordStore.setError(error)
|
':returns: True if all records have been read.'
| def isCompleted(self):
| return self._recordStore.isCompleted()
|
'Marks the stream completed (True or False)
:param completed: (bool) is completed or not'
| def setCompleted(self, completed=True):
| self._recordStore.setCompleted(completed)
|
'Set the read timeout.
:param timeout: (float or int) timeout length'
| def setTimeout(self, timeout):
| self._recordStore.setTimeout(timeout)
|
'Figure out whether reset, sequenceId,
both or neither are present in the data.
Compute once instead of every time.
Taken from filesource.py'
| def _cacheSequenceInfoType(self):
| hasReset = (self.resetFieldName is not None)
hasSequenceId = (self.sequenceIdFieldName is not None)
if (hasReset and (not hasSequenceId)):
self._sequenceInfoType = self.SEQUENCEINFO_RESET_ONLY
self._prevSequenceId = 0
elif ((not hasReset) and hasSequenceId):
self._sequenceInfoTyp... |
'Construct an aggregator instance
Params:
- aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function will be used to aggregate
multiple values during the aggregation period.
- aggregation period: 0 or more... | def __init__(self, aggregationInfo, inputFields, timeFieldName=None, sequenceIdFieldName=None, resetFieldName=None, filterInfo=None):
| self._filterInfo = filterInfo
self._nullAggregation = False
self._inputFields = inputFields
if (aggregationInfo is None):
self._nullAggregation = True
else:
aggDef = defaultdict((lambda : 0), aggregationInfo)
if (aggDef['years'] == aggDef['mo... |
'Add the aggregation period to the input time t and return a datetime object
Years and months are handled as a special case due to leap years
and months with different numbers of days. They can\'t be converted
to a strict timedelta because a period of 3 months will have different
durations actually. The solution is to j... | def _getEndTime(self, t):
| assert isinstance(t, datetime.datetime)
if self._aggTimeDelta:
return (t + self._aggTimeDelta)
else:
year = ((t.year + self._aggYears) + (((t.month - 1) + self._aggMonths) / 12))
month = ((((t.month - 1) + self._aggMonths) % 12) + 1)
return t.replace(year=year, month=month)
|
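A worked example of the year/month arithmetic above for a 3-month period starting at 2010-11-15: year = 2010 + (10 + 3) // 12 = 2011 and month = (10 + 3) % 12 + 1 = 2, i.e. 2011-02-15. Self-contained sketch:

```python
import datetime

def add_months(t, years, months):
    # Same 0-based month arithmetic as _getEndTime() above.
    year = t.year + years + (t.month - 1 + months) // 12
    month = (t.month - 1 + months) % 12 + 1
    # Note: replace() raises for day overflow (e.g. Jan 31 + 1 month),
    # which is part of why months can't be a strict timedelta.
    return t.replace(year=year, month=month)

print(add_months(datetime.datetime(2010, 11, 15), 0, 3))  # 2011-02-15 00:00:00
```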
'Given the name of an aggregation function, returns the function pointer
and param.
Parameters:
funcName: a string (name of function) or funcPtr
retval: (funcPtr, param)'
| def _getFuncPtrAndParams(self, funcName):
| params = None
if isinstance(funcName, basestring):
if (funcName == 'sum'):
fp = _aggr_sum
elif (funcName == 'first'):
fp = _aggr_first
elif (funcName == 'last'):
fp = _aggr_last
elif (funcName == 'mean'):
fp = _aggr_mean
eli... |
'Generate the aggregated output record
Parameters:
retval: outputRecord'
| def _createAggregateRecord(self):
| record = []
for (i, (fieldIdx, aggFP, paramIdx)) in enumerate(self._fields):
if (aggFP is None):
continue
values = self._slice[i]
refIndex = None
if (paramIdx is not None):
record.append(aggFP(values, self._slice[paramIdx]))
else:
recor... |
'Return True if no aggregation will be performed, either because the
aggregationInfo was None or all aggregation params within it were 0.'
| def isNullAggregation(self):
| return self._nullAggregation
|
'Return the next aggregated record, if any
Parameters:
record: The input record (values only) from the input source, or
None if the input has reached EOF (this will cause this
method to force completion of and return any partially
aggregated time period)
curInputBookmark: The bookmark to the next input record
r... | def next(self, record, curInputBookmark):
| outRecord = None
retInputBookmark = None
if (record is not None):
self._inIdx += 1
if ((self._filter is not None) and (not self._filter[0](self._filter[1], record))):
return (None, None)
if self._nullAggregation:
return (record, curInputBookmark)
t = recor... |
':param fields: non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo
objects corresponding to fields in input rows.
:param aggregationPeriod: (dict) aggregation period of the record stream
containing \'months\' and \'seconds\'. The months is always an integer
and seconds is a floating point. Only one is allowed to ... | def __init__(self, fields, aggregationPeriod=None):
| if (not fields):
raise ValueError(('fields arg must be non-empty, but got %r' % (fields,)))
self._fields = fields
self._aggregationPeriod = aggregationPeriod
self._sequenceId = (-1)
self._fieldNames = tuple((f.name for f in fields))
self._categoryFieldIndex = _getFie... |
'Put us back at the beginning of the file again'
| def rewind(self):
| self._sequenceId = (-1)
|
'Encodes the given input row as a dict, with the
keys being the field names. This also adds in some meta fields:
\'_category\': The value from the category field (if any)
\'_reset\': True if the reset field was True (if any)
\'_sequenceId\': the value from the sequenceId field (if any)
:param inputRow: sequence of valu... | def encode(self, inputRow):
| result = dict(zip(self._fieldNames, inputRow))
if (self._categoryFieldIndex is not None):
if isinstance(inputRow[self._categoryFieldIndex], int):
result['_category'] = [inputRow[self._categoryFieldIndex]]
else:
result['_category'] = (inputRow[self._categoryFieldIndex] if ... |
'Given the timestamp of a record (a datetime object), compute the record\'s
timestamp index - this is the timestamp divided by the aggregation period.
Parameters:
recordTS: datetime instance
retval: record timestamp index, or None if no aggregation period'
| def _computeTimestampRecordIdx(self, recordTS):
| if (self._aggregationPeriod is None):
return None
if (self._aggregationPeriod['months'] > 0):
assert (self._aggregationPeriod['seconds'] == 0)
result = int((((recordTS.year * 12) + (recordTS.month - 1)) / self._aggregationPeriod['months']))
elif (self._aggregationPeriod['seconds'] > ... |
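A worked example of the months branch above: the record's absolute month number (year * 12 + month - 1), integer-divided by the period length, buckets timestamps into aggregation periods. Shown for hypothetical 3-month periods:

```python
import datetime

def month_bucket(ts, period_months):
    return (ts.year * 12 + (ts.month - 1)) // period_months

print(month_bucket(datetime.datetime(2014, 1, 10), 3))  # 8056
print(month_bucket(datetime.datetime(2014, 3, 31), 3))  # 8056 (same period)
print(month_bucket(datetime.datetime(2014, 4, 1), 3))   # 8057 (next period)
```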
'Put us back at the beginning of the file again.'
| def rewind(self):
| if (self._modelRecordEncoder is not None):
self._modelRecordEncoder.rewind()
|